Dataset schema (one line per column; ranges are min - max observed in the data):

column             dtype          range / values
repo_name          string         length 5 - 114
repo_url           string         length 24 - 133
snapshot_id        string         length 40
revision_id        string         length 40
directory_id       string         length 40
branch_name        string         209 distinct values
visit_date         timestamp[ns]  -
revision_date      timestamp[ns]  -
committer_date     timestamp[ns]  -
github_id          int64          9.83k - 683M
star_events_count  int64          0 - 22.6k
fork_events_count  int64          0 - 4.15k
gha_license_id     string         17 distinct values
gha_created_at     timestamp[ns]  -
gha_updated_at     timestamp[ns]  -
gha_pushed_at      timestamp[ns]  -
gha_language       string         115 distinct values
files              list           length 1 - 13.2k
num_files          int64          1 - 13.2k
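The listing above is column metadata flattened out of a dataset preview; the records that follow show each column's value in the same order. As a minimal sketch of reading such a dataset (assuming it is published in a Hugging Face `datasets`-compatible format; the dataset id below is a placeholder, not the real one):

```python
from datasets import load_dataset

# Placeholder dataset id -- substitute the real repository id (assumption).
ds = load_dataset("some-org/code-snapshots", split="train", streaming=True)

# Stream a single record and print its scalar columns.
row = next(iter(ds))
for key in ("repo_name", "repo_url", "branch_name", "gha_language", "num_files"):
    print(key, "=", row[key])
```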

repo_name: weaveworks-experiments/fluxctl-rollout
repo_url: https://github.com/weaveworks-experiments/fluxctl-rollout
snapshot_id: 4f5d227a2bdfa2e6daa351807ad700a740a0df71
revision_id: 8ef162367b40fa2666c692364139a10117435529
directory_id: f98dd09b449a96c27d0e74e884d055117cddcb8d
branch_name: refs/heads/master
visit_date: 2021-01-22T13:52:37.630018
revision_date: 2017-08-29T21:50:29
committer_date: 2017-08-29T21:50:29
github_id: 100695886
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6018306612968445, "alphanum_fraction": 0.6086956262588501, "avg_line_length": 15.807692527770996, "blob_id": "9403cab6294a49f625cce05b71845ef267ff6dbb", "content_id": "b10f5128ae2802c589fedfabec995eff44c25547", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 437, "license_type": "permissive", "max_line_length": 64, "num_lines": 26, "path": "/main.go", "repo_name": "weaveworks-experiments/fluxctl-rollout", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"os\"\n\n\t\"github.com/pkg/errors\"\n)\n\nfunc run(args []string) int {\n\trootCmd := newRoot().Command()\n\trootCmd.SetArgs(args)\n\tif cmd, err := rootCmd.ExecuteC(); err != nil {\n\t\terr = errors.Cause(err)\n\t\tswitch err := err.(type) {\n\t\tdefault:\n\t\t\tcmd.Println(\"Error: \" + err.Error())\n\t\t\tcmd.Printf(\"Run '%v --help' for usage.\\n\", cmd.CommandPath())\n\t\t}\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc main() {\n\tos.Exit(run(os.Args[1:]))\n}\n" }, { "alpha_fraction": 0.7212932109832764, "alphanum_fraction": 0.7703455686569214, "avg_line_length": 28.899999618530273, "blob_id": "9c729d98bb4b053269d7b42054c670a54cc58065", "content_id": "4d2f02881ca55dd9847befd937a93ea362522f79", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1794, "license_type": "permissive", "max_line_length": 113, "num_lines": 60, "path": "/README.md", "repo_name": "weaveworks-experiments/fluxctl-rollout", "src_encoding": "UTF-8", "text": "# fluxctl-rollout\n\nfluxctl-rollout is a prototype of a hypothetical fluxctl rollout subcommand.\n\nIt operates directly on Istio-Kubernetes yaml files only and assumes\n[gitops](https://www.weave.works/blog/gitops-operations-by-pull-request) for\nthe rest. All state required is stored in annotations on Deployment objects\n(probably).\n\nIt could later be integrated into Flux so that it runs server-side (in the\nuser's cluster) at the point where Flux is operating on checked-out yaml files\nfrom the source of truth config repo.\n\nhttps://docs.google.com/document/d/1Mf13PgRWrouc1Ly6IkenDgFkqS7-4jtPf6B5ofrVHXk/edit#heading=h.kg8bv2nefkii\n\nBasic workflow:\n\n```\nstage -> (check) -> release\n \\-> abort\n\n```\n\n# Usage:\n\n## Blue-green\n\n```\n$ fluxctl-rollout stage bluegreen --update-image=myapp:master-8da5ca3\nRollout id: a1b2c3d4e5\nOld tag: myapp:master-7c04b92\nNew tag: myapp:master-8da5ca3\nURL for new tag: http://blah:39191/\nUse 'fluxctl-rollout abort a1b2c3d4e5' to cancel, or 'fluxctl-rollout release a1b2c3d4e5' to finish the rollout.\n\n$ fluxctl-rollout release a1b2c3d4e5\nCompleting rollout... done.\n100% user traffic now reaching New deployment.\nCleaning up Old deployment... done\n\n$ fluxctl-rollout list\n<shows in-flight rollouts>\n```\n\n## Canary\n\n```\n$ fluxctl-rollout stage canary --update-image=myapp:master-8da5ca3\nRollout id: a1b2c3d4e5\nOld tag: myapp:master-7c04b92\nNew tag: myapp:master-8da5ca3\n5% of traffic is now being routed to master-8da5ca3.\nCheck your monitoring to see if you like the new version.\nUse 'fluxctl-rollout a1b2c3d4e5 abort' to cancel, or 'fluxctl-rollout a1b2c3d4e5 complete' to finish the rollout.\n\n$ fluxctl-rollout release a1b2c3d4e5\nCompleting rollout... done.\n100% user traffic now reaching New deployment.\nCleaning up Old deployment... 
done\n```\n" }, { "alpha_fraction": 0.7021359205245972, "alphanum_fraction": 0.7413592338562012, "avg_line_length": 23.29245376586914, "blob_id": "87b5c6363ab3b10925221da49320150ddc9055a5", "content_id": "2e2fd7dd233531e790bab9a75e449b3959687641", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2575, "license_type": "permissive", "max_line_length": 113, "num_lines": 106, "path": "/root_cmd.go", "repo_name": "weaveworks-experiments/fluxctl-rollout", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com/pkg/errors\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n)\n\nvar rootLongHelp = strings.TrimSpace(`\nfluxctl-rollout is a prototype of a hypothetical fluxctl rollout subcommand.\n\nIt operates directly on Istio-Kubernetes yaml files only and assumes gitops for\nthe rest. All state required is stored in annotations on Deployment objects\n(probably).\n\nIt could later be integrated into Flux so that it runs server-side (in the\nuser's cluster) at the point where Flux is operating on checked-out yaml files\nfrom the source of truth config repo.\n\nhttps://docs.google.com/document/d/1Mf13PgRWrouc1Ly6IkenDgFkqS7-4jtPf6B5ofrVHXk/edit#heading=h.kg8bv2nefkii\n\nBasic workflow:\n\nstage -> (check) -> release\n \\-> abort\n\nUsage:\n\n# Blue-green\n\n$ fluxctl-rollout stage bluegreen --update-image=myapp:master-8da5ca3\nRollout id: a1b2c3d4e5\nOld tag: myapp:master-7c04b92\nNew tag: myapp:master-8da5ca3\nURL for new tag: http://blah:39191/\nUse 'fluxctl-rollout abort a1b2c3d4e5' to cancel, or 'fluxctl-rollout release a1b2c3d4e5' to finish the rollout.\n\n$ fluxctl-rollout release a1b2c3d4e5\nCompleting rollout... done.\n100% user traffic now reaching New deployment.\nCleaning up Old deployment... done\n\n$ fluxctl-rollout list\n<shows in-flight rollouts>\n\n# Canary\n\n$ fluxctl-rollout stage canary --update-image=myapp:master-8da5ca3\nRollout id: a1b2c3d4e5\nOld tag: myapp:master-7c04b92\nNew tag: myapp:master-8da5ca3\n5% of traffic is now being routed to master-8da5ca3.\nCheck your monitoring to see if you like the new version.\nUse 'fluxctl-rollout a1b2c3d4e5 abort' to cancel, or 'fluxctl-rollout a1b2c3d4e5 complete' to finish the rollout.\n\n$ fluxctl-rollout release a1b2c3d4e5\nCompleting rollout... done.\n100% user traffic now reaching New deployment.\nCleaning up Old deployment... 
done\n`)\n\nconst istio_kube_config = `\napiVersion: istio.io/v1alpha1\nkind: IstioConfig\nmetadata:\n name: route-rule-details-default\n namespace: default\nspec:\n destination: reviews.default.svc.cluster.local\n precedence: 1\n route:\n - tags:\n version: v1\n weight: 50\n - tags:\n version: v3\n weight: 50\n`\n\nconst kube_dep_config = `\n`\n\nfunc (opts *rootOpts) Command() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"fluxctl-rollout\",\n\t\tLong: rootLongHelp,\n\t\tSilenceUsage: true,\n\t\tSilenceErrors: true,\n\t}\n\n\tcmd.AddCommand(\n\t\tnewVersionCommand(),\n\t\tnewStage(opts).Command(),\n\t\tnewRelease(opts).Command(),\n\t\tnewList(opts).Command(),\n\t\tnewAbort(opts).Command(),\n\t)\n\n\treturn cmd\n}\n" }, { "alpha_fraction": 0.6035131812095642, "alphanum_fraction": 0.6455457806587219, "avg_line_length": 38.849998474121094, "blob_id": "b9e765f003dcd202cb68128cf7e3252045ee96df", "content_id": "2d587be2f2537b79e3ae71c8c87b8162e135f7be", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1594, "license_type": "permissive", "max_line_length": 139, "num_lines": 40, "path": "/prototype.py", "repo_name": "weaveworks-experiments/fluxctl-rollout", "src_encoding": "UTF-8", "text": "# A prototype for the data structures & organization of the\n# prototype. For discussion, then will spell out as a more complete\n# implementation in Golang.\n#\n# Read main(), then run me to see the output...\n\nfrom modules import (Deployment, Yamels, CanaryRollout,\n BlueGreenRollout)\n\ndef main():\n y = Yamels([Deployment(\"front-end\", replicas=7, primary=True)])\n # Scenario 1 - stage and release\n r = CanaryRollout(y, \"front-end\", \"v2\")\n r.stage()\n r.release()\n # Scenario 2 - stage and abort\n r = BlueGreenRollout(y, \"front-end--v2\", \"v3\")\n r.stage()\n r.abort()\n\n\"\"\"\naction | deployment:replicas,traffic%,[* = primary]\n--------------------------------------------------------\ninit(v2) | front-end*:n,100%\ncanary() | front-end*:n,95%, front-end--v2:n,5%\nSyncing: <Deployment front-end, primary=True, percent=95, replicas=7> <Deployment front-end--v2, primary=False, percent=5, replicas=7>\n\nrelease() | front-end:0,0%, front-end--v2*:n,100%\nSyncing: <Deployment front-end, primary=False, percent=0, replicas=0> <Deployment front-end--v2, primary=True, percent=100, replicas=7>\n\ninit(v3) | front-end--v2*:n,100%\nbluegreen() | front-end--v2*:n,100%, front-end--v3:n,0%\nSyncing: <Deployment front-end--v2, primary=True, percent=100, replicas=7> <Deployment front-end--v3, primary=False, percent=0, replicas=7>\n\nabort() | front-end--v2*:n,100%, front-end--v3:0,0%\nSyncing: <Deployment front-end--v2, primary=True, percent=100, replicas=7> <Deployment front-end--v3, primary=False, percent=0, replicas=0>\n\"\"\"\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.618488609790802, "alphanum_fraction": 0.6247248649597168, "avg_line_length": 34.40259552001953, "blob_id": "7fe18603dc285b291ef6683a5e1598b02929ebe4", "content_id": "654bac5556b478fd165ccd5e38b1386562abf4c3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2726, "license_type": "permissive", "max_line_length": 72, "num_lines": 77, "path": "/modules.py", "repo_name": "weaveworks-experiments/fluxctl-rollout", "src_encoding": "UTF-8", "text": "class Yamels(object):\n def __init__(self, deployments=[], rollouts=[]):\n self.deployments = deployments\n self.rollouts = 
rollouts\n def load(self):\n pass\n def save(self):\n pass\n def find(self, deploymentName):\n for d in self.deployments:\n if d.name == deploymentName:\n return d\n raise(KeyError(deploymentName))\n def sync(self, *deployments):\n print \"Syncing:\",\n # Record the deployments in-memory at least\n for d in deployments:\n try:\n self.find(d)\n except KeyError:\n self.deployments.append(d)\n print d,\n print\n\nclass Deployment(object):\n def __init__(self, name, replicas=0, primary=False):\n self.name = name\n self.primary = primary\n self.percent = 0\n self.replicas = replicas\n def __str__(self):\n return (\"<Deployment %s, primary=%s, percent=%d, replicas=%d>\" %\n (self.name, self.primary, self.percent, self.replicas))\n\ndef replaceVersion(deploymentName, targetVersion):\n if \"--\" in deploymentName:\n deploymentName, _ = deploymentName.split(\"--\")\n return deploymentName + \"--\" + targetVersion\n\nclass Rollout(object):\n def __init__(self, yamels, deploymentName, targetVersion):\n self.yamels = yamels\n self.fromDeployment = self.yamels.find(deploymentName)\n self.originalReplicas = self.fromDeployment.replicas\n self.toDeployment = Deployment(\n replaceVersion(deploymentName, targetVersion),\n replicas=self.originalReplicas,\n )\n def stage(self):\n self.toDeployment.percent = self.initialPercent\n self.fromDeployment.percent = 100 - self.initialPercent\n self.yamels.sync(self.fromDeployment, self.toDeployment)\n def release(self):\n self.fromDeployment.primary = False\n self.toDeployment.primary = True\n self.fromDeployment.percent = 0\n self.toDeployment.percent = 100\n self.fromDeployment.replicas = 0\n self.yamels.sync(self.fromDeployment, self.toDeployment)\n def abort(self):\n self.fromDeployment.percent = 100\n self.toDeployment.percent = 0\n self.toDeployment.replicas = 0 # TODO: Delete this.\n self.yamels.sync(self.fromDeployment, self.toDeployment)\n\nclass CanaryRollout(Rollout):\n initialPercent = 5\n def __init__(self, *a, **kw):\n return Rollout.__init__(self, *a, **kw)\n\nclass BlueGreenRollout(Rollout):\n initialPercent = 0\n def __init__(self, *a, **kw):\n return Rollout.__init__(self, *a, **kw)\n\n# TODO: clean up old deployments, only really possible when flux\n# supports deleting things.\n" } ]
num_files: 5
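Each element of `files` carries the blob metadata seen above (`path`, `language`, `src_encoding`, license fields) plus the full `text`. A hedged sketch of rebuilding a working tree from one parsed record (the `row` variable is assumed to hold a record like the one above; `materialize` is a hypothetical helper, not part of the dataset tooling):

```python
import os

def materialize(row, out_dir="checkout"):
    """Write every file blob of one record to disk under out_dir."""
    for blob in row["files"]:
        # Paths in the data are repo-rooted, e.g. "/main.go"; strip the slash.
        dest = os.path.join(out_dir, row["repo_name"], blob["path"].lstrip("/"))
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        with open(dest, "w", encoding=blob.get("src_encoding", "UTF-8")) as fh:
            fh.write(blob["text"])
```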

repo_name: abumaru/rotate_my_wellpathh
repo_url: https://github.com/abumaru/rotate_my_wellpathh
snapshot_id: 94d91e02921c27cf0609261e94ba554c895b8bb8
revision_id: ce33be25c41c1c09efb03fe7c030337dbf88e5ad
directory_id: b50bc34f092984a1fe19a7a190650605365a0418
branch_name: refs/heads/master
visit_date: 2020-09-22T22:57:18.670049
revision_date: 2019-12-02T18:50:27
committer_date: 2019-12-02T18:50:27
github_id: 225338602
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.4820389449596405, "alphanum_fraction": 0.4974914789199829, "avg_line_length": 38.844261169433594, "blob_id": "ef9572228fe488e64f0645b2fff36d6ac60131de", "content_id": "652b6f80a5b0754e4e5418a9cbde367d1ca0acbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4983, "license_type": "no_license", "max_line_length": 194, "num_lines": 122, "path": "/rotate_my_well.py", "repo_name": "abumaru/rotate_my_wellpathh", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 19 03:07:09 2019\r\n\r\n@author: GOGUT\r\nscript to rotate your well \r\n\"\"\"\r\n#\r\n#\r\n# =============================================================================\r\n# modules import \r\n# =============================================================================\r\n#import numpy as np\r\nimport math \r\nimport pandas as pd\r\nfrom mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import\r\nimport matplotlib.pyplot as plt\r\nfrom pathlib import Path, PureWindowsPath\r\n#\r\n#\r\n# =============================================================================\r\n# INPUT DATA:\r\n# =============================================================================\r\n# I've explicitly declared my path as being in Windows format, so I can use forward slashes in it.\r\ndata_folder = PureWindowsPath(r\"Y:\\ressim\\lavani-deep\\dg2\\simu2\\include\\well_paths\\Rotated_wellpaths\")\r\ndata_name = \"LS2.xlsx\"\r\nrot_angle_degrees = -19\r\nrotation_point_index = 32\r\n#\r\n#\r\n# =============================================================================\r\n# generating the file paths \r\n# =============================================================================\r\n# Convert path to the right format for the current operating system\r\ncorrect_path = Path(data_folder / data_name)\r\n# prints \"source_data/text_files/raw_data.txt\" on Mac and Linux\r\n# prints \"source_data\\text_files\\raw_data.txt\" on Windows\r\ndata_name_lenght = len(data_name)\r\ndata_name_point = data_name.find('.')\r\ndata_name_new = data_name[0:data_name_point]+str(rot_angle_degrees)+data_name[-5:]\r\ndata_name_new_txt = data_name[0:data_name_point]+str(rot_angle_degrees)+\".dev\"\r\nnew_file = Path(data_folder / data_name_new)\r\nnew_file2 = Path(data_folder / data_name_new_txt)\r\nprint(new_file)\r\nprint(new_file2)\r\n#\r\n#\r\n\r\n#\r\n#\r\n# =============================================================================\r\n# functions for rotation\r\n# =============================================================================\r\ndef Rotatepointx(x,y,cx,cy,angl):\r\n xrot = math.cos(math.radians(angl))*(x-cx)-math.sin(math.radians(angl))*(y-cy)+cx\r\n return xrot\r\ndef Rotatepointy(x,y,cx,cy,angl): \r\n yrot = math.sin(math.radians(angl))*(x-cx)+math.cos(math.radians(angl))*(y-cy)+cy\r\n return yrot\r\n#\r\n#\r\n# =============================================================================\r\n# reading from data from excel file to pandas \r\n# =============================================================================\r\ndf= pd.read_excel(correct_path)\r\n'''\r\nColumns in dataset: East_X\tNorth_Y\tTVDMSL\tMDMS\r\n'''\r\ndf['TVD_neg']=df['TVDMSL']*(-1) # negative TVD\r\ndf['MD_diff']= df['MDMSL'].diff() # delta MD\r\ndf['TVD_diff']= df['TVDMSL'].diff() # delta TVD\r\ndf['Disp']=((df['MD_diff']**2)-(df['TVD_diff']**2))**0.5 # Displacement\r\ndf['Cum.Disp']=df['Disp'].cumsum() # Cumulative displacement \r\ndf['ratio 
Disp/MD_diff']= df['Disp']/df['MD_diff'] # ratio. intermediate calculation\r\ndf['Incl_degrees']= [math.degrees(math.asin(row)) for row in df['ratio Disp/MD_diff']] # Calculates the inclication at each survey.\r\ndf['Cum.Disp'].fillna(0,inplace=True) # replacing from nan values to cero.\r\ndf2= df[['East_X','North_Y','MDMSL','Cum.Disp','TVD_neg','Incl_degrees']] # final dataset df2\r\ndf2['xrot']=Rotatepointx(df2['East_X'],df2['North_Y'],df2['East_X'].iloc[rotation_point_index],df2['North_Y'].iloc[rotation_point_index],rot_angle_degrees) # East_x rotated arround first point.\r\ndf2['yrot']=Rotatepointy(df2['East_X'],df2['North_Y'],df2['East_X'].iloc[rotation_point_index],df2['North_Y'].iloc[rotation_point_index],rot_angle_degrees) # North_y rotated arround first point.\r\n#\r\n#\r\n# =============================================================================\r\n# Plotting data in 3D\r\n# =============================================================================\r\n'''\r\nData from initial well survey\r\n'''\r\nx = list(df2['East_X'])\r\ny = list(df2['North_Y'])\r\nz = list(df2['TVD_neg'])\r\n'''\r\nData from rotated well survey\r\n'''\r\nx1 = list(df2['xrot'])\r\ny1 = list(df2['yrot'])\r\nz1 = list(df2['TVD_neg'])\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(111, projection='3d')\r\nax.scatter(x, y, z,color='b', marker='o') #initial well survey\r\nax.scatter(x1,y1,z1,color='r', marker='o')# rotated survey\r\nax.set_xlabel('East_X')\r\nax.set_ylabel('North_Y')\r\nax.set_zlabel('Z TVD')\r\n#\r\n#\r\n# =============================================================================\r\n# generates xlsx file with df2 as feed\r\n# =============================================================================\r\ndf2.to_excel(new_file,index=False)\r\nprint(\"new wellpath:\" ,new_file)\r\n#\r\n#\r\n# =============================================================================\r\n# generate a .dev file with df3 as feed\r\n# =============================================================================\r\n\r\ndf2['TVDMSL2']=df['TVDMSL'] \r\ndf3 = df2[['xrot','yrot','TVDMSL2','MDMSL',]]\r\nprint(df3)\r\ndf3.to_csv(new_file2, index=None, sep=' ', mode='a')\r\nprint(\"new wellpath:\" ,new_file2)\r\n" }, { "alpha_fraction": 0.7559523582458496, "alphanum_fraction": 0.7559523582458496, "avg_line_length": 41, "blob_id": "af80c3547285dd601c276f7d21472c544bcb0c88", "content_id": "113f50a24376f2ce34e4ab169277c36f322c0d6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 168, "license_type": "no_license", "max_line_length": 79, "num_lines": 4, "path": "/README.md", "repo_name": "abumaru/rotate_my_wellpathh", "src_encoding": "UTF-8", "text": "# rotate_my_wellpathh\ninput data needed:\nexcel file with East_m, North_m, TVD_m and MD,\nselect the file, and file name, alsow the angle (clockwise is a negative value)\n" } ]
num_files: 2

repo_name: AlexMasturkov/django-assignment1
repo_url: https://github.com/AlexMasturkov/django-assignment1
snapshot_id: 90850632aa59d3320daf7b63866fca564a52dc54
revision_id: ef1f4020cc605415100dea8b1bd880462662df32
directory_id: 51fa5ce54846017a80d03ce8ec1265ca3abae269
branch_name: refs/heads/master
visit_date: 2020-03-24T23:53:32.804915
revision_date: 2018-08-01T13:31:17
committer_date: 2018-08-01T13:31:17
github_id: 143158940
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5275397896766663, "alphanum_fraction": 0.5630354881286621, "avg_line_length": 30.423076629638672, "blob_id": "bb18362e3c8510d4643ed20090c5eea48de93e2d", "content_id": "fa620abb0c57cdfd30a0ccf5777fe25bf9e87398", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 817, "license_type": "no_license", "max_line_length": 114, "num_lines": 26, "path": "/snakeApp/snakes/migrations/0001_initial.py", "repo_name": "AlexMasturkov/django-assignment1", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-07-28 15:07\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Snake',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=128)),\n ('description', models.CharField(max_length=256)),\n ('color_pattern', models.CharField(max_length=128)),\n ('favourite_prey', models.CharField(max_length=128)),\n ('region', models.CharField(max_length=64)),\n ('venomous', models.BooleanField(default=True)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.5159235596656799, "alphanum_fraction": 0.522292971611023, "avg_line_length": 23.153846740722656, "blob_id": "889d75fe277602084713d3a2e134fee4975a6620", "content_id": "f31adf4a8fa19cd926f2433f170db7f0f300dda0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 628, "license_type": "no_license", "max_line_length": 49, "num_lines": 26, "path": "/snakeApp/snakes/static/my_scripts.js", "repo_name": "AlexMasturkov/django-assignment1", "src_encoding": "UTF-8", "text": "$(\"#my_font\").on(\"mouseover\",function(){\n $(\"body\").css(\"font-family\", \"Impact\"); \n})\n\n$(\"button\").on(\"mouseover\",function(){\n $(this).css(\"color\",\"yellow\");\n});\n\n$(\"button\").on(\"mouseout\",function(){\n $(this).css(\"color\",\"white\");\n});\n\n$(\"#my_theme\").on(\"mouseover\",function(){\n $(\"h3\").css(\"color\",\"white\");\n $(\"body\").css(\"background-color\",\"teal\");\n $(\"form\").css(\"color\",\"white\");\n $(\"footer\").css(\"color\",\"white\");\n $(\"th\").css(\"background-color\",\"teal\");\n});\n\n$( \"footer\" ).click(function() {\n $(\"footer\").css(\"border\", \"3px double white\");\n $(\"footer\").css(\"font-size\",\" 1.2em\");\n\n \n });\n" }, { "alpha_fraction": 0.7015706896781921, "alphanum_fraction": 0.7382199168205261, "avg_line_length": 33.6363639831543, "blob_id": "e49f2a9218d30146a32103af4490b4e39a806f06", "content_id": "504cb37739791df896b95c6290e19bbbca0ae028", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 382, "license_type": "no_license", "max_line_length": 53, "num_lines": 11, "path": "/snakeApp/snakes/models.py", "repo_name": "AlexMasturkov/django-assignment1", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\n\nclass Snake(models.Model):\n name = models.CharField(max_length=128)\n description = models.CharField(max_length=256)\n color_pattern = models.CharField(max_length=128)\n favourite_prey = models.CharField(max_length=128)\n region = models.CharField(max_length=64)\n venomous = models.BooleanField(default=True)\n\n" }, { "alpha_fraction": 0.6510263681411743, "alphanum_fraction": 0.6510263681411743, "avg_line_length": 33.20000076293945, "blob_id": 
"46e902531dcd41d0fe8cfb2d477214d9dab4114b", "content_id": "a028119f91bf18f82a8a530ed2a573ff053ed581", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 341, "license_type": "no_license", "max_line_length": 67, "num_lines": 10, "path": "/snakeApp/snakes/urls.py", "repo_name": "AlexMasturkov/django-assignment1", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('snakes', views.snakes,name='snakes'),\n path('add-snake', views.add,name='add-snake'),\n path('edit-snake/<int:id>', views.edit,name='edit-snake'), \n path('delete-snake/<int:id>', views.delete,name='delete-snake')\n]" }, { "alpha_fraction": 0.5846846699714661, "alphanum_fraction": 0.5846846699714661, "avg_line_length": 31.676469802856445, "blob_id": "7fe5e38bb4a086405930e066ae24c75807623930", "content_id": "b57eb5c8a35cc4e91776d67d71ac236ad9af5300", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1110, "license_type": "no_license", "max_line_length": 91, "num_lines": 34, "path": "/snakeApp/snakes/forms.py", "repo_name": "AlexMasturkov/django-assignment1", "src_encoding": "UTF-8", "text": "from django.forms import ModelForm\nfrom django import forms\nfrom .models import Snake\n\nclass SnakeForm(forms.ModelForm):\n name = forms.CharField(\n label='Snake_Name',\n widget = forms.TextInput(attrs={'class': 'form-control'})\n )\n venomous = forms.BooleanField(\n label='Is venomous ?',\n widget = forms.CheckboxInput(),\n required=False)\n region = forms.ChoiceField(\n label='Snake_Region', \n choices=(\n ('asia','Asia'),\n ('europa','Europa'),\n ('africa','Africa'),\n ('south_america','South America'),\n ('north_america','Nort America'))\n )\n\n\n class Meta:\n model = Snake\n fields =['name','description','color_pattern','favourite_prey','region','venomous']\n form_classes = {'class':'form-control'}\n widgets={\n 'description':forms.TextInput(attrs=form_classes),\n 'color_pattern':forms.TextInput(attrs=form_classes),\n 'favourite_prey':forms.TextInput(attrs=form_classes),\n 'region':forms.TextInput(attrs=form_classes) \n }" }, { "alpha_fraction": 0.6413189768791199, "alphanum_fraction": 0.6473755240440369, "avg_line_length": 24.169490814208984, "blob_id": "5290fcf8aaa0dca9a2c2fa496291688dd55d4189", "content_id": "ff06465f52ca7a735349c3fe6eec13aa6f89b556", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1486, "license_type": "no_license", "max_line_length": 64, "num_lines": 59, "path": "/snakeApp/snakes/views.py", "repo_name": "AlexMasturkov/django-assignment1", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib import messages\nfrom .models import Snake\nfrom .forms import SnakeForm\n\n# Create your views here.\ndef index(request):\n return render(request, 'index.html',{})\n\n\ndef snakes(request):\n snakes = Snake.objects.all()\n return render(request, 'snakes.html',{\n 'snakes': snakes\n })\n\ndef add(request):\n if request.method == 'POST':\n form = SnakeForm(request.POST)\n# capitalizing name string\n new_form = form.save(commit=False)\n name = form.cleaned_data['name']\n new_form.name = name.capitalize()\n new_form.save()\n \n messages.success(request, 'Snake has been added')\n return redirect('snakes')\n else:\n form = SnakeForm()\n return render(request, 'add.html', {\n 'form': form\n })\n\n\ndef 
edit(request, id):\n snake = get_object_or_404(Snake, pk=id)\n if request.method == 'POST':\n form = SnakeForm(request.POST, instance=snake)\n if form.is_valid(): \n form.save()\n messages.success(request, 'Snake has been edit')\n else:\n messages.error(request,'wrong') \n return redirect('snakes')\n else:\n form = SnakeForm(instance=snake)\n return render(request, 'edit.html', {\n 'form': form,\n 'snake': snake\n })\n\n\n\ndef delete(request, id):\n if request.method == 'POST':\n snake = get_object_or_404(Snake, pk=id)\n snake.delete() \n messages.success(request, 'Snake has been deleted')\n return redirect('snakes')\n\n" } ]
num_files: 6

repo_name: ChillFish8/tactix
repo_url: https://github.com/ChillFish8/tactix
snapshot_id: 690e88cabbd7fa458840ef95a1fb3d7086db8868
revision_id: 88e56176335c769f9a560edf390cc462225db792
directory_id: 2eeb9318a9b40a0c6bbf8a655399ebd88bd03d35
branch_name: refs/heads/master
visit_date: 2023-02-12T12:40:51.174962
revision_date: 2021-01-06T17:24:49
committer_date: 2021-01-06T17:24:49
github_id: 327327621
star_events_count: 2
fork_events_count: 2
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5720824003219604, "alphanum_fraction": 0.5732265710830688, "avg_line_length": 22.594594955444336, "blob_id": "16cc09e9681f6831e755f0ad1a71057c1de0c878", "content_id": "7ac909f862ffd3244bd6551c4c62da1e3ad4ee47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 874, "license_type": "no_license", "max_line_length": 66, "num_lines": 37, "path": "/src/runtime.rs", "repo_name": "ChillFish8/tactix", "src_encoding": "UTF-8", "text": "use tokio::runtime::{Builder, Runtime};\nuse futures::Future;\n\nuse tokio::task::JoinHandle;\nuse tokio::sync::mpsc::{unbounded_channel, UnboundedSender};\n\nuse std::thread;\nuse std::sync::mpsc::{sync_channel, Receiver};\n\n\nlazy_static! {\n pub static ref RUNTIME: Runtime = {\n Builder::new_multi_thread()\n .enable_all()\n .build()\n .expect(\"Failed runtime sanity check.\")\n };\n}\n\npub fn spawn<F>(future: F) -> JoinHandle<F::Output>\n where F: Future + Send + 'static,\n F::Output: Send + 'static,\n{\n RUNTIME.spawn(future)\n}\n\n\npub fn start_background() -> (UnboundedSender<()>, Receiver<()>) {\n let (set, mut waiter) = unbounded_channel::<()>();\n let (tx, rx) = sync_channel::<()>(0);\n let _ = thread::spawn(move || {\n RUNTIME.block_on(waiter.recv());\n let _ = tx.send(());\n });\n\n (set, rx)\n}\n\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.643750011920929, "avg_line_length": 17.5, "blob_id": "433e8b26b293a96c5ba1c22ec7e28764e70a8476", "content_id": "cfac3b70e51486d50fdaa21c03529bbdc8d8a2b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 480, "license_type": "no_license", "max_line_length": 61, "num_lines": 26, "path": "/Cargo.toml", "repo_name": "ChillFish8/tactix", "src_encoding": "UTF-8", "text": "[package]\nname = \"tactix\"\nversion = \"0.1.1\"\nauthors = [\"Harrison Burt <57491488+ChillFish8@users.noreply.github.com>\"]\nedition = \"2018\"\n\n[lib]\nname = \"tactix\"\ncrate-type = [\"cdylib\"]\n\n[dependencies.pyo3]\nversion = \"0.13.0\"\nfeatures = [\"extension-module\"]\n\n[dependencies]\nlazy_static = \"1.4.0\"\nfutures = \"0.3.8\"\n\ntokio = { version = \"1\", features = [\"full\"] }\n\n# faster memory allocator, seems to help PyO3 a decent amount\nmimalloc = { version = \"*\", default-features = false}\n\n[profile.release]\nlto = \"fat\"\ncodegen-units = 1" }, { "alpha_fraction": 0.5646258592605591, "alphanum_fraction": 0.5680271983146667, "avg_line_length": 19.83333396911621, "blob_id": "35ae5ce2bc380d4e06aadd0979aa2bb70736fe34", "content_id": "f014e3955725e128f3a18004e21efcc1607b65f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 882, "license_type": "no_license", "max_line_length": 74, "num_lines": 42, "path": "/src/actor.rs", "repo_name": "ChillFish8/tactix", "src_encoding": "UTF-8", "text": "use pyo3::prelude::*;\n\nuse tokio::sync::mpsc::unbounded_channel;\nuse tokio::task::JoinHandle;\n\nuse crate::runtime;\nuse crate::handle::{Sender, Handler};\n\n\n#[pyclass]\npub struct TactixActor {\n tx: Sender, // event, message, delay\n handle: JoinHandle<()>\n}\n\n#[pymethods]\nimpl TactixActor {\n #[new]\n fn new(on_message: PyObject) -> Self {\n let (tx, rx) = unbounded_channel();\n\n let handle = Handler::watch(rx, on_message);\n\n Self { tx, handle }\n }\n\n fn send(&self, event: PyObject, message: PyObject) {\n Handler::send(&self.tx, (event, message));\n }\n\n fn send_later(&self, event: PyObject, message: PyObject, delay: 
f64) {\n runtime::spawn(Handler::send_later(\n self.tx.clone(),\n (event, message),\n delay\n ));\n }\n\n fn shutdown(&self) {\n self.handle.abort();\n }\n}\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5762526988983154, "alphanum_fraction": 0.5773420333862305, "avg_line_length": 16.018518447875977, "blob_id": "134bcd2455a8fbce1593fda934b245f64860fdd2", "content_id": "4b7dfd1eb5281df0635f21de02b27c563543f697", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 918, "license_type": "no_license", "max_line_length": 59, "num_lines": 54, "path": "/src/lib.rs", "repo_name": "ChillFish8/tactix", "src_encoding": "UTF-8", "text": "#[macro_use]\nextern crate lazy_static;\n\nmod actor;\nmod runtime;\nmod handle;\n\nuse pyo3::prelude::*;\n\nuse mimalloc::MiMalloc;\nuse tokio::sync::mpsc::UnboundedSender;\n\nuse std::sync::mpsc;\n\n\n#[global_allocator]\nstatic GLOBAL: MiMalloc = MiMalloc;\n\n\n#[pyclass]\nstruct TactixRuntime {\n stop: UnboundedSender<()>,\n wait: mpsc::Receiver<()>,\n}\n\n#[pymethods]\nimpl TactixRuntime {\n #[new]\n fn new() -> Self {\n let (stop, wait) = runtime::start_background();\n Self { stop, wait }\n }\n\n fn wait(&mut self) {\n if let Err(e) = self.wait.recv() {\n eprintln!(\"{:?}\", e)\n }\n }\n\n fn shutdown(&self) {\n let _ = self.stop.send(());\n }\n}\n\n\n///\n/// Wraps all our existing pyobjects together in the module\n///\n#[pymodule]\nfn tactix(_py: Python, m: &PyModule) -> PyResult<()> {\n m.add_class::<TactixRuntime>()?;\n m.add_class::<actor::TactixActor>()?;\n Ok(())\n}" }, { "alpha_fraction": 0.559107780456543, "alphanum_fraction": 0.5635687708854675, "avg_line_length": 24.358489990234375, "blob_id": "92c151ed27c573b6c3addc31edea44dd2335993a", "content_id": "7a575a614ceba37937ebcbb0e4831ffcd5773a9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 1345, "license_type": "no_license", "max_line_length": 75, "num_lines": 53, "path": "/src/handle.rs", "repo_name": "ChillFish8/tactix", "src_encoding": "UTF-8", "text": "use pyo3::{PyObject, Python};\n\nuse tokio::sync::mpsc::{UnboundedSender, UnboundedReceiver};\nuse tokio::time;\nuse tokio::task::JoinHandle;\n\nuse std::time::Duration;\nuse std::sync::Arc;\n\nuse crate::runtime;\n\npub type Payload = (PyObject, PyObject);\npub type Sender = UnboundedSender<Payload>;\npub type Receiver = UnboundedReceiver<Payload>;\n\n\npub struct Handler;\nimpl Handler {\n pub async fn send_later(sender: Sender, payload: Payload, delay: f64) {\n time::sleep(Duration::from_secs_f64(delay)).await;\n Self::send(&sender, payload);\n }\n\n pub fn send(tx: &Sender, payload: Payload) {\n if let Err(_) = tx.send(payload) {\n eprintln!(\"Actor disconnected while pending!\");\n };\n }\n}\n\nimpl Handler {\n pub fn watch(rx: Receiver, cb: PyObject) -> JoinHandle<()> {\n runtime::spawn(Self::_watch(rx, cb))\n }\n\n async fn _watch(mut rx: Receiver, cb: PyObject) {\n let cb = Arc::new(cb);\n while let Some(msg) = rx.recv().await {\n let _ = runtime::spawn(Self::invoke_cb(\n cb.clone(),\n msg,\n ));\n }\n }\n\n async fn invoke_cb(cb: Arc<PyObject>, payload: Payload) {\n Python::with_gil(|py| {\n if let Err(e) = cb.call1(py, payload) {\n println!(\"{:?}\", e);\n }\n });\n }\n}\n\n" }, { "alpha_fraction": 0.6039304733276367, "alphanum_fraction": 0.6039304733276367, "avg_line_length": 32.07500076293945, "blob_id": "4feb9bfa4d8031a8d2bc7c62ea0e11752da6856a", "content_id": "30a287f9c26c3693f432ff859636716aac071f9b", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2646, "license_type": "no_license", "max_line_length": 77, "num_lines": 80, "path": "/tactix/placeholder.py", "repo_name": "ChillFish8/tactix", "src_encoding": "UTF-8", "text": "from typing import Callable, Any\n\n\nclass TactixActor:\n \"\"\"\n Represents the native Actor class, this should not be implemented by the\n user directly, this is instead a raw implementation for low level\n implementations, developers should instead opt to use the\n main `tactix.Actor` class which implements all the required handling of\n this class.\n \"\"\"\n\n def __init__(self, _cb: Callable) -> None: ...\n\n def send(self, event: str, message: Any) -> None:\n \"\"\"\n Sends a message to a actor, this will not block.\n\n Args:\n event:\n The name of the event callback, this should not include the\n prefixed 'on_' and simply just be the name.\n\n This is parameter should be a string.\n\n message:\n The object to be sent to the channel, this can be any object.\n \"\"\"\n\n def send_later(self, event: str, message: Any, delay: float) -> None:\n \"\"\"\n Sends a message to an actor after a given delay (float in seconds),\n this will not block and will just schedule a new task to be made.\n\n Args:\n event:\n The name of the event callback, this should not include the\n prefixed 'on_' and simply just be the name.\n\n This is parameter should be a string.\n\n message:\n The object to be sent to the channel, this can be any object.\n\n delay:\n The time to have elapsed before sending the message, time in\n seconds and can be represented as a float.\n \"\"\"\n\n def shutdown(self) -> None:\n \"\"\"\n Shuts down the actor by cancelling the pending tasks that watches\n for messages, any pending messages at the time will be destroyed.\n \"\"\"\n\n\nclass TactixRuntime:\n \"\"\"\n Represents the actor runtime built on top of Tokio's Scheduler,\n this spawns the runtime in a background thread to prevent deadlocks\n with Python's GIL.\n \"\"\"\n def __init__(self) -> None: ...\n\n def wait(self) -> None:\n \"\"\"\n Waits for the runtime worker thread to finish, this will never\n return unless `TactixRuntime.shutdown(self)` is called to begin\n shutting down the actors that will be pending.\n \"\"\"\n\n def shutdown(self) -> None:\n \"\"\"\n Begins the shutdown of the runtime, this will start to stop running\n tasks in the background and will eventually stop completely.\n\n NOTE:\n There is not limit / timeout of how long it will take to shutdown\n after this function is called.\n \"\"\"\n" }, { "alpha_fraction": 0.5773177146911621, "alphanum_fraction": 0.5780505537986755, "avg_line_length": 28.187166213989258, "blob_id": "5842b25418a065cf7393239e8eda949a5e2416ca", "content_id": "462e9c29531ecc4e078a9d94803fa7477857bec9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5458, "license_type": "no_license", "max_line_length": 84, "num_lines": 187, "path": "/tactix/actor.py", "repo_name": "ChillFish8/tactix", "src_encoding": "UTF-8", "text": "import typing as t\nimport inspect\n\nfrom traceback import print_exc\n\nfrom . 
import TactixActor, OnceTick\n\n\nclass ActorListener:\n \"\"\"\n Wraps a function / callable to become a message handler, if a event is\n emitted of the given name of the function (not including the 'on_'), the\n listener is called with the given message.\n \"\"\"\n def __init__(self, cb, name):\n self._cb = cb\n self._name = name\n\n def as_tuple(self):\n return self._name, self\n\n def __call__(self, *args, **kwargs):\n return self._cb(*args, **kwargs)\n\n\nclass Actor:\n \"\"\"\n The main actor class that all actors should inherit from, this implements\n the necessary handlers for the runtime and managers.\n \"\"\"\n\n def __init__(self):\n self.__actor = TactixActor(self.on_message)\n self.__listeners = self._load_listeners()\n\n self._load_coroutines()\n\n def __del__(self):\n self.__actor.shutdown()\n\n @classmethod\n def listener(cls, name: t.Optional[str] = None):\n \"\"\"\n Wraps a function or callable, adding it to the actor handle so that\n it is invoked when a message is sent to the actor and has the relevant\n event name.\n\n Args:\n name:\n A optional name to give to the listener if you wish to name\n it something other than the function name. e.g. to avoid naming\n collisions.\n \"\"\"\n\n def wrapper(func):\n if name is None:\n name_ = func.__name__\n else:\n name_ = name\n\n if not name_.startswith(\"on_\"):\n raise ValueError(\"Event names must be prefixed with 'on_'.\")\n\n if \" \" in name_:\n raise ValueError(\"Event names must not contain spaces.\")\n\n return ActorListener(func, name_[3:])\n\n return wrapper\n\n @staticmethod\n def _check_listener(cb) -> bool:\n return isinstance(cb, ActorListener)\n\n def _load_listeners(self):\n listeners = inspect.getmembers(self, self._check_listener)\n return dict(map(lambda parts: parts[1].as_tuple(), listeners))\n\n @staticmethod\n def _check_coroutine(cb) -> bool:\n return isinstance(cb, CoroutineStateMachine)\n\n def _load_coroutines(self):\n listeners: t.List[t.Tuple[str, CoroutineStateMachine]] = inspect.getmembers(\n self,\n self._check_coroutine\n )\n for _, caller in listeners:\n caller._set_actor(actor=self)\n\n def on_message(self, event: str, message: t.Any):\n \"\"\"\n The base call where all events to the actor are sent to, the handler\n then invokes the relevant handle.\n \"\"\"\n try:\n self.__listeners[event](self, message)\n except Exception as _:\n print_exc()\n\n def shutdown(self):\n \"\"\" Shuts down the actor and its relevant waiters \"\"\"\n self.__actor.shutdown()\n\n def send(self, event: str, message: t.Any, delay: float = 0):\n \"\"\"\n Sends a message to this actor. This will never block.\n If a delay is given (float in seconds), the message will send\n after the time has elapsed.\n\n Args:\n event:\n The name of the event callback, this should not include the\n prefixed 'on_' and simply just be the name.\n\n This is parameter should be a string.\n\n message:\n The object to be sent to the channel, this can be any object.\n\n delay:\n The time to have elapsed before sending the message, time in\n seconds and can be represented as a float.\n \"\"\"\n if delay == 0:\n self.__actor.send(event, message)\n else:\n self.__actor.send_later(event, message, delay)\n\n @classmethod\n def wrap_coroutine(cls, cb) -> \"CoroutineStateMachine\":\n \"\"\" Wraps a coroutine to become a finite state machine \"\"\"\n return CoroutineStateMachine(cb)\n\n\nclass CoroutineStateMachine:\n \"\"\"\n Turns a coroutine into a state machine producer. 
The coroutine is polled\n and state changed when a actor invokes a wakeup.\n \"\"\"\n\n def __init__(self, cb: t.Callable):\n self._cb = cb\n self._actor: t.Optional[Actor] = None\n\n def _set_actor(self, actor: Actor):\n self._actor = actor\n\n def __call__(self, *args, **kwargs):\n return ActorContext(self._cb, self._actor, args, kwargs)\n\n\nclass ActorContext(Actor):\n \"\"\"\n Used to handle the context of a given coroutine state machine\n this manages the coroutine state and also suspension via the sleep function\n \"\"\"\n\n def __init__(self, cb: t.Callable, actor: Actor, args, kwargs):\n super().__init__()\n self._coroutine = cb(\n actor,\n self,\n *args,\n **kwargs\n ).__await__().__iter__()\n\n self._start()\n\n def _start(self):\n self.send(\"wake\", None)\n\n @Actor.listener()\n def on_wake(self, _):\n try:\n next(self._coroutine)\n except StopIteration:\n self.shutdown()\n\n def sleep(self, n: float):\n \"\"\"\n Suspends the running coroutine without blocking the rest of the worker\n thread, you can use time.sleep(n) however this will take a worker\n thread out of action for the time.\n \"\"\"\n self.send(\"wake\", None, delay=n)\n return OnceTick()\n" }, { "alpha_fraction": 0.6195918321609497, "alphanum_fraction": 0.6195918321609497, "avg_line_length": 25.06382942199707, "blob_id": "93d61762b4b0c7859ac8e5a92c3dac61029f324a", "content_id": "1902d7c04b380983a9e79e4dc97f4194b8659349", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1225, "license_type": "no_license", "max_line_length": 80, "num_lines": 47, "path": "/tactix/runner.py", "repo_name": "ChillFish8/tactix", "src_encoding": "UTF-8", "text": "from typing import Callable\n\nfrom . import TactixRuntime\n\n\ndef run(cb: Callable, *args, **kwargs):\n \"\"\"\n Creates and starts the runtime, this passes any args or kwargs to\n the callback, when the callback is finished the runtime shuts down\n and waits till it is closed.\n\n Args:\n cb:\n The main function of your program.\n *args:\n Any args to be passed to the callback.\n **kwargs:\n Any kwargs to be passed to the callback.\n \"\"\"\n rt = TactixRuntime()\n\n cb(*args, **kwargs)\n rt.shutdown()\n rt.wait()\n\n\ndef run_forever(cb: Callable, *args, **kwargs):\n \"\"\"\n Creates and starts the runtime, this passes the runtime handle and\n any args or kwargs to the callback. 
Unlike `tactix.run()` this does NOT\n shutdown the runtime after the main loop has finished and instead waits\n until something else has called the shutdown function of the runtime handle.\n\n Args:\n cb:\n The main function of your program.\n *args:\n Any args to be passed to the callback.\n **kwargs:\n Any kwargs to be passed to the callback.\n \"\"\"\n\n rt = TactixRuntime()\n\n cb(rt, *args, **kwargs)\n\n rt.wait()\n" }, { "alpha_fraction": 0.7953216433525085, "alphanum_fraction": 0.7953216433525085, "avg_line_length": 27.5, "blob_id": "6f4d0cd544f19d229ddb91b72c13ab3f21d2f4a1", "content_id": "c5d51378508aab1d8bdb57459c9d2a57d01a502f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 171, "license_type": "no_license", "max_line_length": 53, "num_lines": 6, "path": "/tactix/__init__.py", "repo_name": "ChillFish8/tactix", "src_encoding": "UTF-8", "text": "from .placeholder import *\nfrom .tactix import *\n\nfrom .runner import run, run_forever\nfrom .helpers import OnceTick\nfrom .actor import Actor, ActorListener, ActorContext\n" }, { "alpha_fraction": 0.5744985938072205, "alphanum_fraction": 0.5873925685882568, "avg_line_length": 18.94285774230957, "blob_id": "dd3019268cf79967879cec2254d5b0ef392d7938", "content_id": "54467baf9c52a2cf9b7d140bb65eda35e4c9b694", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 698, "license_type": "no_license", "max_line_length": 59, "num_lines": 35, "path": "/tests/waker test.py", "repo_name": "ChillFish8/tactix", "src_encoding": "UTF-8", "text": "from time import sleep\n\nimport tactix\nfrom tactix import Actor, ActorContext\n\n\nclass MyActor(Actor):\n def __init__(self):\n super().__init__()\n self.count = 0\n\n @Actor.listener()\n def on_foo(self, message):\n self.handle_foo(message)\n\n @Actor.wrap_coroutine\n async def handle_foo(self, ctx: ActorContext, message):\n await ctx.sleep(message[1])\n print(\"done!\")\n\n @Actor.listener(name=\"on_hello\")\n def custom_name(self, message):\n print(f\"Got: {message}\")\n\n\ndef main():\n act1 = MyActor()\n act1.send(\"foo\", (\"foo 1\", 5), delay=2)\n act1.send(\"hello\", \"Hello, World!\")\n\n sleep(2)\n\n\nif __name__ == '__main__':\n tactix.run(main)\n" }, { "alpha_fraction": 0.6747881174087524, "alphanum_fraction": 0.6816737055778503, "avg_line_length": 25.957143783569336, "blob_id": "f56d8f63a92dc99f84dc26c73a1b64e4d58630f2", "content_id": "a3e5d9d9c162d0eb45cab01390ef1861149ccdb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1888, "license_type": "no_license", "max_line_length": 81, "num_lines": 70, "path": "/README.md", "repo_name": "ChillFish8/tactix", "src_encoding": "UTF-8", "text": "# Tactix\nA work-stealing actor framework for Python built on Tokio.rs.\n\nThe easiest way to understand how Tactix works is think: \n#### asyncio + threading + actors => tactix\n\nUnlike asyncio tactix will not be stopped or be interrupted by blocking tasks,\ninstead if a particular worker thread is blocking the other threads will steal\nthe work off of the blocked thread until it is released again. 
\n\nThis does mean you can use regular blocking calls like time.sleep(n) and not\nfear blocking the whole loop but note this will still affect the loop if more\nthan `n` blocking tasks are running where `n` is the amount of logical CPU cores.\n\n## Example\n```py\nfrom time import sleep\n\nimport tactix\nfrom tactix import Actor, ActorContext\n\n\nclass MyActor(Actor):\n def __init__(self):\n super().__init__()\n\n @Actor.listener()\n def on_foo(self, message):\n self.handle_foo(message)\n\n @Actor.wrap_coroutine\n async def handle_foo(self, ctx: ActorContext, message):\n # Like asyncio, using a non-blocking sleep, thousands of\n # tasks can run on a single thread.\n await ctx.sleep(message[1])\n print(\"done!\")\n\n @Actor.listener(name=\"on_hello\")\n def custom_name(self, message):\n # Now this will block the worker but all other tasks will be \n # un-effected as the work stealer will have re-distributed tasks.\n sleep(2)\n print(f\"Got: {message}\")\n\n\ndef main():\n act1 = MyActor()\n act1.send(\"foo\", (\"foo 1\", 5), delay=2)\n act1.send(\"hello\", \"Hello, World!\")\n\n sleep(8)\n\n\nif __name__ == '__main__':\n tactix.run(main)\n```\n\n\n## Building\n\nObviously because this system is built of the Rust back bone you're going to need\nto install Rust, you will also need `cmake`.\n\n### Using Maturin:\n\n1) `git pull https://github.com/ChillFish8/tactix.git`\n\n2) `maturin develop` or `maturin develop --release`\n\n3) Have fun.\n\n" }, { "alpha_fraction": 0.4592592716217041, "alphanum_fraction": 0.47160494327545166, "avg_line_length": 20.3157901763916, "blob_id": "d35c3f2bc561998703270f68f5369be821b38eb6", "content_id": "504918d182a8c61da49ade6dd5b619bb4b417df0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 405, "license_type": "no_license", "max_line_length": 45, "num_lines": 19, "path": "/tactix/helpers.py", "repo_name": "ChillFish8/tactix", "src_encoding": "UTF-8", "text": "class OnceTick:\n def __init__(self):\n self.__state = 0\n\n def __await__(self):\n return self\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.__state == 0:\n self.__state += 1\n return\n\n if self.__state == 1:\n self.__state += 1\n raise StopIteration(None)\n raise AssertionError(\"Invalid state\")\n" } ]
num_files: 12

repo_name: geohippie/NG911
repo_url: https://github.com/geohippie/NG911
snapshot_id: 04375a3e304e5a5477367c8d685dc562a5cb35eb
revision_id: c72164e308588f2a2dab639049a3a47b1c2c43a3
directory_id: c4eaa934f246901b4aa119c1d6633b8255140cd8
branch_name: refs/heads/master
visit_date: 2021-01-18T21:13:05.031085
revision_date: 2018-05-30T15:59:37
committer_date: 2018-05-30T15:59:37
github_id: 40149067
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: 2015-08-03T21:34:42
gha_updated_at: 2015-08-03T21:58:58
gha_pushed_at: 2015-08-18T18:10:21
gha_language: Python
files:
[ { "alpha_fraction": 0.5049692392349243, "alphanum_fraction": 0.5106483697891235, "avg_line_length": 30.53731346130371, "blob_id": "06d1497c9ca7bb7752ec240582879a712627b78a", "content_id": "6ee26431aad49589ae02a900fa8fe09e55c157e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2113, "license_type": "no_license", "max_line_length": 99, "num_lines": 67, "path": "/Calc_Map_Range_in_Centerline.py", "repo_name": "geohippie/NG911", "src_encoding": "UTF-8", "text": "#-------------------------------------------------------------------------------\n# Name: Calculate Field Map Range or Label expression with Map Range\n# Purpose: Calculate Field Map Range or Label expression with Map Range\n#\n# Author: Lee Allen\n#\n# Created: 04/11/2016\n# Copyright: (c) Lee Allen 2016\n# License: Use at your earliest convenience\n#-------------------------------------------------------------------------------\n#For just labeling---->Below\n\n#While in Editing Session - Right Click MAP RANGE field in table>Field Calculator\n#Enter data as shown below\n\n#select Python\n#check Show Codeblock\n\n# Pre-Logic Script Code:\ndef map_range(L_T_ADD,R_T_ADD,R_F_ADD,L_F_ADD):\n lst = []\n lst.append(L_T_ADD)\n lst.append(R_T_ADD)\n lst.append(R_F_ADD)\n lst.append(L_F_ADD)\n mrange = str(min(lst)) + ' - ' + str(max(lst))\n return mrange\n\n#MAP_RANGE =\nmap_range( !L_T_ADD!, !R_T_ADD!, !R_F_ADD!, !L_F_ADD!)\n\n##---------------LABEL ADDRESS RANGES-----------------------------\n##----------------------------------------------------------------\n##----------------------------------------------------------------\n\n## USE INSTEAD of Calculating field\n## Check Display coded value description\n## Check Advanced\n## Parser is Python\n\ndef FindLabel ( [L_T_ADD] , [R_T_ADD] , [R_F_ADD] , [L_F_ADD] ):\n lst = []\n lst.append([L_T_ADD])\n lst.append([R_T_ADD])\n lst.append([R_F_ADD])\n lst.append([L_F_ADD])\n mrange = str(min(lst)) + ' - ' + str(max(lst))\n return mrange\n\n##---------------LABEL ADDRESS RANGES and Road NAME using LABEL Field -----------------------------\n## USE INSTEAD of Calculating field\n## Check Display coded value description\n## Check Advanced\n## Parser is Python\n\n## Returns the Street LABEL as well as a Map Range\n\ndef FindLabel ([L_T_ADD] ,[R_T_ADD] ,[R_F_ADD] ,[L_F_ADD], [LABEL]):\n lst = []\n lst.append([L_T_ADD])\n lst.append([R_T_ADD])\n lst.append([R_F_ADD])\n lst.append([L_F_ADD])\n mrange = str(min(lst)) + ' - ' + str(max(lst))\n label = [LABEL] \n namerange = label + ' ' + str(mrange)\n return namerange\n" }, { "alpha_fraction": 0.5271641612052917, "alphanum_fraction": 0.5564178824424744, "avg_line_length": 28.38596534729004, "blob_id": "4724c3cbf69621f2370e845b9bbd220ef22b766b", "content_id": "30fb30486ccedb920808f7f004090bcd9cfec980", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1675, "license_type": "no_license", "max_line_length": 102, "num_lines": 57, "path": "/find_last_SEGID_used.py", "repo_name": "geohippie/NG911", "src_encoding": "UTF-8", "text": "#-------------------------------------------------------------------------------\n# Name: module1\n# Purpose:\n#\n# Author: Lee Allen\n#\n# Created: /2015\n# Updated 4/11/2016 to report/accept non number values if they occur in data field\n# Copyright: (c) Lee Allen 2015\n# License: Use at your convenience\n#-------------------------------------------------------------------------------\n\n#find the first and last SEGID 
or ADDID. Make sure GenerateID value is higher than last SEGID or ADDID\n#skips rows that are not numbers\n\nstreetcursor = arcpy.da.SearchCursor(\"RoadCenterline\",\"SEGID\")\nseglist = []\nfor row in streetcursor:\n SEGID = row[0]\n try:\n SEGID = int(row[0])\n seglist.append(SEGID)\n except:\n print 'Segment ID ', SEGID,' is not a number'\n pass\n\nseglist.sort()\nprint \"First SEGID =\", seglist[0],\"Last SEGID =\",seglist[-1]\n\napointcursor = arcpy.da.SearchCursor(\"AddressPoints\",\"ADDID\")\npointlist = []\nfor row in apointcursor:\n try:\n ADDID = int(row[0])\n pointlist.append(ADDID)\n except:\n print 'Point ID ' , ADDID ,' is not a number'\n pass\n\npointlist.sort()\nprint \"First ADD =\", pointlist[0],\"Last ADDID =\",pointlist[-1]\n\n##testlist = [1,5,8,12,55,0,3,7,66,'aaa',999,21,75]\n##segtestlist = []\n##for num in testlist:\n## SEGID = num\n## try:\n## if SEGID/1:\n## SEGID = int(num)\n## segtestlist.append(SEGID)\n## except:\n## print 'ID ' + SEGID +' is not a number'\n## pass\n##\n##segtestlist.sort()\n##print \"First SEGID =\", segtestlist[0],\"Last SEGID =\",segtestlist[-1]\n### should print: ID aaa is not a number First SEGID = 1 Last SEGID = 999\n" }, { "alpha_fraction": 0.6590909361839294, "alphanum_fraction": 0.7954545617103577, "avg_line_length": 21, "blob_id": "4cc981f7f4e9feda350cc8e355cf2523819033ac", "content_id": "60d714a5e19cf9b2f6634357eca00f34422d41b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 44, "license_type": "no_license", "max_line_length": 35, "num_lines": 2, "path": "/README.md", "repo_name": "geohippie/NG911", "src_encoding": "UTF-8", "text": "# NG911\nscripts and files specific to NG911\n" } ]
num_files: 3
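The scalar columns make simple corpus filtering cheap. A sketch of selecting permissively licensed records that contain Go or Rust sources (assuming `ds` is the streamed split from the earlier loading example; the license list here is illustrative, not exhaustive):

```python
def keep(row):
    # Keep rows with a known permissive GitHub license, at least one star,
    # and at least one Go or Rust file among the blobs.
    return (
        row["gha_license_id"] in ("MIT", "Apache-2.0")
        and row["star_events_count"] >= 1
        and any(blob["language"] in ("Go", "Rust") for blob in row["files"])
    )

subset = (row for row in ds if keep(row))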

repo_name: gpulido/AirzoneCloud
repo_url: https://github.com/gpulido/AirzoneCloud
snapshot_id: b0eadd45bf9a180857bdeed2100b763bfd5e05a8
revision_id: bf776a6e6e9519e7c7fa45cdcce30dd1a63005d8
directory_id: fa99eb547b0feb104b43af3a01ad3b6d9d924fc8
branch_name: refs/heads/master
visit_date: 2022-07-26T16:17:40.687614
revision_date: 2020-05-24T07:15:15
committer_date: 2020-05-24T07:15:15
github_id: 271732313
star_events_count: 3
fork_events_count: 0
gha_license_id: MIT
gha_created_at: 2020-06-12T07:05:49
gha_updated_at: 2020-05-25T07:26:57
gha_pushed_at: 2020-05-24T07:15:21
gha_language: null
files:
[ { "alpha_fraction": 0.5612483024597168, "alphanum_fraction": 0.5665887594223022, "avg_line_length": 32.28888702392578, "blob_id": "f31b5b7c4c2b40a9221a0cdabc64cf38b126c52e", "content_id": "df4d56c56a9def5b719b3a51edc81b5e834827dd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5992, "license_type": "permissive", "max_line_length": 166, "num_lines": 180, "path": "/AirzoneCloud/AirzoneCloud.py", "repo_name": "gpulido/AirzoneCloud", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\nimport logging\nimport requests\nimport urllib\nimport urllib.parse\nimport json\n\nfrom .contants import (\n API_LOGIN,\n API_DEVICE_RELATIONS,\n API_SYSTEMS,\n API_ZONES,\n API_EVENTS,\n)\nfrom .Device import Device\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass AirzoneCloud:\n \"\"\"Allow to connect to AirzoneCloud API\"\"\"\n\n _session = None\n _username = None\n _password = None\n _base_url = \"https://www.airzonecloud.com\"\n _user_agent = \"Mozilla/5.0 (Linux; Android 6.0.1; Nexus 7 Build/MOB30X; wv) AppleWebKit/537.26 (KHTML, like Gecko) Version/4.0 Chrome/70.0.3538.110 Safari/537.36\"\n _token = None\n _devices = []\n\n def __init__(\n self, username, password, user_agent=None, base_url=None,\n ):\n \"\"\"Initialize API connection\"\"\"\n self._session = requests.Session()\n self._username = username\n self._password = password\n if user_agent is not None and isinstance(user_agent, str):\n self._user_agent = user_agent\n if base_url is not None and isinstance(base_url, str):\n self._base_url = base_url\n # login\n self._login(username, password)\n # load devices\n self._load_devices()\n\n #\n # getters\n #\n\n @property\n def devices(self):\n \"\"\"Get devices list (same order as in app)\"\"\"\n return self._devices\n\n @property\n def all_systems(self):\n \"\"\"Get all systems from all devices (same order as in app)\"\"\"\n result = []\n for device in self.devices:\n for system in device.systems:\n result.append(system)\n return result\n\n @property\n def all_zones(self):\n \"\"\"Get all zones from all devices (same order as in app)\"\"\"\n result = []\n for device in self.devices:\n for system in device.systems:\n for zone in system.zones:\n result.append(zone)\n return result\n\n #\n # Refresh\n #\n\n def refresh_devices(self):\n \"\"\"Refresh devices\"\"\"\n self._load_devices()\n\n #\n # private\n #\n\n def _login(self, username, password):\n \"\"\"Login to AirzoneCloud and return token\"\"\"\n\n try:\n url = \"{}{}\".format(self._base_url, API_LOGIN)\n login_payload = {\"email\": username, \"password\": password}\n headers = {\"User-Agent\": self._user_agent}\n response = self._session.post(\n url, headers=headers, json=login_payload\n ).json()\n self._token = response.get(\"user\").get(\"authentication_token\")\n except (RuntimeError, AttributeError):\n raise Exception(\"Unable to login to AirzoneCloud\") from None\n\n _LOGGER.info(\"Login success as {}\".format(self._username))\n\n return self._token\n\n def _load_devices(self):\n \"\"\"Load all devices for this account\"\"\"\n current_devices = self._devices\n self._devices = []\n try:\n for device_relation in self._get_device_relations():\n device_data = device_relation.get(\"device\")\n device = None\n # search device in current_devices (if where are refreshing devices)\n for current_device in current_devices:\n if current_device.id == device_data.get(\"id\"):\n device = current_device\n device._set_data_refreshed(device_data)\n break\n # device not found => 
instance new device\n if device is None:\n device = Device(self, device_data)\n self._devices.append(device)\n except RuntimeError:\n raise Exception(\"Unable to load devices from AirzoneCloud\")\n return self._devices\n\n def _get_device_relations(self):\n \"\"\"Http GET to load devices\"\"\"\n _LOGGER.debug(\"get_device_relations()\")\n return self._get(API_DEVICE_RELATIONS).get(\"device_relations\")\n\n def _get_systems(self, device_id):\n \"\"\"Http GET to load systems\"\"\"\n _LOGGER.debug(\"get_systems(device_id={})\".format(device_id))\n return self._get(API_SYSTEMS, {\"device_id\": device_id}).get(\"systems\")\n\n def _get_zones(self, system_id):\n \"\"\"Http GET to load Zones\"\"\"\n _LOGGER.debug(\"get_zones(system_id={})\".format(system_id))\n return self._get(API_ZONES, {\"system_id\": system_id}).get(\"zones\")\n\n def _send_event(self, payload):\n \"\"\"Http POST to send an event\"\"\"\n _LOGGER.debug(\"Send event with payload: {}\".format(json.dumps(payload)))\n try:\n result = self._post(API_EVENTS, payload)\n _LOGGER.debug(\"Result event: {}\".format(json.dumps(result)))\n return result\n except RuntimeError:\n _LOGGER.error(\"Unable to send event to AirzoneCloud\")\n return None\n\n def _get(self, api_endpoint, params={}):\n \"\"\"Do a http GET request on an api endpoint\"\"\"\n params[\"format\"] = \"json\"\n params[\"user_email\"] = self._username\n params[\"user_token\"] = self._token\n url = \"{}{}/?{}\".format(\n self._base_url, api_endpoint, urllib.parse.urlencode(params)\n )\n headers = {\"User-Agent\": self._user_agent}\n return self._session.get(url, headers=headers).json()\n\n def _post(self, api_endpoint, payload={}):\n \"\"\"Do a http POST request on an api endpoint\"\"\"\n uri_params = {\n \"user_email\": self._username,\n \"user_token\": self._token,\n }\n url = \"{}{}/?{}\".format(\n self._base_url, api_endpoint, urllib.parse.urlencode(uri_params)\n )\n headers = {\n \"User-Agent\": self._user_agent,\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Content-Type\": \"application/json;charset=UTF-8\",\n \"Accept\": \"application/json, text/plain, */*\",\n }\n return self._session.post(url, headers=headers, json=payload).json()\n" }, { "alpha_fraction": 0.5245847105979919, "alphanum_fraction": 0.5320044159889221, "avg_line_length": 25.0982666015625, "blob_id": "a7b43c3539d5a6c1514f9ebe8ea4c355bd128bd5", "content_id": "1d7f37a75d57a6eed4989f1a6d986af301322980", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9034, "license_type": "permissive", "max_line_length": 130, "num_lines": 346, "path": "/AirzoneCloud/System.py", "repo_name": "gpulido/AirzoneCloud", "src_encoding": "UTF-8", "text": "import logging\nfrom .contants import (\n MODES_CONVERTER,\n ECO_CONVERTER,\n VELOCITIES_CONVERTER,\n AIRFLOW_CONVERTER,\n)\nfrom .Zone import Zone\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass System:\n \"\"\"Manage a AirzoneCloud system\"\"\"\n\n _api = None\n _device = None\n _data = {}\n _zones = []\n\n def __init__(self, api, device, data):\n self._api = api\n self._device = device\n self._data = data\n\n # log\n _LOGGER.info(\"Init {}\".format(self.str_complete))\n _LOGGER.debug(data)\n\n # load zones\n self._load_zones()\n\n def __str__(self):\n return \"System(name={}, mode={}, eco={}, velocity={}, airflow={})\".format(\n self.name, self.mode, self.eco, self.velocity, self.airflow,\n )\n\n @property\n def str_complete(self):\n return \"System(name={}, mode={}, eco={}, velocity={}, airflow={}, id={}, 
system_number={}, device_id={})\".format(\n self.name,\n self.mode,\n self.eco,\n self.velocity,\n self.airflow,\n self.id,\n self.system_number,\n self.device_id,\n )\n\n #\n # getters\n #\n\n @property\n def name(self):\n return self._data.get(\"name\")\n\n @property\n def mode(self):\n if self.mode_raw is None:\n return None\n return MODES_CONVERTER[self.mode_raw][\"name\"]\n\n @property\n def mode_description(self):\n if self.mode_raw is None:\n return None\n return MODES_CONVERTER[self.mode_raw][\"description\"]\n\n @property\n def mode_raw(self):\n return self._data.get(\"mode\")\n\n @property\n def eco(self):\n if self.eco_raw is None:\n return None\n return ECO_CONVERTER[self.eco_raw][\"name\"]\n\n @property\n def eco_description(self):\n if self.eco_raw is None:\n return None\n return ECO_CONVERTER[self.eco_raw][\"description\"]\n\n @property\n def eco_raw(self):\n return self._data.get(\"eco\")\n\n @property\n def has_velocity(self):\n return self._data.get(\"has_velocity\")\n\n @property\n def velocity(self):\n if self.velocity_raw is None:\n return None\n return VELOCITIES_CONVERTER[self.velocity_raw][\"name\"]\n\n @property\n def velocity_description(self):\n if self.velocity_raw is None:\n return None\n return VELOCITIES_CONVERTER[self.velocity_raw][\"description\"]\n\n @property\n def velocity_raw(self):\n return self._data.get(\"velocity\")\n\n @property\n def has_airflow(self):\n return self._data.get(\"has_air_flow\")\n\n @property\n def airflow(self):\n if self.airflow_raw is None:\n return None\n return AIRFLOW_CONVERTER[self.airflow_raw][\"name\"]\n\n @property\n def airflow_description(self):\n if self.airflow_raw is None:\n return None\n return AIRFLOW_CONVERTER[self.airflow_raw][\"description\"]\n\n @property\n def airflow_raw(self):\n return self._data.get(\"air_flow\")\n\n @property\n def max_temp(self):\n if self._data.get(\"max_limit\") is not None:\n return float(self._data.get(\"max_limit\"))\n return None\n\n @property\n def min_temp(self):\n if self._data.get(\"min_limit\") is not None:\n return float(self._data.get(\"min_limit\"))\n return None\n\n @property\n def id(self):\n return self._data.get(\"id\")\n\n @property\n def device_id(self):\n return self._data.get(\"device_id\")\n\n @property\n def system_number(self):\n return self._data.get(\"system_number\")\n\n @property\n def firmware_ws(self):\n return self._data.get(\"firm_ws\")\n\n @property\n def firmware_system(self):\n return self._data.get(\"system_fw\")\n\n #\n # setters\n #\n\n def set_mode(self, mode_name):\n \"\"\" Set mode of the system \"\"\"\n _LOGGER.info(\"call set_mode({}) on {}\".format(mode_name, self))\n mode_id_found = None\n for mode_id, mode in MODES_CONVERTER.items():\n if mode[\"name\"] == mode_name:\n mode_id_found = mode_id\n break\n if mode_id_found is None:\n raise ValueError('mode name \"{}\" not found'.format(mode_name))\n\n # send event\n self._send_event(\"mode\", int(mode_id_found))\n\n # update mode\n self._data[\"mode\"] = mode_id_found\n\n # refresh modes on sub-zones (don't refresh because API so slow to update sub-zones, about 5sec...)\n for zone in self.zones:\n zone._data[\"mode\"] = mode_id_found\n\n return True\n\n #\n # children\n #\n\n @property\n def zones(self):\n \"\"\" Get all zones in this system \"\"\"\n return self._zones\n\n #\n # parent device\n #\n\n @property\n def device(self):\n \"\"\" Get parent device \"\"\"\n return self._device\n\n #\n # Refresh\n #\n\n def ask_airzone_update(self):\n \"\"\"\n Ask an update to the airzone hardware 
(airzonecloud don't autopull data like current temperature)\n The update should be available in airzonecloud after 3 to 5 secs in average\n \"\"\"\n self._ask_airzone_update()\n\n def refresh(self, refresh_zones=True):\n \"\"\" Refresh current system data (call refresh_systems on parent device) \"\"\"\n\n # ask airzone to update its data in airzonecloud (there is some delay so current update will be available on next refresh)\n self.ask_airzone_update()\n\n # refresh systems (including current) from parent device\n self.device.refresh_systems()\n\n # refresh subzones in needed\n if refresh_zones:\n self._load_zones()\n\n #\n # private\n #\n\n def _load_zones(self):\n \"\"\"Load all zones for this system\"\"\"\n current_zones = self._zones\n self._zones = []\n try:\n for zone_data in self._api._get_zones(self.id):\n zone = None\n # search zone in current_zones (if where are refreshing zones)\n for current_zone in current_zones:\n if current_zone.id == zone_data.get(\"id\"):\n zone = current_zone\n zone._set_data_refreshed(zone_data)\n break\n # zone not found => instance new zone\n if zone is None:\n zone = Zone(self._api, self, zone_data)\n self._zones.append(zone)\n except RuntimeError:\n raise Exception(\n \"Unable to load zones of system {} ({}) from AirzoneCloud\".format(\n self.name, self.id\n )\n )\n\n return self._zones\n\n def _send_event(self, option, value):\n \"\"\" Send an event for current system \"\"\"\n payload = {\n \"event\": {\n \"cgi\": \"modsistema\",\n \"device_id\": self.device_id,\n \"system_number\": self.system_number,\n \"option\": option,\n \"value\": value,\n }\n }\n return self._api._send_event(payload)\n\n def _ask_airzone_update(self):\n \"\"\"Ask an update to the airzone hardware (airzonecloud don't autopull data)\"\"\"\n payload = {\n \"event\": {\n \"cgi\": \"infosistema2\",\n \"device_id\": self.device_id,\n \"system_number\": self.system_number,\n \"option\": None,\n \"value\": None,\n }\n }\n return self._api._send_event(payload)\n\n def _set_data_refreshed(self, data):\n \"\"\" Set data refreshed (call by parent device on refresh_systems()) \"\"\"\n self._data = data\n _LOGGER.info(\"Data refreshed for {}\".format(self.str_complete))\n\n\n#\n# System raw data example\n#\n\n# {\n# \"id\": \"...\",\n# \"device_id\": \"...\",\n# \"name\": \"Home\",\n# \"eco\": \"2\",\n# \"eco_color\": \"5\",\n# \"velocity\": null,\n# \"air_flow\": null,\n# \"connMC\": null,\n# \"VMC_mode\": \"0\",\n# \"VMC_state\": \"0\",\n# \"has_velocity\": false,\n# \"has_air_flow\": false,\n# \"mode\": \"5\",\n# \"modes\": \"1111111011\",\n# \"master_setup\": false,\n# \"setup_type\": \"0\",\n# \"max_limit\": \"30.0\",\n# \"min_limit\": \"18.0\",\n# \"zones_ids\": [\n# \"id1...\",\n# \"id2...\",\n# \"id3...\",\n# \"id4...\",\n# ],\n# \"class\": \"System\",\n# \"updated_at\": 1587195368,\n# \"system_number\": \"1\",\n# \"last_update\": 1587195368,\n# \"firm_ws\": \"3.173\",\n# \"scene\": null,\n# \"auto\": null,\n# \"temperature_unit\": null,\n# \"autochange_differential\": null,\n# \"config_ZBS_visible_environment\": null,\n# \"system_fw\": 3.09,\n# \"heat_stages\": \"1\",\n# \"cold_stages\": null,\n# \"auto_index_prog\": true,\n# \"system_errors\": \"00000001\",\n# \"auto_mode_battery_temperature\": false,\n# \"machine_error_code\": \"ÿÿÿÿ\",\n# \"setpoint\": null,\n# \"tank_temp\": null,\n# \"powerful\": null,\n# \"power_acs\": null,\n# \"acs_min\": null,\n# \"acs_max\": null,\n# }\n" }, { "alpha_fraction": 0.8275862336158752, "alphanum_fraction": 0.8275862336158752, 
"avg_line_length": 28, "blob_id": "b30ddacb34e1a17e3bea76bfe2d977895c37c2b7", "content_id": "334b0a9f1630f5d151fd2dabbb6f2509a8cce08d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 116, "license_type": "permissive", "max_line_length": 38, "num_lines": 4, "path": "/AirzoneCloud/__init__.py", "repo_name": "gpulido/AirzoneCloud", "src_encoding": "UTF-8", "text": "from .AirzoneCloud import AirzoneCloud\nfrom .Device import Device\nfrom .System import System\nfrom .Zone import Zone\n" }, { "alpha_fraction": 0.6501332521438599, "alphanum_fraction": 0.6636605858802795, "avg_line_length": 25.66120147705078, "blob_id": "618e6874102b6d94243ba7f1a31e69c0e550ef07", "content_id": "4659b34abc32dd532cec08704c1c2a4d5d5f5c66", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4879, "license_type": "permissive", "max_line_length": 236, "num_lines": 183, "path": "/README.md", "repo_name": "gpulido/AirzoneCloud", "src_encoding": "UTF-8", "text": "# Airzone Cloud\n\n- [Airzone Cloud](#airzone-cloud)\n - [Presentation](#presentation)\n - [Abstract](#abstract)\n - [Module classes](#module-classes)\n - [Usage](#usage)\n - [Install](#install)\n - [Start API](#start-api)\n - [Get device status](#get-device-status)\n - [Get system status](#get-system-status)\n - [Get all zones status (on all devices / systems)](#get-all-zones-status-on-all-devices--systems)\n - [Control a specific zone](#control-a-specific-zone)\n - [HVAC mode](#hvac-mode)\n - [Available modes](#available-modes)\n - [Set HVAC mode on a system (and its sub-zones)](#set-hvac-mode-on-a-system-and-its-sub-zones)\n - [API doc](#api-doc)\n - [Constructor](#constructor)\n\n## Presentation\n\n### Abstract\n\nAllow to communicate easily with Airzone Cloud to retrieve information or to send commands (on/off, temperature, HVAC mode, ...)\n\nThis library manage the main Airzone Cloud API (try to connect to [www.airzonecloud.com](https://www.airzonecloud.com) to be sure).\n\nIf you are looking for the specific Airzone Cloud API for Daikin (try to connect to [dkn.airzonecloud.com](https://dkn.airzonecloud.com)), you should use this package : [AirzoneCloudDaikin](https://github.com/max13fr/AirzoneCloudDaikin)\n\n### Module classes\n\n* **AirzoneCloud** : represent your AirzoneCloud account. Contains a list of your **devices** :\n * **Device** : represent one of your Airzone webserver registered. Contains a list of its **systems** :\n * **System** : represent your climate equipment (Mitsubishi, Daikin, ...). 
Contains a list of its **zones** :\n * **Zone** : represent a zone to control\n\n## Usage\n\n### Install\n\n```bash\npip3 install AirzoneCloud\n```\n\n### Start API\n\n```python\nfrom AirzoneCloud import AirzoneCloud\napi = AirzoneCloud(\"email@domain.com\", \"password\")\n```\n\n### Get device status\n\n```python\nfor device in api.devices:\n print(\n \"Device name={}, status={}, id={}, mac={}, pin={}\".format(\n device.name, device.status, device.id, device.mac, device.pin\n )\n )\n```\n\nOutput :\n\n<pre>\nDevice name=Home, status=activated, id=5bc8ae0c4149526af90c0000, mac=AA:BB:CC:DD:EE:FF, pin=1234\n</pre>\n\n### Get system status\n\n```python\nfor system in api.devices[0].systems:\n print(\n \"System name={}, mode={}, eco={}, velocity={}, airflow={}\".format(\n system.name,\n system.mode,\n system.eco,\n system.velocity,\n system.airflow,\n )\n )\n```\n\nOutput :\n\n<pre>\nSystem name=Home, mode=heat-both, eco=eco-a, velocity=None, airflow=None\n</pre>\n\n### Get all zones status (on all devices / systems)\n\n```python\nfor zone in api.all_zones:\n print(\n \"Zone name={}, is_on={}, mode={}, current_temperature={}, target_temperature={}\".format(\n zone.name,\n zone.is_on,\n zone.mode,\n zone.current_temperature,\n zone.target_temperature,\n )\n )\n```\n\nOutput :\n\n<pre>\nZone name=Baby bedroom, is_on=False, mode=heat-both, current_temperature=20.4, target_temperature=19.5\nZone name=Parents bedroom, is_on=False, mode=heat-both, current_temperature=21.1, target_temperature=17.0\nZone name=Living room, is_on=True, mode=heat-both, current_temperature=21.4, target_temperature=21.5\nZone name=Kitchen, is_on=False, mode=heat-both, current_temperature=21.2, target_temperature=19.0\n</pre>\n\n### Control a specific zone\n\n```python\nzone = api.all_zones[2]\nprint(zone)\n\n# start zone\nzone.turn_on()\n\n# set temperature\nzone.set_temperature(18.5)\n\nprint(zone)\n```\n\nOutput :\n\n<pre>\nZone(name=Living room, is_on=False, mode=heat-both, current_temp=21.6, target_temp=21.0)\nZone(name=Living room, is_on=True, mode=heat-both, current_temp=21.6, target_temp=18.5)\n</pre>\n\n### HVAC mode\n\n#### Available modes\n\n* **stop** : Stop\n* **ventilate** : Ventilate\n* **dehumidify** : Dry\n* **heat-air** : Air heating\n* **heat-radiant** : Radiant heating\n* **heat-both** : Combined heating\n* **cool-air** : Air cooling\n* **cool-radiant** : Radiant cooling\n* **cool-both** : Combined cooling\n\n#### Set HVAC mode on a system (and its sub-zones)\n\n```python\nsystem = api.devices[0].systems[0]\nprint(system)\n\n# set mode to heat-both\nsystem.set_mode(\"heat-both\")\n\nprint(system)\n```\n\nOutput :\n\n<pre>\nSystem(name=Home, mode=stop, eco=eco-a, velocity=None, airflow=None)\nSystem(name=Home, mode=heat-both, eco=eco-a, velocity=None, airflow=None)\n</pre>\n\n## API doc\n\n[API full doc](API.md)\n\n### Constructor\n\n```python\nAirzoneCloud(username, password, user_agent=None, base_url=None)\n```\n\n* **username** : you're username used to connect on Airzone Cloud website or app\n* **password** : you're password used to connect on Airzone Cloud website or app\n* **user_agent** : allow to change default user agent if set\n* **base_url** : allow to change base url of the Airzone Cloud API if set\n * default value : _https://www.airzonecloud.com_\n" }, { "alpha_fraction": 0.6896024346351624, "alphanum_fraction": 0.6901121139526367, "avg_line_length": 15.0819673538208, "blob_id": "f3ac39fbe6b51337c1a29c70d65b48c4d93a4ab2", "content_id": "eddee60319f1a7ef085a0c8464fda41f1c582ed1", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3926, "license_type": "permissive", "max_line_length": 100, "num_lines": 244, "path": "/API.md", "repo_name": "gpulido/AirzoneCloud", "src_encoding": "UTF-8", "text": "# AirzoneCloud package\n\n## Submodules\n\n## AirzoneCloud.AirzoneCloud module\n\n\n### class AirzoneCloud.AirzoneCloud.AirzoneCloud(username, password, user_agent=None, base_url=None)\nBases: `object`\n\nAllow to connect to AirzoneCloud API\n\n\n#### \\__init__(username, password, user_agent=None, base_url=None)\nInitialize API connection\n\n\n#### property all_systems()\nGet all systems from all devices (same order as in app)\n\n\n#### property all_zones()\nGet all zones from all devices (same order as in app)\n\n\n#### property devices()\nGet devices list (same order as in app)\n\n\n#### refresh_devices()\nRefresh devices\n\n## AirzoneCloud.Device module\n\n\n### class AirzoneCloud.Device.Device(api, data)\nBases: `object`\n\nManage a AirzoneCloud device\n\n\n#### \\__init__(api, data)\nInitialize self. See help(type(self)) for accurate signature.\n\n\n#### property firmware_ws()\nReturn webserver device\n\n\n#### property has_airflow()\n\n#### property has_eco()\n\n#### property has_farenheit()\n\n#### property has_velocity()\n\n#### property id()\nReturn device id\n\n\n#### property location()\nReturn device location\n\n\n#### property mac()\nReturn device mac\n\n\n#### property name()\nReturn device name\n\n\n#### property pin()\nReturn device pin code\n\n\n#### refresh(refresh_systems=True)\nRefresh current device data (call refresh_devices on parent AirzoneCloud)\n\n\n#### refresh_systems()\nRefresh all systems of this device\n\n\n#### property status()\nReturn device status\n\n\n#### property str_complete()\n\n#### property sync_datetime()\nReturn True if device datetime is sync with AirzoneCloud\n\n\n#### property systems()\n\n#### property target_temperature()\nReturn device target temperature\n\n## AirzoneCloud.System module\n\n\n### class AirzoneCloud.System.System(api, device, data)\nBases: `object`\n\nManage a AirzoneCloud system\n\n\n#### \\__init__(api, device, data)\nInitialize self. 
See help(type(self)) for accurate signature.\n\n\n#### property airflow()\n\n#### property airflow_description()\n\n#### property airflow_raw()\n\n#### ask_airzone_update()\nAsk an update to the airzone hardware (airzonecloud don’t autopull data like current temperature)\nThe update should be available in airzonecloud after 3 to 5 secs in average\n\n\n#### property device()\nGet parent device\n\n\n#### property device_id()\n\n#### property eco()\n\n#### property eco_description()\n\n#### property eco_raw()\n\n#### property firmware_system()\n\n#### property firmware_ws()\n\n#### property has_airflow()\n\n#### property has_velocity()\n\n#### property id()\n\n#### property max_temp()\n\n#### property min_temp()\n\n#### property mode()\n\n#### property mode_description()\n\n#### property mode_raw()\n\n#### property name()\n\n#### refresh(refresh_zones=True)\nRefresh current system data (call refresh_systems on parent device)\n\n\n#### set_mode(mode_name)\nSet mode of the system\n\n\n#### property str_complete()\n\n#### property system_number()\n\n#### property velocity()\n\n#### property velocity_description()\n\n#### property velocity_raw()\n\n#### property zones()\nGet all zones in this system\n\n## AirzoneCloud.Zone module\n\n\n### class AirzoneCloud.Zone.Zone(api, system, data)\nBases: `object`\n\nManage a Airzonecloud zone\n\n\n#### \\__init__(api, system, data)\nInitialize self. See help(type(self)) for accurate signature.\n\n\n#### property current_humidity()\n\n#### property current_temperature()\n\n#### property device_id()\n\n#### property id()\n\n#### property is_on()\n\n#### property max_temp()\n\n#### property min_temp()\n\n#### property mode()\n\n#### property mode_description()\n\n#### property mode_raw()\n\n#### property name()\n\n#### refresh()\nRefresh current zone data (call refresh on parent system)\n\n\n#### set_temperature(temperature)\nSet target_temperature for this zone\n\n\n#### property str_complete()\n\n#### property system()\nGet parent system\n\n\n#### property system_number()\n\n#### property target_temperature()\n\n#### turn_off()\nTurn zone off\n\n\n#### turn_on()\nTurn zone on\n\n\n#### property zone_number()\n## AirzoneCloud.contants module\n\n## Module contents\n" }, { "alpha_fraction": 0.5072491765022278, "alphanum_fraction": 0.5250338315963745, "avg_line_length": 24.99497413635254, "blob_id": "534d1e5d4e3af59adff6cdad6b2f662e9bdc618d", "content_id": "b7bb4628bc1bea2b4ac9f83870bb2549936ef5e8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5175, "license_type": "permissive", "max_line_length": 89, "num_lines": 199, "path": "/AirzoneCloud/Device.py", "repo_name": "gpulido/AirzoneCloud", "src_encoding": "UTF-8", "text": "import logging\nfrom .System import System\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Device:\n \"\"\"Manage a AirzoneCloud device\"\"\"\n\n _api = None\n _data = {}\n _systems = []\n\n def __init__(self, api, data):\n self._api = api\n\n # remove weather (huge array with all translates)\n if \"data\" in data and \"data\" in data[\"data\"]:\n data[\"data\"][\"data\"].pop(\"weather\", True)\n\n self._data = data\n\n # log\n _LOGGER.info(\"Init {}\".format(self.str_complete))\n _LOGGER.debug(data)\n\n # load all systems\n self._load_systems()\n\n def __str__(self):\n return \"Device(name={}, status={})\".format(self.name, self.status)\n\n @property\n def str_complete(self):\n return \"Device(name={}, status={}, id={}, mac={})\".format(\n self.name, self.status, self.id, 
self.mac,\n )\n\n #\n # getters\n #\n\n @property\n def id(self):\n \"\"\" Return device id \"\"\"\n return self._data.get(\"id\")\n\n @property\n def name(self):\n \"\"\" Return device name \"\"\"\n return self._data.get(\"name\")\n\n @property\n def status(self):\n \"\"\" Return device status \"\"\"\n return self._data.get(\"status\")\n\n @property\n def location(self):\n \"\"\" Return device location \"\"\"\n return self._data.get(\"complete_name\")\n\n @property\n def mac(self):\n \"\"\" Return device mac \"\"\"\n return self._data.get(\"mac\")\n\n @property\n def pin(self):\n \"\"\" Return device pin code \"\"\"\n return self._data.get(\"pin\")\n\n @property\n def target_temperature(self):\n \"\"\" Return device target temperature \"\"\"\n return self._data.get(\"consign\")\n\n @property\n def firmware_ws(self):\n \"\"\" Return webserver device \"\"\"\n return self._data.get(\"firm_ws\")\n\n @property\n def has_eco(self):\n return self._data.get(\"has_eco\")\n\n @property\n def has_velocity(self):\n return self._data.get(\"has_velocity\")\n\n @property\n def has_airflow(self):\n return self._data.get(\"has_air_flow\")\n\n @property\n def has_farenheit(self):\n return self._data.get(\"has_harenheit\")\n\n @property\n def sync_datetime(self):\n \"\"\" Return True if device datetime is sync with AirzoneCloud \"\"\"\n return self._data.get(\"sync_datetime\")\n\n #\n # children\n #\n\n @property\n def systems(self):\n return self._systems\n\n #\n # Refresh\n #\n\n def refresh(self, refresh_systems=True):\n \"\"\" Refresh current device data (call refresh_devices on parent AirzoneCloud) \"\"\"\n self._api.refresh_devices()\n if refresh_systems:\n self.refresh_systems()\n\n def refresh_systems(self):\n \"\"\" Refresh all systems of this device \"\"\"\n self._load_systems()\n\n #\n # private\n #\n\n def _load_systems(self):\n \"\"\"Load all systems for this device\"\"\"\n current_systems = self._systems\n self._systems = []\n try:\n for system_data in self._api._get_systems(self.id):\n system = None\n # search system in current_systems (if where are refreshing systems)\n for current_system in current_systems:\n if current_system.id == system_data.get(\"id\"):\n system = current_system\n system._set_data_refreshed(system_data)\n break\n # system not found => instance new system\n if system is None:\n system = System(self._api, self, system_data)\n self._systems.append(system)\n except RuntimeError:\n raise Exception(\n \"Unable to load systems of device {} ({}) from AirzoneCloud\".format(\n self.name, self.id\n )\n )\n return self._systems\n\n def _set_data_refreshed(self, data):\n \"\"\" Set data refreshed (call by parent AirzoneCloud on refresh_devices()) \"\"\"\n self._data = data\n _LOGGER.info(\"Data refreshed for {}\".format(self.str_complete))\n\n\n#\n# device raw data example\n#\n\n# {\n# \"id\": \"...\",\n# \"mac\": \"AA:BB:CC:DD:EE:FF\",\n# \"pin\": \"1234\",\n# \"name\": \"Home\",\n# \"icon\": 5,\n# \"consign\": \"19.0\",\n# \"sync_datetime\": True,\n# \"remote_control\": False,\n# \"firm_ws\": \"3.173\",\n# \"status\": \"activated\",\n# \"connection_date\": \"2020-04-18T08:58:15.000+00:00\",\n# \"has_eco\": True,\n# \"has_velocity\": False,\n# \"spot_name\": \"Marseille\",\n# \"complete_name\": \"Marseille,Bouches-du-Rhône,Provence-Alpes-Côte d'Azur,France\",\n# \"country_code\": \"FR\",\n# \"electricity_prices\": {},\n# \"location\": {\"latitude\": 43.00000000000000, \"longitude\": 5.00000000000000},\n# \"data\": {\n# \"data\": {\n# \"time_zone\": [\n# {\n# \"localtime\": 
\"2020-04-18 05:34\",\n# \"utcOffset\": \"2.0\",\n# \"zone\": \"Europe/Paris\",\n# }\n# ]\n# }\n# },\n# \"modes\": \"00001111111011\",\n# \"has_air_flow\": False,\n# \"has_scene\": False,\n# \"has_farenheit\": False,\n# }\n" }, { "alpha_fraction": 0.49870365858078003, "alphanum_fraction": 0.5097225904464722, "avg_line_length": 25.600000381469727, "blob_id": "54940b82492c2e91f78d2c4d69979a3e94b498e8", "content_id": "71e3874857b1a2a12d8861ab9dc807627fc23dd1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7714, "license_type": "permissive", "max_line_length": 144, "num_lines": 290, "path": "/AirzoneCloud/Zone.py", "repo_name": "gpulido/AirzoneCloud", "src_encoding": "UTF-8", "text": "import logging\nfrom .contants import MODES_CONVERTER\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Zone:\n \"\"\"Manage a Airzonecloud zone\"\"\"\n\n _api = None\n _system = None\n _data = {}\n\n def __init__(self, api, system, data):\n self._api = api\n self._system = system\n self._data = data\n\n # log\n _LOGGER.info(\"Init {}\".format(self.str_complete))\n _LOGGER.debug(data)\n\n def __str__(self):\n return \"Zone(name={}, is_on={}, mode={}, current_temp={}, target_temp={})\".format(\n self.name,\n self.is_on,\n self.mode,\n self.current_temperature,\n self.target_temperature,\n )\n\n @property\n def str_complete(self):\n return \"Zone(name={}, is_on={}, mode={}, current_temperature={} target_temperature={}, id={}, system_number={}, zone_number={})\".format(\n self.name,\n self.is_on,\n self.mode,\n self.current_temperature,\n self.target_temperature,\n self.id,\n self.system_number,\n self.zone_number,\n )\n\n #\n # getters\n #\n\n @property\n def name(self):\n return self._data.get(\"name\")\n\n @property\n def current_temperature(self):\n if self._data.get(\"temp\") is not None:\n return float(self._data.get(\"temp\"))\n return None\n\n @property\n def current_humidity(self):\n if self._data.get(\"humidity\") is not None:\n return float(self._data.get(\"humidity\"))\n return None\n\n @property\n def target_temperature(self):\n if self._data.get(\"consign\") is not None:\n return float(self._data.get(\"consign\"))\n return None\n\n @property\n def max_temp(self):\n if self._data.get(\"upper_conf_limit\") is not None:\n return float(self._data.get(\"upper_conf_limit\"))\n return None\n\n @property\n def min_temp(self):\n if self._data.get(\"lower_conf_limit\") is not None:\n return float(self._data.get(\"lower_conf_limit\"))\n return None\n\n @property\n def is_on(self):\n return bool(int(self._data.get(\"state\", 0)))\n\n @property\n def mode(self):\n return MODES_CONVERTER[self.mode_raw][\"name\"]\n\n @property\n def mode_description(self):\n return MODES_CONVERTER[self.mode_raw][\"description\"]\n\n @property\n def mode_raw(self):\n return self._data.get(\"mode\")\n\n @property\n def id(self):\n return self._data.get(\"id\")\n\n @property\n def device_id(self):\n return self._data.get(\"device_id\")\n\n @property\n def system_number(self):\n return self._data.get(\"system_number\")\n\n @property\n def zone_number(self):\n return self._data.get(\"zone_number\")\n\n #\n # setters\n #\n\n def turn_on(self):\n \"\"\" Turn zone on \"\"\"\n _LOGGER.info(\"call turn_on() on {}\".format(self.str_complete))\n self._send_event(\"state\", 1)\n self._data[\"state\"] = \"1\"\n return True\n\n def turn_off(self):\n \"\"\" Turn zone off \"\"\"\n _LOGGER.info(\"call turn_off() on {}\".format(self.str_complete))\n self._send_event(\"state\", 0)\n 
self._data[\"state\"] = \"0\"\n return True\n\n def set_temperature(self, temperature):\n \"\"\" Set target_temperature for this zone \"\"\"\n _LOGGER.info(\n \"call set_temperature({}) on {}\".format(temperature, self.str_complete)\n )\n temperature = float(temperature)\n if self.min_temp is not None and temperature < self.min_temp:\n temperature = self.min_temp\n if self.max_temp is not None and temperature > self.max_temp:\n temperature = self.max_temp\n self._send_event(\"consign\", temperature)\n self._data[\"consign\"] = str(temperature)\n return True\n\n #\n # parent system\n #\n\n @property\n def system(self):\n \"\"\" Get parent system \"\"\"\n return self._system\n\n #\n # Refresh zone data\n #\n\n def refresh(self):\n \"\"\" Refresh current zone data (call refresh on parent system) \"\"\"\n self.system.refresh()\n\n #\n # private\n #\n\n def _send_event(self, option, value):\n \"\"\" Send an event for current zone \"\"\"\n payload = {\n \"event\": {\n \"cgi\": \"modzona\",\n \"device_id\": self.device_id,\n \"system_number\": self.system_number,\n \"zone_number\": self.zone_number,\n \"option\": option,\n \"value\": value,\n }\n }\n return self._api._send_event(payload)\n\n def _set_data_refreshed(self, data):\n \"\"\" Set data refreshed (call by parent system on refresh_zones()) \"\"\"\n self._data = data\n _LOGGER.info(\"Data refreshed for {}\".format(self.str_complete))\n\n\n#\n# Zone raw data example\n#\n\n# {\n# \"id\": \"...\",\n# \"system_id\": \"...\",\n# \"device_id\": \"...\",\n# \"modes\": \"1111111011\",\n# \"warning\": \"0\",\n# \"name\": \"Living room\",\n# \"system_number\": \"1\",\n# \"zone_number\": \"6\",\n# \"state\": \"1\",\n# \"consign\": \"21.5\",\n# \"temp\": \"21.4\",\n# \"mode\": \"5\",\n# \"velocity\": None,\n# \"show_velocity\": None,\n# \"sleep\": \"0\",\n# \"lower_conf_limit\": \"18.0\",\n# \"upper_conf_limit\": \"30.0\",\n# \"master\": \"1\",\n# \"velMax\": None,\n# \"eco\": \"2\",\n# \"prog_enabled\": \"1\",\n# \"speed_prog_mode\": \"0\",\n# \"show_ventilation\": \"1\",\n# \"updated_at\": 1587190474,\n# \"setup_type\": \"0\",\n# \"class\": \"Zone\",\n# \"last_update\": 1587190474,\n# \"next_schedule_number\": 4,\n# \"led\": None,\n# \"offset\": None,\n# \"cold_offset\": None,\n# \"heat_offset\": None,\n# \"scene\": None,\n# \"air_flow\": None,\n# \"humidity\": \"42\",\n# \"coldConsign\": \"\",\n# \"heatConsign\": \"\",\n# \"auto\": None,\n# \"temperature_unit\": None,\n# \"vla\": None,\n# \"config\": {\n# \"id\": \"...\",\n# \"cold_values\": \"1\",\n# \"heat_values\": \"1\",\n# \"cold_angle\": None,\n# \"heat_angle\": None,\n# \"swing_horizontal\": None,\n# \"swing_vertical\": None,\n# \"antifreeze\": \"0\",\n# \"vla\": None,\n# \"zone_number\": \"6\",\n# \"slave\": None,\n# \"master\": None,\n# \"basic_mode\": \"0\",\n# \"ambient_temp\": \"24.6\",\n# \"heat_type\": None,\n# \"cold_type\": None,\n# \"heat_type_config\": \"1\",\n# \"cold_type_config\": \"1\",\n# \"ventilation\": None,\n# \"q_weight\": None,\n# \"window\": None,\n# \"presence\": None,\n# \"spray_dew\": None,\n# \"local_vent\": None,\n# \"tact_fw\": \"3. 
7\",\n# \"firm_lm\": None,\n# \"manufacturer\": None,\n# \"led\": None,\n# \"velMax\": None,\n# \"confort_cold_consign\": None,\n# \"confort_heat_consign\": None,\n# \"eco_cold_consign\": None,\n# \"eco_heat_consign\": None,\n# \"unocupied_cold_consign\": None,\n# \"unocupied_heat_consign\": None,\n# \"vacation_cold_consign\": None,\n# \"vacation_heat_consign\": None,\n# \"firm_ws\": \"3.173\",\n# \"offset\": None,\n# \"errors\": \"0\",\n# \"zone_id\": \"...\",\n# \"automatic_weight\": None,\n# \"autochange_differential\": None,\n# \"offset_environment_cold\": None,\n# \"offset_environment_heat\": None,\n# \"eco_function\": None,\n# \"heat_constant_ventilation\": None,\n# \"cold_constant_ventilation\": None,\n# \"v_min_module_010\": None,\n# \"v_max_module_010\": None,\n# \"cold_battery_temperature\": None,\n# \"heat_battery_temperature\": None,\n# \"VAF_coldstage\": None,\n# \"VAF_heatstage\": None,\n# \"VAF_radiantstage\": None,\n# },\n# }\n" }, { "alpha_fraction": 0.5765193104743958, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 43.69135665893555, "blob_id": "05cdfac42167b29ad57547b1f27a865bf74f0961", "content_id": "b21ef1fc7575f8bfc7d3ad6a428080f9a48687c1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3620, "license_type": "permissive", "max_line_length": 295, "num_lines": 81, "path": "/AirzoneCloud/contants.py", "repo_name": "gpulido/AirzoneCloud", "src_encoding": "UTF-8", "text": "API_LOGIN = \"/users/sign_in\"\nAPI_DEVICE_RELATIONS = \"/device_relations\"\nAPI_SYSTEMS = \"/systems\"\nAPI_ZONES = \"/zones\"\nAPI_EVENTS = \"/events\"\n\n# 2020-04-18: extracted from https://airzonecloud.com/assets/application-506494af86e686bf472b872d02048b42.js\n\nMODES_CONVERTER = {\n \"0\": {\"name\": \"stop\", \"description\": \"Stop\"},\n \"1\": {\"name\": \"cool-air\", \"description\": \"Air cooling\"},\n \"2\": {\"name\": \"heat-radiant\", \"description\": \"Radiant heating\"},\n \"3\": {\"name\": \"ventilate\", \"description\": \"Ventilate\"},\n \"4\": {\"name\": \"heat-air\", \"description\": \"Air heating\"},\n \"5\": {\"name\": \"heat-both\", \"description\": \"Combined heating\"},\n \"6\": {\"name\": \"dehumidify\", \"description\": \"Dry\"},\n \"7\": {\"name\": \"not_exit\", \"description\": \"\"},\n \"8\": {\"name\": \"cool-radiant\", \"description\": \"Radiant cooling\"},\n \"9\": {\"name\": \"cool-both\", \"description\": \"Combined cooling\"},\n}\n\nSCHEDULE_MODES_CONVERTER = {\n \"0\": {\"name\": \"\", \"description\": \"\"},\n \"1\": {\"name\": \"stop\", \"description\": \"Stop\"},\n \"2\": {\"name\": \"ventilate\", \"description\": \"Ventilate\"},\n \"3\": {\"name\": \"cool-air\", \"description\": \"Air cooling\"},\n \"4\": {\"name\": \"heat-air\", \"description\": \"Air heating\"},\n \"5\": {\"name\": \"heat-radiant\", \"description\": \"Radiant heating\"},\n \"6\": {\"name\": \"heat-both\", \"description\": \"Combined heating\"},\n \"7\": {\"name\": \"dehumidify\", \"description\": \"Dry\"},\n \"8\": {\"name\": \"cool-radiant\", \"description\": \"Radiant cooling\"},\n \"9\": {\"name\": \"cool-both\", \"description\": \"Combined cooling\"},\n}\n\nVELOCITIES_CONVERTER = {\n \"0\": {\"name\": \"auto\", \"description\": \"Auto\"},\n \"1\": {\"name\": \"velocity-1\", \"description\": \"Low speed\"},\n \"2\": {\"name\": \"velocity-2\", \"description\": \"Medium speed\"},\n \"3\": {\"name\": \"velocity-3\", \"description\": \"High speed\"},\n}\n\nAIRFLOW_CONVERTER = {\n \"0\": {\"name\": \"airflow-0\", 
\"description\": \"Silence\"},\n \"1\": {\"name\": \"airflow-1\", \"description\": \"Standard\"},\n \"2\": {\"name\": \"airflow-2\", \"description\": \"Power\"},\n}\n\nECO_CONVERTER = {\n \"0\": {\"name\": \"eco-off\", \"description\": \"Eco off\"},\n \"1\": {\"name\": \"eco-m\", \"description\": \"Eco manual\"},\n \"2\": {\"name\": \"eco-a\", \"description\": \"Eco A\"},\n \"3\": {\"name\": \"eco-aa\", \"description\": \"Eco A+\"},\n \"4\": {\"name\": \"eco-aaa\", \"description\": \"Eco A++\"},\n}\n\nSCENES_CONVERTER = {\n \"0\": {\n \"name\": \"stop\",\n \"description\": \"The air-conditioning system will remain switched off regardless of the demand status of any zone, all the motorized dampers will remain opened\",\n },\n \"1\": {\n \"name\": \"confort\",\n \"description\": \"Default and standard user mode. The desired set point temperature can be selected using the predefined temperature ranges\",\n },\n \"2\": {\n \"name\": \"unocupied\",\n \"description\": \"To be used when there is no presence detected for short periods of time. A more efficient set point temperature will be set. If the thermostat is activated, the zone will start running in comfort mode\",\n },\n \"3\": {\n \"name\": \"night\",\n \"description\": \"The system automatically changes the set point temperature 0.5\\xba C/1\\xba F every 30 minutes in up to 4 increments of 2\\xba C/4\\xba F in 2 hours. When cooling, the system increases the set point temperature; when heating, the system decreases the set point temperature\",\n },\n \"4\": {\n \"name\": \"eco\",\n \"description\": \"The range of available set point temperatures change for more efficient operation\",\n },\n \"5\": {\n \"name\": \"vacation\",\n \"description\": \"This mode feature saves energy while the user is away for extended periods of time\",\n },\n}\n" } ]
8
Molipow/PyBot
https://github.com/Molipow/PyBot
1013e7c775d7cfe10e5c0f144494c61404bd5a53
e985b6cf1fb7361dc806123e3dec86635dc85545
b4094d4740002a1e691469366a9f5620b14bf527
refs/heads/master
2021-01-04T14:34:12.715974
2020-02-22T12:20:22
2020-02-22T12:20:22
240,590,450
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5727771520614624, "alphanum_fraction": 0.5787047147750854, "avg_line_length": 38.61738967895508, "blob_id": "6fc66f5295ef30ef57e685da638580b79e441162", "content_id": "ead96ca040f910ddad63864c18f4886f7fff242a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4622, "license_type": "no_license", "max_line_length": 327, "num_lines": 115, "path": "/cogs/utilities.py", "repo_name": "Molipow/PyBot", "src_encoding": "UTF-8", "text": "import discord\nimport random\nimport math\nimport time\nimport json\nfrom discord.ext import commands\n\nclass Utilities(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n\n @commands.Cog.listener()\n async def on_ready(self):\n print('Utilities module loaded')\n\n @commands.command()\n async def pong(self, ctx):\n await ctx.send(\"Ping.\")\n\n @commands.command()\n async def ping(self, ctx):\n responses = [\n \"Pong.\",\n \"Pong!\",\n \"Pong?\"\n ]\n await ctx.send(responses[random.randint(0, 2)])\n \n @commands.command()\n async def rzuć(self, ctx, max_roll):\n try:\n max_roll = int(max_roll)\n except ValueError:\n await ctx.send(\"```To nie jest liczba.```\")\n return\n result = random.randint(1, max_roll)\n response = \"\"\n if result <= math.floor(max_roll/3):\n response = \"Słabo.\"\n elif math.floor(max_roll / 3) < result <= math.ceil(max_roll / 3 * 2):\n response = \"Całkiem nieźle.\"\n else:\n response = \"Spoko jest.\"\n\n await ctx.send(f\"```Wyrzuciłeś {result}. {response}```\")\n \n @commands.command()\n @commands.has_permissions(manage_messages = True)\n async def czyść(self, ctx, amount = 5):\n await ctx.channel.purge(limit = amount+1)\n \n @commands.command()\n async def pomoc(self, ctx, command_for_help = None):\n if command_for_help == None:\n await ctx.send(\"```Komendy:\\n czyść\\n ping\\n pong\\n pomoc\\n remindme\\n rzuć\\n operator\\n syntax\\nNapisz `!py pomoc [komenda]` aby dostać dokładniejsze informacje.```\")\n elif command_for_help == \"czyść\":\n await ctx.send(\"```Komenda czyść\\n czyść <liczba>\\nCzyści n(domyślnie 5) wiadomości z czatu.```\")\n elif command_for_help == \"ping\":\n await ctx.send(\"```Komenda ping\\n ping\\nPong!```\")\n elif command_for_help == \"pong\":\n await ctx.send(\"```Komenda pong\\n pong\\nPing!```\")\n elif command_for_help == \"pomoc\":\n await ctx.send(\"```Komenda pomoc\\n pomoc\\nWyświetla listę komend.```\")\n elif command_for_help == \"remindme\":\n await ctx.send(\"```Komenda remindme\\n remindme [czas] [wiadomość] <kanał>\\nUstawia przypomnienie wraz z wiadomością. Jeśli wiadomość zawiera więcej niż jedno słowo, nalezy ją umieścić w cudzysłowie. Jeśli kanał nie zostanie wybrany, przypomnienie zostanie wysłane na kanale z którego została wykonana komenda.```\")\n elif command_for_help == \"rzuć\":\n await ctx.send(\"```Komenda rzuć\\n rzuć [liczba]\\nRzuca kostką n-ścienną.```\")\n elif command_for_help == \"operator\":\n await ctx.send(\"```Komenda operator\\n operator [operator]\\nWyświetla krótki opis operatora. !py operator aby wyświetlić listę operatorów```\")\n elif command_for_help == \"syntax\":\n await ctx.send(\"```Komenda syntax\\n syntax [wyrażenie]\\nWyświetla krótki opis wyrażenia. 
!py syntax aby wyświetlić listę wyrażeń```\")\n else:\n await ctx.send(\"```Nie znaleziono komendy.```\")\n\n @commands.command()\n async def github(self, ctx):\n await ctx.send(\"https://github.com/Molipow/PyBot\")\n\n @commands.command()\n async def remindme(self, ctx, remind_time, message, channel = None):\n unit = remind_time[-1:]\n time_to_add_raw = int(remind_time[:-1])\n if unit == 's' or unit == 'S':\n time_to_add = time_to_add_raw\n elif unit == 'm' or unit == 'M':\n time_to_add = time_to_add_raw * 60\n elif unit == 'h' or unit == 'H':\n time_to_add = time_to_add_raw * 60 * 60\n elif unit == 'd' or unit == 'D':\n time_to_add = time_to_add_raw * 60 * 60 * 24\n else:\n time_to_add = time_to_add_raw\n \n remind_date = time.time() + time_to_add\n\n with open(\"remind.json\", \"r\") as f:\n reminds = json.load(f)\n\n if channel == None:\n channel = ctx.channel\n else:\n channel = channel[2:]\n channel = channel[:-1]\n channel = self.client.get_channel(int(channel))\n \n reminds[str(remind_date)] = message, str(channel.id), str(ctx.author.id)\n\n with open(\"remind.json\", \"w\") as f:\n json.dump(reminds, f, indent=4)\n\n await ctx.send(f\"Przypomne za {remind_time} na kanale {channel.mention}. Wiadomość: {message}\")\n \ndef setup(client):\n client.add_cog(Utilities(client))" }, { "alpha_fraction": 0.6169264912605286, "alphanum_fraction": 0.6236079931259155, "avg_line_length": 21.5, "blob_id": "7a7e63fff0fe3a5903ddce305da7ad35cd2c96bb", "content_id": "d31b707f5003126f4225ca4aa650ccbe1b23c83e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 449, "license_type": "no_license", "max_line_length": 53, "num_lines": 20, "path": "/cogs/example.py", "repo_name": "Molipow/PyBot", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\n\nclass Example(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n\n @commands.Cog.listener()\n async def on_ready(self):\n print('Example module loaded')\n\n @commands.command()\n async def riddle1(self, ctx):\n await ctx.send(file=discord.File(fp=\"1.png\" \\\n # , filename=\"1.png\" \\\n ))\n\ndef setup(client):\n client.add_cog(Example(client))" }, { "alpha_fraction": 0.606367826461792, "alphanum_fraction": 0.6227321028709412, "avg_line_length": 84.19696807861328, "blob_id": "488ee3321d6536d0c4ec1a5b9cf249be2ffde3e8", "content_id": "b9595179293b9d4ec399d575fbd929e099796351", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5710, "license_type": "no_license", "max_line_length": 522, "num_lines": 66, "path": "/cogs/pyhelp.py", "repo_name": "Molipow/PyBot", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\n\nclass PyHelp(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n\n @commands.Cog.listener()\n async def on_ready(self):\n print('PyHelp module loaded')\n\n @commands.command()\n async def syntax(self, ctx, keyword = None):\n if keyword == None:\n await ctx.send(\"```!py syntax [wyrażenie]\\nDostępne wyrażenia:\\nif\\nelif\\nelse```\")\n elif keyword == \"if\":\n await ctx.send(\"```py\\nif warunek:\\n kod```\\n`if - Służy do tworzenia instrukcji warunkowych.`\\nPrzykład:\\n```py\\nif 2>1:\\n print('prawda')\\nprint('poza ifem')``` ```WAŻNE! 
Przy porównywaniu wartości należy użyć '==' a nie '='.```\")\n elif keyword == \"elif\":\n await ctx.send(\"```py\\nif warunek:\\n kod\\nelif inny_warunek:\\n inny_kod```\\n`elif - Jeśli poprzedni warunek okazał się fałszywy, sprawdzamy inny. Jeśli jednak byl prawdziwy, pomijamy sprawdzanie tych.`\\nPrzykłady:\\n```py\\nif False:\\n print('Tej wiadomości nie zobaczymy')\\nelif True:\\n print('Tą wiadomość zobaczymy!')```\\n```py\\nif True:\\n print('Tą zobaczymy')\\nelif True:\\n print('A tej nie')```\\n```py\\nif False:\\n print('Tej nie zobaczymy')\\nelif False:\\n print('Tej też nie')```\")\n elif keyword == \"else\":\n await ctx.send(\"```py\\nif warunek:\\n kod\\nelse:\\n inny_kod```\\n`else - Jeśli poprzednie warunki są fałszywe, wykonujemy ten blok`\\nPrzykład:\\n```py\\nif False:\\n print('To się nie wykona')\\nelse:\\n print('To już tak')```\\n```py\\nif True:\\n print('To się wykona')\\nelse:\\n print('To już nie')```\")\n else:\n await ctx.send(\"Nieznane wyrażenie. Jeśli uważasz, że powinno ono tu być, napisz do @Molipow#3955\")\n\n @commands.command()\n async def operator(self, ctx, keyword = None):\n if keyword == None:\n await ctx.send(\"```!py operator [operator]\\nDostępne operatory:\\n+\\n-\\n*\\n/\\n%\\n**\\n//\\n==\\n!=\\n>\\n<\\n>=\\n<=\\nand\\nor\\nnot```\")\n elif keyword == \"+\":\n await ctx.send(\"`+ - operator arytmetyczny. Dodaje 2 liczby do siebie`\\nPrzykład:\\n```py\\nprint(2+3) # wyjdzie 5```\")\n elif keyword == \"-\":\n await ctx.send(\"`- - operator arytmetyczny. Odejmuje 2 liczby od siebie`\\nPrzykład:\\n```py\\nprint(2-3) # wyjdzie -1```\")\n elif keyword == \"*\":\n await ctx.send(\"`* - operator arytmetyczny. Mnoży 2 liczby przez siebie`\\nPrzykład:\\n```py\\nprint(2*3) # wyjdzie 6```\")\n elif keyword == \"/\":\n await ctx.send(\"`/ - operator arytmetyczny. Dodaje 2 liczby do siebie`\\nPrzykład:\\n```py\\nprint(6/3) # wyjdzie 2.0\\n # operator '/' zwraca wynik jako float```\")\n elif keyword == \"%\":\n await ctx.send(\"`% - operator arytmetyczny. Zwraca resztę z dzielenia liczby 1 przez liczbę 2`\\nPrzykład:\\n```py\\nprint(3%2) # wyjdzie 1```\")\n elif keyword == \"**\":\n await ctx.send(\"`** - operator arytmetyczny. Zwraca liczbę 1 do potęgi liczby 2`\\nPrzykład:\\n```py\\nprint(2**3) # wyjdzie 8```\")\n elif keyword == \"//\":\n await ctx.send(\"`// - operator arytmetyczny. Zwraca całości z dzielenia liczby 1 przez liczbę 2`\\nPrzykład:\\n```py\\nprint(3//2) # wyjdzie 1```\")\n elif keyword == \"==\":\n await ctx.send(\"`== - operator porównania. Sprawdza czy argumenty są równe. Zwraca True albo False`\\nPrzykład:\\n```py\\nprint(2==3) # wyjdzie False\\nprint(3==3) # wyjdzie True```\")\n elif keyword == \"!=\":\n await ctx.send(\"`!= - operator porównania. Sprawdza czy argumenty są różne. Zwraca True albo False`\\nPrzykład:\\n```py\\nprint(2==3) # wyjdzie True\\nprint(3==3) # wyjdzie False```\")\n elif keyword == \">\":\n await ctx.send(\"`> - operator porównania. Sprawdza czy argument po lewej jest większy. Zwraca True albo False`\\nPrzykład:\\n```py\\nprint(2>3) # wyjdzie False\\nprint(3>2) # wyjdzie True```\")\n elif keyword == \"<\":\n await ctx.send(\"`< - operator porównania. Sprawdza czy argument po prawej jest większy. Zwraca True albo False`\\nPrzykład:\\n```py\\nprint(2<3) # wyjdzie True\\nprint(3<2) # wyjdzie False```\")\n elif keyword == \">=\":\n await ctx.send(\"`>= - operator porównania. Sprawdza czy argument po lewej jest większy lub równy. 
Zwraca True albo False`\\nPrzykład:\\n```py\\nprint(2>=3) # wyjdzie False\\nprint(3>=3) # wyjdzie True```\")\n elif keyword == \"<=\":\n await ctx.send(\"`<= - operator porównania. Sprawdza czy argument po prawej jest większy lub równy. Zwraca True albo False`\\nPrzykład:\\n```py\\nprint(2<=3) # wyjdzie True\\nprint(3<=3) # wyjdzie True```\")\n elif keyword == \"and\":\n await ctx.send(\"`and - operator logiczny. Sprawdza czy lewa i prawa strona są prawdziwe. Zwraca True albo False`\\nPrzykład:\\n```py\\nprint(2<3 and 3==3) # wyjdzie True\\nprint(2<3 and 3!=3) # wyjdzie False```\")\n elif keyword == \"or\":\n await ctx.send(\"`or - operator logiczny. Sprawdza czy lewa lub prawa strona są prawdziwe. Zwraca True albo False`\\nPrzykład:\\n```py\\nprint(2<3 or 3==3) # wyjdzie True\\nprint(2<3 or 3!=3) # wyjdzie True```\")\n elif keyword == \"not\":\n await ctx.send(\"`not - operator logiczny. Neguje wartość. Zwraca True albo False`\\nPrzykład:\\n```py\\nprint(not False) # wyjdzie True\\nprint(not 2<3) # wyjdzie False\\nprint(not 2<3 or 3==3) # wyjdzie True\\nprint(not (2<3 or 3==3)) # wyjdzie False```\\n`(nawiasy mają znaczenie ;p)`\")\n else:\n await ctx.send(\"Nieznany operator. Jeśli uważasz, że powinien on tu być, napisz do @Molipow#3955\")\n\ndef setup(client):\n client.add_cog(PyHelp(client))" }, { "alpha_fraction": 0.6697247624397278, "alphanum_fraction": 0.6884403824806213, "avg_line_length": 31.452381134033203, "blob_id": "c0e69701431e6858fc02718c5e8596a5b43c056c", "content_id": "6128250f9c2545af089f124c4e7f24d22b48d655", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2739, "license_type": "no_license", "max_line_length": 142, "num_lines": 84, "path": "/bot.py", "repo_name": "Molipow/PyBot", "src_encoding": "UTF-8", "text": "import discord\nimport os\nimport json\nimport time\nfrom discord.ext import commands, tasks\n\n# https://discordapp.com/oauth2/authorize?client_id=677895248088662025&permissions=8&scope=bot Add bot with Admin permission\n# https://discordapp.com/api/oauth2/authorize?client_id=677895248088662025&permissions=518208&scope=bot Add bot with Non-admin permissions\n\nclient = commands.Bot(command_prefix = '!py ')\n\n@client.event\nasync def on_ready():\n check_reminds.start()\n print(\"Bot working\")\n await client.change_presence(status=discord.Status.online, activity=discord.Game(\"!py pomoc\"))\n\n@client.event\nasync def on_member_join(member):\n print(f'{member} dołączył')\n\n@client.event\nasync def on_member_remove(member):\n print(f'{member} wyszedł')\n\n@client.command(aliases=[\"reset\",\"restart\", \"reboot\"])\n@commands.has_permissions(administrator = True)\nasync def _reset(ctx):\n await ctx.send(\"Restartowanie bota\")\n await client.close()\n os.system(\"cls\")\n os.system(\"echo Bot restarting\")\n os.system(\"python bot.py\")\n\n@tasks.loop(seconds = 1)\nasync def check_reminds():\n to_pop=[]\n with open('remind.json', 'r') as f:\n reminds = json.load(f)\n for item in reminds:\n if float(item) <= float(time.time()):\n channel = client.get_channel(int(reminds[item][1]))\n author = client.get_user(int(reminds[item][2]))\n message = str(reminds[item][0])\n to_pop.append(item)\n await channel.send(f\"RemindMe! przez {author.mention}. 
Wiadomość: {message}\")\n for i in to_pop:\n reminds.pop(i)\n with open('remind.json', 'w') as f:\n json.dump(reminds, f, indent=4)\n\n@check_reminds.after_loop\nasync def check_reminds_restart():\n print('done!')\n print(check_reminds.failed())\n check_reminds.restart()\n\n@client.command()\n@commands.has_permissions(administrator = True)\nasync def load(ctx, extension):\n client.load_extension(f\"cogs.{extension}\")\n await ctx.send(f\"Włączono moduł {extension}\")\n\n@client.command()\n@commands.has_permissions(administrator = True)\nasync def unload(ctx, extension):\n client.unload_extension(f\"cogs.{extension}\")\n await ctx.send(f\"Wyłączono moduł {extension}\")\n\n@client.command()\n@commands.has_permissions(administrator = True)\nasync def reload(ctx, extension):\n client.unload_extension(f\"cogs.{extension}\")\n client.load_extension(f\"cogs.{extension}\")\n await ctx.send(f\"Przeładowano moduł {extension}\")\n\nfor filename in os.listdir(\"./cogs\"):\n if filename.endswith('.py'):\n client.load_extension(f\"cogs.{filename[:-3]}\")\n\nfile = open(\"D:/PROGRAMOWANIE/Python/token.txt\") # change to your token file\ntoken = file.readline()\nfile.close()\nclient.run(token)" }, { "alpha_fraction": 0.6163182854652405, "alphanum_fraction": 0.6284558176994324, "avg_line_length": 33.511627197265625, "blob_id": "46c9fa4bccbcfaf285f5024225031e3928f4482a", "content_id": "daabb53d9ff574ca7cda52451e9ec92ee0287e85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1483, "license_type": "no_license", "max_line_length": 169, "num_lines": 43, "path": "/cogs/voice.py", "repo_name": "Molipow/PyBot", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\n\nclass Voice(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n \n @commands.Cog.listener()\n async def on_ready(self):\n print('Voice module loaded')\n\n @commands.command() \n async def join(self, ctx): \n channel = ctx.author.voice.channel\n self.vc_connection = await channel.connect()\n\n @commands.command()\n async def play(self, ctx, song_name):\n is_connected = False\n song = discord.FFmpegPCMAudio(executable = \"D:\\\\Program Files\\\\ffmpeg-20200216-8578433-win64-static\\\\bin\\\\ffmpeg.exe\", source = f\"voice/{song_name.lower()}.mp3\")\n for connection in self.client.voice_clients:\n if connection.channel.id == ctx.author.voice.channel.id:\n connection.play(song)\n is_connected = True\n if not is_connected:\n vc_connection = await ctx.author.voice.channel.connect()\n vc_connection.play(song)\n\n @commands.command()\n async def stop(self, ctx):\n for connection in self.client.voice_clients:\n if connection.channel.id == ctx.author.voice.channel.id:\n connection.stop()\n\n @commands.command()\n async def leave(self, ctx):\n for connection in self.client.voice_clients:\n if connection.channel.id == ctx.author.voice.channel.id:\n await connection.disconnect()\n\ndef setup(client):\n client.add_cog(Voice(client))" } ]
5
ChrisAvalos/astr-119-hw-2
https://github.com/ChrisAvalos/astr-119-hw-2
01ad108e6abf200db6ac9ced6c373205dd77ba3a
4c1d1f8f633f4f48d23d401abc75c052db9aa5aa
7e5481cb78c59a9dabcbac7a8c9da9cd0edb426b
refs/heads/main
2022-12-28T12:55:14.673977
2020-10-14T22:01:02
2020-10-14T22:01:02
304,142,553
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.748110830783844, "alphanum_fraction": 0.748110830783844, "avg_line_length": 18.799999237060547, "blob_id": "d9aeb712f2ad33820efe99bc3297154524d5b388", "content_id": "c85391834bce7ad7bc00f160d6284e10e18efd38", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 397, "license_type": "permissive", "max_line_length": 55, "num_lines": 20, "path": "/exceptions.py", "repo_name": "ChrisAvalos/astr-119-hw-2", "src_encoding": "UTF-8", "text": "#python exceptions let you deal with unexpected results\n\n\ntry:\n\tprint(a)\t#this will throw an exception\nexcept:\n\tprint(\"a is not defined\")\n\n\n#there are specific errors in python to help with cases\n\ntry:\n\tprint(a)\t#this will throw a NameError\nexcept NameError:\n\tprint(\"a is still not defined\")\nexcept:\n\tprint(\"something else went wrong!\")\n\n#This will break our program since a not defined\nprint(a)\n\n" }, { "alpha_fraction": 0.4000000059604645, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 9, "blob_id": "e37a67f39dec81bbfd0eac0fef6a93c027d2a389", "content_id": "2932e28cb378686be48f4340ffb7a0b36c928ca7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "permissive", "max_line_length": 15, "num_lines": 2, "path": "/README.md", "repo_name": "ChrisAvalos/astr-119-hw-2", "src_encoding": "UTF-8", "text": "# astr-119-hw-2\nhw2\n" }, { "alpha_fraction": 0.6806020140647888, "alphanum_fraction": 0.6872909665107727, "avg_line_length": 21.148147583007812, "blob_id": "9c4deea81d41373806f550f7b6d3dcf4f9241369", "content_id": "598b75e18d1440ae8d52e336b08c86fe3d511bb4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 598, "license_type": "permissive", "max_line_length": 64, "num_lines": 27, "path": "/functions.py", "repo_name": "ChrisAvalos/astr-119-hw-2", "src_encoding": "UTF-8", "text": "import numpy as np\nimport sys\n\n#define a function that returns a value\ndef expo(x):\t#x is an argument to the function\n\treturn np.exp(x)\t#return the e^x function\n\n#define a subroutine that does not return a value\ndef show_expo(n):\n\tfor i in range(n):\n\t\tprint(expo(float(i))) #call the expo function\n\n\n#define a main function\ndef main():\n\tn = 10 # provide a default calue for n\n\n\t# check if there is a command line arg\n\tif(len(sys.argv)>1):\n\t\tn = int(sys.argv[1]) #if additional arg provvided, set n = arg\n\n\t#print e^x n times\n\tshow_expo(n)\n\n#run the main function\nif __name__ == \"__main__\":\n\tmain()\n" }, { "alpha_fraction": 0.644817054271698, "alphanum_fraction": 0.6661585569381714, "avg_line_length": 28.727272033691406, "blob_id": "932dbf129514d5a922c39e9b720f54937153c3e3", "content_id": "6e62861323eecdf266d95f638a0536fbe786be2d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 656, "license_type": "permissive", "max_line_length": 64, "num_lines": 22, "path": "/data_types.py", "repo_name": "ChrisAvalos/astr-119-hw-2", "src_encoding": "UTF-8", "text": "import numpy as np \t\t#import numpy library\n\n#integers\ni = 10 \t\t\t#integer\nprint(\"The type of i is \",type(i))\t#print out the type\n\na_i = np.zeros(i,dtype=int)\t#an array of ints\nprint(\"The type of a_i is \",type(a_i))\t#print out the type\nprint(\"The type of a_i[0] is \",type(a_i[0]))\t#print out the type\n\n\n#floats\nx = 119.0 #floating point 
number\nprint(\"The type of x is \",type(x))\t#print out the type\n\ny = 1.19e2 #scientific notation float\nprint(\"The type of y is \",type(y)) \t#print out the type\n\n\nz = np.zeros(i,dtype=float)\t#declare an array of floats\nprint(\"The type of z is \",type(z))\t#print out the type\nprint(\"The type of z[0] is \",type(z[0]))\t#print out the type\n\n " } ]
4
JunZhenSun/tiff_Process
https://github.com/JunZhenSun/tiff_Process
9648c5c0da7821b8005997fca86112552df3addd
9cbb5dec945afff95c8b749c50a434f12e5c4f2f
ef10cd060903bfa5f461fbbcd0f7fdaa93a7d72e
refs/heads/master
2023-03-12T22:31:26.287691
2021-03-01T05:44:05
2021-03-01T05:44:05
343,304,764
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4915693998336792, "alphanum_fraction": 0.5538262128829956, "avg_line_length": 23.09375, "blob_id": "dbf9b33ac299c04be74131cb4b402e153bff7808", "content_id": "d6b683dff2a7b4dac385ef4f225cbe2d10f1944a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 933, "license_type": "no_license", "max_line_length": 63, "num_lines": 32, "path": "/tiff处理/001.tiff转png快速.py", "repo_name": "JunZhenSun/tiff_Process", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\n\n\ndef read_tif(imgPath): # 主函数 “三通道tiff”转“彩色png”\n img = cv2.imread(imgPath, 3) # 读取图片 imgpath为图片所在位置\n # print(img.dtype)\n\n # 特异值剔除\n img[img > 1450] = 1450\n img[img < 0] = 0\n\n min1 = img.min()\n max1 = 1450\n img = ((img - min1) / (max1 - min1) * 255).astype(np.uint8)\n im_shape = img.shape\n print(im_shape) # 显示图片大小和通道数 通道数为3\n b = img[:, :, 2] # 蓝通道\n g = img[:, :, 1] # 绿通道\n r = img[:, :, 0] # 红通道\n\n x = r + b + g\n a = ((1 - np.equal(x, 0)) * 255).astype(np.uint8)\n\n # 通道拼接 两种方法\n rgba = cv2.merge([b, g, r, a])\n\n # plt.matshow(rgba) # matplotlib的matshow()可以直接看矩阵而不用进行位数转换\n cv2.imwrite(\"C:/Users/dell/Desktop/1.png\", rgba) # 保存图片\n\n\nread_tif(r\"D:\\data\\007_anji\\img2018-05.tif\")\n" }, { "alpha_fraction": 0.5190725326538086, "alphanum_fraction": 0.5617052912712097, "avg_line_length": 21.6610164642334, "blob_id": "ed14d72f50e1eec58b147d4e4b2fb7d10ec98a74", "content_id": "e44e99df1446e285d5b14ea5dbf9088aca73bfe3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1545, "license_type": "no_license", "max_line_length": 77, "num_lines": 59, "path": "/tiff处理/001.tiff转png慢速.py", "repo_name": "JunZhenSun/tiff_Process", "src_encoding": "UTF-8", "text": "import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numba import jit\n\n\ndef image_pre(x, min, max): # 特异值剔除\n if x > max:\n return max\n if x < min:\n return min\n else:\n return x\n\n\n@jit(nopython=True) # 使用C++加快运行速度\ndef opti(img): # 设置透明背景\n for i in range(img.shape[0]):\n for j in range(img.shape[1]):\n if img[i, j, 0] == 0 and img[i, j, 1] == 0 and img[i, j, 2] == 0:\n img[i, j, 3] = 1.0\n return img\n\n\ndef read_tif(imgpath): # 主函数 “三通道tiff”转“彩色png”\n img = cv2.imread(imgpath, 3) # 读取图片 imgpath为图片所在位置\n print(img.dtype)\n\n fun1 = np.frompyfunc(image_pre, 3, 1)\n print(\"哈哈哈\")\n img = fun1(img, 0, 1450).astype(np.uint16)\n\n min1 = img.min()\n max1 = 1450\n img = (img - min1) / (max1 - min1)\n img = img * 255\n img = img.astype(np.uint8)\n im_shape = img.shape\n print(im_shape) # 显示图片大小和通道数 通道数为3\n b = img[:, :, 2] # 蓝通道\n g = img[:, :, 1] # 绿通道\n r = img[:, :, 0] # 红通道\n\n\n\n # 通道拼接 两种方法\n bgr = cv2.merge([b, g, r])\n\n rgba = cv2.cvtColor(bgr, cv2.COLOR_RGB2RGBA)\n print(type(rgba))\n print(im_shape[0])\n # 背景设为透明\n opti(rgba)\n\n plt.matshow(rgba) # matplotlib的matshow()可以直接看矩阵而不用进行位数转换\n cv2.imwrite(\"C:/Users/dell/Desktop/1.png\", rgba) # 保存图片\n\n\nread_tif(\"D:/data/ndvi/Image20200510.tif\")\n" }, { "alpha_fraction": 0.4378530979156494, "alphanum_fraction": 0.5847457647323608, "avg_line_length": 22.66666603088379, "blob_id": "648a81a9935919045b66e64a27eb363fa1b476da", "content_id": "e28c8bbc697bb030dc4a9348b36324cd85c27c90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 366, "license_type": "no_license", "max_line_length": 64, "num_lines": 15, "path": "/GEE/GEEexample.py", "repo_name": "JunZhenSun/tiff_Process", "src_encoding": "UTF-8", 
"text": "# -*- coding:utf-8 -*-\nimport ee\nimport os\nos.environ['HTTP_PROXY'] = 'http://127.0.0.1:1080'\nos.environ['HTTPS_PROXY'] = 'http://127.0.0.1:1080'\n\nee.Initialize()\nimage1 = ee.Image('srtm90_v4')\npath = image1.getDownloadUrl({\n 'scale': 30,\n 'crs': 'EPSG:4326',\n 'region': '[[-120, 35], [-119, 35], [-119, 34], [-120, 34]]'\n})\n# 获取下载地址\nprint(path)" }, { "alpha_fraction": 0.5940752625465393, "alphanum_fraction": 0.620496392250061, "avg_line_length": 25.02083396911621, "blob_id": "76e6afb2c5c1bdfacf5d53b023de91ea6846c743", "content_id": "24a94d60ddaa3325f444b6cf97490d6a8f5e57c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1371, "license_type": "no_license", "max_line_length": 105, "num_lines": 48, "path": "/知识图谱/对服装图像进行分类.py", "repo_name": "JunZhenSun/tiff_Process", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n\npath = \"C:/Users/dell/Desktop/转股价值与转股价的拟合图.csv\"\ndata = np.array(np.loadtxt(path, dtype=str, delimiter=',', skiprows=1, usecols=(5, 6), encoding='utf-8'))\nhead = np.array(np.loadtxt(path, dtype=str, delimiter=',', skiprows=1, usecols=2, encoding='utf-8'))\ndata = np.delete(data, [-1, -2], axis=0)\nhead = np.delete(head, [-1, -2], axis=0)\n\nj = 0\n# 缩短标签\nfor i in head:\n head[j] = i.split('转')[0]\n j = j + 1\nj = 0\nfor k in data[:, 1]:\n data[j, 1] = k.split('*')[0]\n j = j + 1\n\naxes = plt.gca()\ndatax = data[:, 0].astype(float)\ndatay = data[:, 1].astype(float)\n\nxmin = datax.min()\nxmax = datax.max()\nymin = datay.min()\nymax = datay.max()\n\n# 绘制拟合曲线\nparameter = np.polyfit(datax, datay, 2)\np = np.poly1d(parameter)\nx = np.array(range(int(xmin), int(xmax), 1))\nplt.plot(x, p(x), color='r')\n\nplt.xlabel('转股价值')\nplt.ylabel('转股价')\nplt.title('股价藏宝图')\n\nplt.scatter(datax, datay)\nfor i in range(len(datax)):\n plt.annotate(head[i], xy=(datax[i], datay[i]),\n xytext=(datax[i] + 0.1, datay[i] + 0.1)) # 这里xy是需要标记的坐标,xytext是对应的标签坐标\nplt.show()\nplt.legend(['拟合曲线', '散点图'])\nprint(data[-10:-1, :])\n" }, { "alpha_fraction": 0.6107311844825745, "alphanum_fraction": 0.6322987675666809, "avg_line_length": 35.55769348144531, "blob_id": "1919896d7dcac4324073d9d88e6f5680ac31aece", "content_id": "bd114a6b49e971096b38e710ca9e356beb2badce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2045, "license_type": "no_license", "max_line_length": 119, "num_lines": 52, "path": "/tiff处理/002.tiff添加透明背景波段.py", "repo_name": "JunZhenSun/tiff_Process", "src_encoding": "UTF-8", "text": "import gdal\nimport numpy as np\n\n\ndef readTif(fileName):\n dataset = gdal.Open(fileName)\n if dataset == None:\n print(fileName + \"文件无法打开\")\n return\n im_width = dataset.RasterXSize # 栅格矩阵的列数\n im_height = dataset.RasterYSize # 栅格矩阵的行数\n im_bands = dataset.RasterCount # 波段数\n im_data = dataset.ReadAsArray(0, 0, im_width, im_height) # 获取数据\n im_geotrans = dataset.GetGeoTransform() # 获取仿射矩阵信息\n im_proj = dataset.GetProjection() # 获取投影信息\n im_blueBand = im_data[0, 0:im_height, 0:im_width] # 获取蓝波段\n im_greenBand = im_data[1, 0:im_height, 0:im_width] # 获取绿波段\n im_redBand = im_data[2, 0:im_height, 0:im_width] # 获取红波段\n\n a = im_blueBand + im_greenBand + im_redBand\n a = ((1 - np.equal(a, 0)) * 255).astype(np.uint16)\n b = np.sum(a)\n new_im_data = np.stack([im_blueBand, im_greenBand, im_redBand, a], axis=0)\n writeTiff(new_im_data, im_width, im_height, 
im_bands + 1, im_geotrans, im_proj, path=\"C:/Users/dell/Desktop/3.tif\")\n\n\ndef writeTiff(im_data, im_width, im_height, im_bands, im_geotrans, im_proj, path):\n if 'int8' in im_data.dtype.name:\n datatype = gdal.GDT_Byte\n elif 'int16' in im_data.dtype.name:\n datatype = gdal.GDT_UInt16\n else:\n datatype = gdal.GDT_Float32\n\n if len(im_data.shape) == 3:\n im_bands, im_height, im_width = im_data.shape\n elif len(im_data.shape) == 2:\n im_data = np.array([im_data])\n else:\n im_bands, (im_height, im_width) = 1, im_data.shape\n # 创建文件\n driver = gdal.GetDriverByName(\"GTiff\")\n dataset = driver.Create(path, im_width, im_height, im_bands, datatype)\n if (dataset != None):\n dataset.SetGeoTransform(im_geotrans) # 写入仿射变换参数\n dataset.SetProjection(im_proj) # 写入投影\n for i in range(im_bands):\n dataset.GetRasterBand(i + 1).WriteArray(im_data[i])\n del dataset\n\n\nreadTif(\"D:/data/ndvi/Image20200410-1.tif\")\n" }, { "alpha_fraction": 0.49539169669151306, "alphanum_fraction": 0.5072964429855347, "avg_line_length": 28.931034088134766, "blob_id": "e913ce486ca7f615c34fc1b15d7ce4e44def5d13", "content_id": "5cdf3b9ea75b8290418ba591fdb3b105315d89a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2690, "license_type": "no_license", "max_line_length": 76, "num_lines": 87, "path": "/知识图谱/xmlTojson.py", "repo_name": "JunZhenSun/tiff_Process", "src_encoding": "UTF-8", "text": "import xmltodict\nimport json\nimport os\nimport csv\nfrom py2neo import Graph, Node, Relationship\n\n\ndef xmlToJson(path): # 将xml转换为json格式\n xml_str = open(path, \"r\").read()\n # '''传入xml字符串,返回字典'''\n dic = xmltodict.parse(xml_str, encoding='utf-8')\n dic = json.dumps(dic, indent=4)\n return dic\n\n\ndef getfiles(): # 遍历文件夹中的xml文档\n filenames = os.listdir(r'd:/data/006_矿产资源/46xml/xml/')\n filepaths = []\n for i in filenames:\n if '.xml' in i:\n filepaths.append(os.path.join(\"d:/data/006_矿产资源/46xml/xml/\", i))\n return filepaths\n\n\n# 数据库初始化\nmyGraph = Graph(\n \"http://localhost:7474\",\n username=\"neo4j\",\n password=\"password\"\n)\n\nfilepaths = getfiles()\n\nfor i in filepaths:\n f2 = open(i.replace(\".xml\", \".json\"), 'w')\n f2.write(xmlToJson(i))\n f2.close()\n\n dir = xmltodict.parse(open(i, \"r\").read(), encoding='utf-8')\n\n lis = []\n lis.append(dir['WMS_Capabilities']['Service']['Title'])\n a = dir['WMS_Capabilities']['Capability']['Layer']\n while type(a) != list and \"Layer\" in a.keys():\n a = a['Layer']\n if type(a) == list:\n for List in a:\n if \"Title\" in List.keys():\n lis.append(List[\"Title\"])\n else:\n lis.append(\"\")\n if \"Abstract\" in List.keys() and List[\"Abstract\"] != None:\n lis.append(List[\"Abstract\"].split(\":\")[-1])\n else:\n lis.append(\"\")\n if \"KeywordList\" in List.keys():\n if type(List[\"KeywordList\"][\"Keyword\"]) != list:\n for l1 in List[\"KeywordList\"][\"Keyword\"].split(\";\"):\n lis.append(l1)\n else:\n for ele in List[\"KeywordList\"][\"Keyword\"]:\n lis.append(ele)\n else:\n lis.append(\"\")\n else:\n if \"Title\" in a.keys():\n lis.append(a[\"Title\"])\n else:\n lis.append(\"\")\n if \"Abstract\" in a.keys() and a[\"Abstract\"] != None:\n lis.append(a[\"Abstract\"].split(\":\")[-1])\n else:\n lis.append(\"\")\n if \"KeywordList\" in a.keys():\n if type(a[\"KeywordList\"][\"Keyword\"]) != list:\n for l2 in a[\"KeywordList\"][\"Keyword\"].split(\";\"):\n lis.append(l2)\n else:\n for ele in a[\"KeywordList\"][\"Keyword\"]:\n lis.append(ele)\n else:\n lis.append(\"\")\n print(lis)\n csvFile = 
open(\"d:/data/006_矿产资源/data.csv\", \"at+\", newline=\"\")\n writer = csv.writer(csvFile)\n writer.writerow(lis)\n csvFile.close()\n" }, { "alpha_fraction": 0.502531886100769, "alphanum_fraction": 0.5179258584976196, "avg_line_length": 38.49599838256836, "blob_id": "0c582efad9b58be2ecc4a6d05860c528ebaac99d", "content_id": "eff16705fc905763d45c2a737c8aed87512325a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5739, "license_type": "no_license", "max_line_length": 92, "num_lines": 125, "path": "/知识图谱/Neo4jExample.py", "repo_name": "JunZhenSun/tiff_Process", "src_encoding": "UTF-8", "text": "import xmltodict\nimport json\nimport os\nimport csv\nfrom py2neo import Graph, Node, Relationship\n\n\ndef xmlToJson(path): # 将xml转换为json格式\n xml_str = open(path, \"r\").read()\n # '''传入xml字符串,返回字典'''\n dic = xmltodict.parse(xml_str, encoding='utf-8')\n dic = json.dumps(dic, indent=4)\n return dic\n\n\ndef getfiles(): # 遍历文件夹中的xml文档\n filenames = os.listdir(r'd:/data/006_矿产资源/46xml/xml/')\n filepaths = []\n for i in filenames:\n if '.xml' in i:\n filepaths.append(os.path.join(\"d:/data/006_矿产资源/46xml/xml/\", i))\n return filepaths\n\n\n# 数据库初始化\nmyGraph = Graph(\n \"http://localhost:7474\",\n username=\"neo4j2\",\n password=\"password\"\n)\n\nfilepaths = getfiles()\n\nfor i in filepaths:\n\n # f2 = open(i.replace(\".xml\", \".json\"), 'w') # 将xml转换为json并保存到本地\n # f2.write(xmlToJson(i))\n # f2.close()\n\n dir = xmltodict.parse(open(i, \"r\").read(), encoding='utf-8') # 将xml转换为字典\n\n lis = [] # 用于暂时存储单行数据的list\n\n lis.append(dir['WMS_Capabilities']['Service']['Title']) # 数据第一列:服务的标题\n\n fNode = Node(\"WMS\", name=dir['WMS_Capabilities']['Service']['Title']) # 创建服务实体节点\n myGraph.create(fNode) # 实体节点导入图谱\n\n a = dir['WMS_Capabilities']['Capability']['Layer']\n while type(a) != list and \"Layer\" in a.keys():\n a = a['Layer']\n if type(a) == list:\n for List in a:\n if \"Title\" in List.keys():\n lis.append(List[\"Title\"])\n node_1 = Node('Title', content=List[\"Title\"]) # 创建属性节点\n Rel_1 = Relationship(fNode, \"keyword\", node_1) # 创建实体节点与属性节点的关系\n myGraph.create(node_1) # 属性节点导入图谱\n myGraph.create(Rel_1) # 实体属性关系导入图谱\n else:\n lis.append(\"\")\n if \"Abstract\" in List.keys() and List[\"Abstract\"] != None:\n lis.append(List[\"Abstract\"].split(\":\")[-1])\n node_2 = Node('Abstract', content=List[\"Abstract\"].split(\":\")[-1]) # 创建属性节点\n Rel_2 = Relationship(fNode, \"keyword\", node_2) # 创建实体节点与属性节点的关系\n myGraph.create(node_2) # 属性节点导入图谱\n myGraph.create(Rel_2) # 实体属性关系导入图谱\n else:\n lis.append(\"\")\n if \"KeywordList\" in List.keys():\n if type(List[\"KeywordList\"][\"Keyword\"]) != list:\n for l1 in List[\"KeywordList\"][\"Keyword\"].split(\";\"):\n lis.append(l1)\n node_3 = Node(\"KeywordList\", content=l1) # 创建属性节点\n Rel_3 = Relationship(fNode, \"keyword\", node_3) # 创建实体节点与属性节点的关系\n myGraph.create(node_3) # 属性节点导入图谱\n myGraph.create(Rel_3) # 实体属性节点导入图谱\n else:\n for ele in List[\"KeywordList\"][\"Keyword\"]:\n lis.append(ele)\n node_4 = Node(\"KeywordList\", content=ele) # 创建属性节点\n Rel_4 = Relationship(fNode, \"keyword\", node_4) # 创建实体节点与属性节点的关系\n myGraph.create(node_4) # 属性节点导入图谱\n myGraph.create(Rel_4) # 实体属性关系导入图谱\n else:\n lis.append(\"\")\n else:\n if \"Title\" in a.keys():\n lis.append(a[\"Title\"])\n node_5 = Node('Title', content=a[\"Title\"]) # 创建属性节点\n Rel_5 = Relationship(fNode, \"keyword\", node_5) # 创建实体节点与属性节点的关系\n myGraph.create(node_5) # 属性节点导入图谱\n myGraph.create(Rel_5) # 实体属性关系导入图谱\n else:\n lis.append(\"\")\n if 
\"Abstract\" in a.keys() and a[\"Abstract\"] != None:\n lis.append(a[\"Abstract\"].split(\":\")[-1])\n node_6 = Node('Abstract', content=a[\"Abstract\"].split(\":\")[-1]) # 创建属性节点\n Rel_6 = Relationship(fNode, \"keyword\", node_6) # 创建实体节点与属性节点的关系\n myGraph.create(node_6) # 属性节点导入图谱\n myGraph.create(Rel_6) # 实体属性关系导入图谱\n else:\n lis.append(\"\")\n if \"KeywordList\" in a.keys():\n if type(a[\"KeywordList\"][\"Keyword\"]) != list:\n for l2 in a[\"KeywordList\"][\"Keyword\"].split(\";\"):\n lis.append(l2)\n node_7 = Node(\"KeywordList\", content=l2) # 创建属性节点\n Rel_7 = Relationship(fNode, \"keyword\", node_7) # 创建实体节点与属性节点的关系\n myGraph.create(node_7) # 属性节点导入图谱\n myGraph.create(Rel_7) # 实体属性关系导入图谱\n else:\n for ele in a[\"KeywordList\"][\"Keyword\"]:\n lis.append(ele)\n node_8 = Node(\"KeywordList\", content=ele) # 创建属性节点\n Rel_8 = Relationship(fNode, \"keyword\", node_8) # 创建实体节点与属性节点的关系\n myGraph.create(node_8) # 属性节点导入图谱\n myGraph.create(Rel_8) # 实体属性关系导入图谱\n else:\n lis.append(\"\")\n print(lis)\n csvFile = open(\"d:/data/006_矿产资源/data.csv\", \"at+\", newline=\"\")\n writer = csv.writer(csvFile)\n writer.writerow(lis)\n csvFile.close()\n" } ]
7
fmcevoy/playeah
https://github.com/fmcevoy/playeah
4df48273c409e6f0bc22f099c63c3d02fa76b90e
98c77bc6ba5b6f53cffeab9ed3133e682d26a54d
461167873dfc30822ac203adbe73c02e879a931b
refs/heads/master
2023-01-05T02:33:00.624225
2015-10-21T20:33:16
2015-10-21T20:33:16
36,100,960
0
0
NOASSERTION
2015-05-23T00:22:02
2015-05-23T22:30:55
2022-12-26T19:48:55
Python
[ { "alpha_fraction": 0.6011080145835876, "alphanum_fraction": 0.6121883392333984, "avg_line_length": 29.08333396911621, "blob_id": "bf14364fcbe0ebd4539048ab3ba01852c96421e0", "content_id": "767900f3d80a655c1fe37dd5d1ff91db850c9dff", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 361, "license_type": "permissive", "max_line_length": 67, "num_lines": 12, "path": "/setup.py", "repo_name": "fmcevoy/playeah", "src_encoding": "UTF-8", "text": "from setuptools import setup, find_packages\nsetup(\n name = \"playeah\",\n description = 'playeah',\n version = \"0.1\",\n author = 'Fintan McEvoy',\n author_email = 'fintan.mcevoy@gmail.com',\n url = 'https://github.com/fmcevoy/playeah',\n download_url ='https://github.com/fmcevoy/playeah/tarball/0.1',\n package_dir = {'':'lib'},\n packages = find_packages('lib'),\n)\n" }, { "alpha_fraction": 0.59375, "alphanum_fraction": 0.640625, "avg_line_length": 31, "blob_id": "76cda8c4d5a55f66bf07c7213748dd41453747f9", "content_id": "ab511f68181ac7d43d14d8dcf7cac48637ee89dc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 64, "license_type": "permissive", "max_line_length": 37, "num_lines": 2, "path": "/Dockerfile", "repo_name": "fmcevoy/playeah", "src_encoding": "UTF-8", "text": "FROM python:3.4.3-onbuild\nCMD [ \"python\", \"./bin/playeah.py\" ]\n" }, { "alpha_fraction": 0.44915252923965454, "alphanum_fraction": 0.6864407062530518, "avg_line_length": 13.75, "blob_id": "54cdd48843e67ae1c5a641b7d54813690f5feaba", "content_id": "b1061f91c1fba6b12dbd769b588b5ce414ff3b7a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 118, "license_type": "permissive", "max_line_length": 18, "num_lines": 8, "path": "/requirements.txt", "repo_name": "fmcevoy/playeah", "src_encoding": "UTF-8", "text": "Flask==0.10.1\nitsdangerous==0.24\nJinja2==2.8\nMarkupSafe==0.23\npy==1.4.30\npytest==2.8.2\nWerkzeug==0.10.4\nwheel==0.24.0\n" }, { "alpha_fraction": 0.5773195624351501, "alphanum_fraction": 0.6185566782951355, "avg_line_length": 12.857142448425293, "blob_id": "b1b0225d923ec56c3b513e6713b41c4325af16c9", "content_id": "acfe9bbc55e16fb1206cbbc2626a40b76a342b98", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 97, "license_type": "permissive", "max_line_length": 24, "num_lines": 7, "path": "/test/test_playeah.py", "repo_name": "fmcevoy/playeah", "src_encoding": "UTF-8", "text": "import pytest\n\ndef test_nothing():\n assert 1 == 1\n\ndef test_nothing_else():\n assert 2 == 2\n" } ]
4
splietker/r0vert_ros
https://github.com/splietker/r0vert_ros
ff8cf144826972ea79be5c353978859dc7ef6341
a162e77231e027332be75c68d55ac6ef3eebef0e
0ae173163204a9dcd395e52cdd0c4aecded99d49
refs/heads/master
2021-01-13T15:04:40.798594
2017-06-18T16:36:14
2017-06-18T16:36:14
76,276,612
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6779081225395203, "alphanum_fraction": 0.6849951148033142, "avg_line_length": 33.38655471801758, "blob_id": "9f53ccc66135f27154d4b55a6137b9ad2f02145f", "content_id": "a594e41e3ece9691ce1966514f8576dd17a9c6c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4092, "license_type": "no_license", "max_line_length": 105, "num_lines": 119, "path": "/r0vert_teleop/src/teleop_joy.cpp", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "/* Copyright (c) 2016, Malte Splietker\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n * * Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n * * The names of contributors may not be used to endorse or promote\n * products derived from this software without specific prior written\n * permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n\n#include <ros/ros.h>\n#include <sensor_msgs/Joy.h>\n#include <turtle_actionlib/Velocity.h>\n#include <chrono>\n\nusing namespace std::chrono;\n\nclass TeleopJoy\n{\npublic:\n TeleopJoy();\n\n enum Mode {\n OneStick = 0,\n TwoStick,\n Tank\n };\n\nprivate:\n void JoyCallback(const sensor_msgs::Joy::ConstPtr &ptr);\n\n ros::NodeHandle n_;\n\n ros::Publisher velocity_pub_;\n ros::Subscriber joystick_sub_;\n int axis_angular_, axis_linear_, axis_throttle_;\n double max_speed_;\n\n Mode mode_;\n std::chrono::system_clock::time_point last_mode_change_;\n};\n\nTeleopJoy::TeleopJoy() :\n mode_(TwoStick)\n{\n // Create joystick axis mapping parameters\n ros::NodeHandle private_node_handle(\"~\");\n private_node_handle.param(\"axis_angular\", axis_angular_, 0);\n private_node_handle.param(\"axis_linear\", axis_linear_, 1);\n private_node_handle.param(\"axis_throttle\", axis_throttle_, 2);\n private_node_handle.param(\"max_speed\", max_speed_, 1.0);\n\n velocity_pub_ = n_.advertise<turtle_actionlib::Velocity>(\"velocity\", 1);\n joystick_sub_ = n_.subscribe<sensor_msgs::Joy>(\"joy\", 10, &TeleopJoy::JoyCallback, this);\n}\n\nvoid TeleopJoy::JoyCallback(const sensor_msgs::Joy::ConstPtr &ptr)\n{\n if (ptr->buttons[12])\n {\n system_clock::time_point now = system_clock::now();\n milliseconds diff = duration_cast<milliseconds>(now - last_mode_change_);\n if (diff.count() > 200)\n {\n mode_ = static_cast<Mode>((mode_ + 1) % 3);\n ROS_INFO(\"New mode: %d\", mode_);\n last_mode_change_ = now;\n 
}\n }\n\n if (mode_ == OneStick)\n {\n //n_.getParam(\"max_speed\", max_speed_);\n ROS_DEBUG(\"Received: v=%f, a=%f\", (float) ptr->axes[axis_linear_], (float) ptr->axes[axis_angular_]);\n turtle_actionlib::Velocity velocity;\n velocity.linear = (float) ptr->axes[axis_linear_] * (float) max_speed_;\n velocity.angular = (float) ptr->axes[axis_angular_] * (float) max_speed_;\n velocity_pub_.publish(velocity);\n }\n else if (mode_ == TwoStick)\n {\n turtle_actionlib::Velocity velocity;\n velocity.linear = (float) ptr->axes[1] * (float) max_speed_;\n velocity.angular = (float) ptr->axes[2] * (float) max_speed_;\n velocity_pub_.publish(velocity);\n }\n else if (mode_ == Tank)\n {\n turtle_actionlib::Velocity velocity;\n velocity.linear = ((float) ptr->axes[3] + ptr->axes[1]) * (float) max_speed_ / 2;\n velocity.angular = ((float) ptr->axes[3] - ptr->axes[1]) * (float) max_speed_ / 2;\n velocity_pub_.publish(velocity);\n }\n}\n\nint main(int argc, char *argv[])\n{\n ros::init(argc, argv, \"r0vert_teleop\");\n\n TeleopJoy teleopJoy;\n\n ros::spin();\n}\n" }, { "alpha_fraction": 0.7351694703102112, "alphanum_fraction": 0.7365819215774536, "avg_line_length": 20.454545974731445, "blob_id": "4e7fc2d5bb7bc28333ca53ccfda1758bbca9298e", "content_id": "f500aec086a739f8b1b7663aa611bce971ffa2be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1416, "license_type": "no_license", "max_line_length": 78, "num_lines": 66, "path": "/r0vert_sequence_recorder/include/sequence_recorder/image_sequence_recorder.h", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "#ifndef _CAMERA_RECORDER_IMAGE_SEQUENCE_RECORDER_H_\n#define _CAMERA_RECORDER_IMAGE_SEQUENCE_RECORDER_H_\n\n#include <atomic>\n#include <chrono>\n#include <thread>\n#include <vector>\n\n#include <boost/property_tree/ptree.hpp>\n#include <ros/ros.h>\n#include <sensor_msgs/Joy.h>\n#include <turtle_actionlib/Velocity.h>\n#include <r0vert_msgs/WheelVelocity.h>\n#include \"sequence.h\"\n\nnamespace pt = boost::property_tree;\n\nusing namespace std::chrono;\n\nnamespace sequence_recorder\n{\n\nclass ImageSequenceRecorder\n{\npublic:\n ImageSequenceRecorder(ros::NodeHandle &nh);\n\nprivate:\n void StartRecording();\n\n void StopRecording();\n\n void JoyCallback(const sensor_msgs::Joy::ConstPtr &ptr);\n\n void WheelVelocityCallback(const r0vert_msgs::WheelVelocity::ConstPtr &ptr);\n\n void Record();\n\n ros::NodeHandle &n_;\n ros::Subscriber joystick_sub_;\n ros::Subscriber wheel_velocity_sub_;\n\n std::thread recording_thread_;\n std::atomic_bool is_recording_;\n\n std::unique_ptr<Sequence> sequence_;\n\n // Variables for measuring fps\n system_clock::time_point start_time_;\n system_clock::time_point last_frame_time_;\n milliseconds frame_time_min_;\n milliseconds frame_time_max_;\n\n // Configuration parameters\n int capture_device_;\n std::string output_directory_;\n int image_width_;\n int image_height_;\n int framerate_;\n bool grayscale_;\n\n};\n\n} // namespace sequence_recorder\n\n#endif // _CAMERA_RECORDER_IMAGE_SEQUENCE_RECORDER_H_\n" }, { "alpha_fraction": 0.6866438388824463, "alphanum_fraction": 0.6883561611175537, "avg_line_length": 20.629629135131836, "blob_id": "926524de8ac7b9dd7aaad2422e2f220ba873ef11", "content_id": "9b70fade317c55277e45f983124461b4c9bb8ca4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 584, "license_type": "no_license", "max_line_length": 54, "num_lines": 27, "path": 
"/r0vert_sequence_recorder/src/sequence_recorder_node.cpp", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "#include <ros/ros.h>\n#include <sensor_msgs/Joy.h>\n#include <sequence_recorder/still_sequence_recorder.h>\n#include <sequence_recorder/image_sequence_recorder.h>\n\nusing namespace sequence_recorder;\n\nint main(int argc, char *argv[])\n{\n ros::init(argc, argv, \"sequence_recorder\");\n\n ros::NodeHandle nh(\"~\");\n bool still_images;\n nh.param<bool>(\"still_images\", still_images, false);\n if (still_images)\n {\n StillSequenceRecorder still_sequence_recorder(nh);\n ros::spin();\n }\n else\n {\n ImageSequenceRecorder image_sequence_recorder(nh);\n ros::spin();\n }\n\n return 0;\n}\n" }, { "alpha_fraction": 0.7706422209739685, "alphanum_fraction": 0.8073394298553467, "avg_line_length": 26.25, "blob_id": "9a5d83096b0bb6ae9a7d17abe1cecec145a2cbad", "content_id": "09cf1c9f7f48710a6541a280282da301c081ade9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 109, "license_type": "no_license", "max_line_length": 37, "num_lines": 4, "path": "/r0vert_ros/CMakeLists.txt", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8.3)\nproject(r0vert_ros)\nfind_package(catkin REQUIRED)\ncatkin_metapackage()\n" }, { "alpha_fraction": 0.7931034564971924, "alphanum_fraction": 0.7931034564971924, "avg_line_length": 28, "blob_id": "69f237db8b12dbef26af668f7ff2d92819ce6f74", "content_id": "c9015d6eecb0d98be473835fcb74667ff791d6e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29, "license_type": "no_license", "max_line_length": 28, "num_lines": 1, "path": "/r0vert_panel/src/r0vert_panel/__init__.py", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "from rotary_encoder import *\n" }, { "alpha_fraction": 0.670691192150116, "alphanum_fraction": 0.68013995885849, "avg_line_length": 31.662857055664062, "blob_id": "84889aa21f1c287e0e1b5be8e36944ef4c5f44d2", "content_id": "6eb3c9714a2764d13e2fed47d49f3b2b1f139769", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5715, "license_type": "no_license", "max_line_length": 110, "num_lines": 175, "path": "/r0vert_sequence_recorder/src/still_sequence_recorder.cpp", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "/* Copyright (c) 2017, Malte Splietker\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n * * Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n * * The names of contributors may not be used to endorse or promote\n * products derived from this software without specific prior written\n * permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n\n#include <chrono>\n#include <thread>\n#include <boost/filesystem/path.hpp>\n#include <boost/filesystem/operations.hpp>\n#include <boost/format.hpp>\n#include <sensor_msgs/Joy.h>\n\n#include<sequence_recorder/still_sequence_recorder.h>\n\nnamespace fs = boost::filesystem;\n\nusing namespace std::chrono;\n\nnamespace sequence_recorder\n{\n\nStillSequenceRecorder::StillSequenceRecorder(ros::NodeHandle &nh)\n : n_(nh), recording_(false), robot_stopped_(false)\n{\n n_.param<int>(\"capture_device\", capture_device_, 0);\n n_.param<std::string>(\"output_directory\", output_directory_, \"/tmp/recordings\");\n n_.param<int>(\"image_width\", image_width_, 640);\n n_.param<int>(\"image_height\", image_height_, 480);\n n_.param<bool>(\"grayscale\", grayscale_, false);\n\n joystick_sub_ = n_.subscribe<sensor_msgs::Joy>(\n \"/joy\", 10, &StillSequenceRecorder::JoyCallback, this);\n velocity_intercept_sub_ = n_.subscribe<turtle_actionlib::Velocity>(\n \"/velocity_intercept\", 10, &StillSequenceRecorder::VelocityInterceptCallback, this);\n wheel_velocity_sub_ = n_.subscribe<r0vert_msgs::WheelVelocity>(\n \"/wheel_velocity\", 10, &StillSequenceRecorder::WheelVelocityCallback, this);\n\n velocity_pub_ = n_.advertise<turtle_actionlib::Velocity>(\"/velocity\", 10);\n}\n\n\nvoid StillSequenceRecorder::VelocityInterceptCallback(const turtle_actionlib::Velocity::ConstPtr &ptr)\n{\n if (not robot_stopped_)\n {\n velocity_pub_.publish(ptr);\n }\n}\n\nvoid StillSequenceRecorder::JoyCallback(const sensor_msgs::Joy::ConstPtr &ptr)\n{\n if (ptr->buttons[14] and not recording_)\n {\n sequence_ = std::make_unique<Sequence>(output_directory_, grayscale_);\n robot_stopped_ = false;\n recording_ = true;\n\n recording_thread_ = std::thread(&StillSequenceRecorder::ImageRecordingThread, this);\n\n ROS_INFO(\"Recording started\");\n }\n else if (ptr->buttons[15] and recording_)\n {\n recording_ = false;\n robot_stopped_ = false;\n sequence_->WriteMetadata();\n\n recording_thread_.join();\n ROS_INFO(\"Recording stopped\");\n }\n}\n\nvoid StillSequenceRecorder::WheelVelocityCallback(const r0vert_msgs::WheelVelocity::ConstPtr &ptr)\n{\n static high_resolution_clock::time_point last_callback = high_resolution_clock::now();\n high_resolution_clock::time_point now = high_resolution_clock::now();\n\n if (recording_)\n {\n sequence_->AddWheelVelocity(ptr);\n if (not robot_stopped_)\n {\n duration<double> time_diff = now - last_callback;\n double forward_velocity = 0.032 / 2 * (ptr->right + ptr->left);\n double turn_velocity = 0.032 / 0.18 * (ptr->right - ptr->left);\n traveled_distance_ += std::abs(ptr->time * forward_velocity);\n turned_angle_ += std::abs(ptr->time * turn_velocity);\n }\n\n if (traveled_distance_ > 0.02 or turned_angle_ > 0.03)\n {\n TakePicture();\n traveled_distance_ = 0;\n turned_angle_ = 0;\n }\n }\n\n last_callback = now;\n}\n\nvoid StillSequenceRecorder::TakePicture()\n{\n // Stop robot\n robot_stopped_ = true;\n turtle_actionlib::Velocity velocity;\n velocity.angular = 0;\n velocity.linear = 
0;\n velocity_pub_.publish(velocity);\n}\n\nvoid StillSequenceRecorder::ImageRecordingThread()\n{\n capture_.open(0);\n capture_.set(CV_CAP_PROP_FRAME_WIDTH, image_width_);\n capture_.set(CV_CAP_PROP_FRAME_HEIGHT, image_height_);\n capture_.set(CV_CAP_PROP_FPS, 5);\n\n cv::Mat frame;\n while (recording_)\n {\n capture_ >> frame;\n\n if (robot_stopped_)\n {\n // Wait for robot to stop\n std::this_thread::sleep_for(std::chrono::milliseconds(200));\n\n // Clear camera buffer\n while (true)\n {\n system_clock::time_point time_start = system_clock::now();\n capture_ >> frame;\n std::cout << duration<double>(system_clock::now() - time_start).count() << \", \"\n << capture_.get(cv::CAP_PROP_FPS)\n << std::endl;\n if (duration<double>(system_clock::now() - time_start).count() * capture_.get(cv::CAP_PROP_FPS) > 0.5)\n {\n break;\n }\n }\n\n // Store newest image\n capture_ >> frame;\n sequence_->AddFrame(frame);\n\n // Resume driving\n robot_stopped_ = false;\n }\n }\n\n capture_.release();\n}\n\n} // namespace sequence_recorder" }, { "alpha_fraction": 0.6960070729255676, "alphanum_fraction": 0.7162377834320068, "avg_line_length": 28.973403930664062, "blob_id": "5b85456155fea464167a34a7fd1f90e1f5f1de16", "content_id": "65f07290e691e1272c6762f5d4a0f0d07cb78024", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5635, "license_type": "no_license", "max_line_length": 96, "num_lines": 188, "path": "/r0vert_firmware/src/main.cpp", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "/* Copyright (c) 2016, Malte Splietker\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n * * Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n * * The names of contributors may not be used to endorse or promote\n * products derived from this software without specific prior written\n * permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n\n#include <Arduino.h>\n\n#include <ros.h>\n#include <turtle_actionlib/Velocity.h>\n#include <r0vert_msgs/BatteryVoltage.h>\n#include <r0vert_msgs/WheelVelocity.h>\n\n#include \"motor.h\"\n#include \"battery.h\"\n#include \"timer.h\"\n#include \"wheel_encoder.h\"\n\n\nros::NodeHandle nh;\n\n// Subscribers\nvoid velocity_callback(const turtle_actionlib::Velocity &velocity_msg);\n\nros::Subscriber<turtle_actionlib::Velocity> velocity_subscriber(\"velocity\", &velocity_callback);\n\n// Publishers\nr0vert_msgs::BatteryVoltage battery_msg;\nros::Publisher battery_publisher(\"battery\", &battery_msg);\nr0vert_msgs::WheelVelocity velocity_msg;\nros::Publisher velocity_publisher(\"wheel_velocity\", &velocity_msg);\n\n// Publishing timers\nvoid publish_status();\n\nTimer publish_status_timer(1000, &publish_status);\n\n// Velocity publishing timer\nvoid publish_velocity();\n\nTimer publish_velocity_timer(50, &publish_velocity);\n\n// Hardware\nMotor motor_left(6, 7);\nMotor motor_right(4, 5);\n\nPIDController controller_left(&motor_left);\nPIDController controller_right(&motor_right);\n\nWheelEncoder encoder_left(50, 52, 53);\nWheelEncoder encoder_right(50, 52, 51, true);\n\nBattery battery1(A0, 15.275);\nBattery battery2(A1, 15.218);\n//14.861825541788024\n\nvoid velocity_callback(const turtle_actionlib::Velocity &velocity_msg)\n{\n  float hypotenuse = sqrt(pow(velocity_msg.linear, 2) + pow(velocity_msg.angular, 2));\n  float angle;\n  if (hypotenuse == 0)\n  {\n    angle = PI / 2;\n  }\n  else\n  {\n    float normalized_x = -velocity_msg.angular / hypotenuse;\n    angle = acos(normalized_x);\n  }\n\n  float speed = min(hypotenuse, 1);\n\n  // Apply exponential function for better steerability\n  speed = pow(2, speed) - 1;\n\n  float heading = (angle - PI / 2) / (PI / 2);\n\n  if (heading > 0 and velocity_msg.linear > 0)\n  {\n    controller_left.SetSpeed((1 - 2 * heading) * speed);\n    controller_right.SetSpeed(speed);\n  }\n  else if (heading > 0 and velocity_msg.linear <= 0)\n  {\n    controller_left.SetSpeed(-speed);\n    controller_right.SetSpeed((-1 + 2 * heading) * speed);\n  }\n  else if (heading <= 0 and velocity_msg.linear > 0)\n  {\n    controller_left.SetSpeed(speed);\n    controller_right.SetSpeed((1 + 2 * heading) * speed);\n  }\n  else if (heading <= 0 and velocity_msg.linear <= 0)\n  {\n    controller_left.SetSpeed((-1 - 2 * heading) * speed);\n    controller_right.SetSpeed(-speed);\n  }\n}\n\nvoid publish_status()\n{\n  battery_msg.battery1 = battery1.Voltage();\n  battery_msg.battery2 = battery2.Voltage();\n  battery_publisher.publish(&battery_msg);\n}\n\nvoid publish_velocity()\n{\n  static const double velocity_threshold = 0.02;\n  static unsigned long last_publish_time = micros();\n  unsigned long current_time = micros();\n  double time_diff = (current_time - last_publish_time) * 1e-6;\n  velocity_msg.left = encoder_left.IncrementalDiff() / time_diff;\n  velocity_msg.right = encoder_right.IncrementalDiff() / time_diff;\n  velocity_msg.time = time_diff;\n\n  if (controller_left.set_speed() == 0 and controller_right.set_speed() == 0\n      
and fabs(velocity_msg.left) < velocity_threshold\n      and fabs(velocity_msg.right) < velocity_threshold)\n  { // If motors set to stop and measured velocity close to 0\n    velocity_msg.left = 0;\n    velocity_msg.right = 0;\n  }\n\n  velocity_publisher.publish(&velocity_msg);\n\n  last_publish_time = current_time;\n}\n\nvoid motor_control()\n{\n  double velocity_left = encoder_left.Velocity();\n  double velocity_right = encoder_right.Velocity();\n  controller_left.EncoderUpdate(velocity_left);\n  controller_right.EncoderUpdate(velocity_right);\n\n  velocity_msg.left += velocity_left;\n  velocity_msg.right += velocity_right;\n}\n\nvoid setup()\n{\n  Serial.begin(115200);\n  nh.getHardware()->setBaud(115200);\n  nh.initNode();\n  nh.subscribe(velocity_subscriber);\n  nh.advertise(battery_publisher);\n  nh.advertise(velocity_publisher);\n\n  encoder_left.Init();\n  encoder_right.Init();\n  controller_left.Init();\n  controller_right.Init();\n}\n\nvoid loop()\n{\n  nh.spinOnce();\n\n  encoder_left.Update();\n  encoder_right.Update();\n  battery1.Update();\n  battery2.Update();\n\n  publish_status_timer.Update();\n  publish_velocity_timer.Update();\n  motor_control();\n}\n" }, { "alpha_fraction": 0.7022873759269714, "alphanum_fraction": 0.7055039405822754, "avg_line_length": 25.657142639160156, "blob_id": "aa2ccbd97c9d39dcf0e05a796efb536b6689f493", "content_id": "c2d94a30a892291af061a648f1590f6f97568666", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2798, "license_type": "no_license", "max_line_length": 119, "num_lines": 105, "path": "/r0vert_firmware/src/motor.h", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "/* Copyright (c) 2016, Malte Splietker\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n * * Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n * * The names of contributors may not be used to endorse or promote\n * products derived from this software without specific prior written\n * permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n\n#ifndef _MOTOR_MOTOR_H_\n#define _MOTOR_MOTOR_H_\n\n#include \"PID.h\"\n\n/**\n * An H-Bridge driven motor.\n *\n * Implementation for driving a motor driven by an H-Bridge (e.g. LM298). 
The H-Bridge needs to be connected to two PWM\n * pins (pin_a and pin_b).\n */\nclass Motor\n{\npublic:\n  Motor(int pin_a, int pin_b);\n\n  ~Motor();\n\n  /**\n   * Sets the speed of the motor.\n   * Sets the new speed and direction values and applies the changes to the pins. If the given value is positive the\n   * new direction is FORWARD, BACKWARD otherwise.\n   *\n   * @param speed Value between -1 and 1.\n   */\n  void SetSpeed(float speed);\n\nprivate:\n  /**\n   * Direction of rotation.\n   */\n  enum Direction\n  {\n    FORWARD,\n    BACKWARD\n  };\n\n  static const float min_duty_cycle;\n  static const float max_duty_cycle;\n\n  /**\n   * Applies direction and speed to the pins.\n   */\n  void Write();\n\n  int pin_a_;\n  int pin_b_;\n  float speed_;\n  Direction direction_;\n};\n\nclass PIDController\n{\npublic:\n  PIDController(Motor *motor);\n\n  void Init();\n\n  void SetSpeed(double speed);\n\n  void EncoderUpdate(double value);\n\n  const double set_speed() const;\n\nprivate:\n  Motor *motor_;\n\n  PID controller_;\n\n  double input_;\n\n  double output_;\n\n  double set_speed_;\n\n  int direction_;\n};\n\n#endif /* _MOTOR_MOTOR_H_ */" }, { "alpha_fraction": 0.7087719440460205, "alphanum_fraction": 0.719298243522644, "avg_line_length": 34.625, "blob_id": "2bfbf8729342624a990c601bc1292354dc2c186c", "content_id": "5832d06920ac29850346471dfd1c77fface84405", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2280, "license_type": "no_license", "max_line_length": 80, "num_lines": 64, "path": "/r0vert_firmware/src/battery.cpp", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "/* Copyright (c) 2017, Malte Splietker\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n * * Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n * * The names of contributors may not be used to endorse or promote\n * products derived from this software without specific prior written\n * permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n\n#include \"battery.h\"\n\nBattery::Battery(uint8_t pin, float conversion_factor)\n    : pin_(pin), conversion_factor_(conversion_factor), value_(-1.0)\n{\n\n}\n\n// Lower bound under which measurements are no longer considered valid (~3.8V)\nconst uint16_t measurement_threshold = 250;\n\nvoid Battery::Update()\n{\n  int current_value = analogRead(pin_);\n  if (current_value < measurement_threshold)\n  {\n    value_ = 0;\n    return;\n  }\n  if (value_ <= measurement_threshold)\n  {\n    value_ = current_value;\n    return;\n  }\n\n  // Smoothing factor\n  const float alpha = 0.1;\n  value_= alpha * current_value + (1 - alpha) * value_;\n}\n\nfloat Battery::Voltage()\n{\n  if (value_ < measurement_threshold)\n  {\n    return 0;\n  }\n  return conversion_factor_ * value_ / 1024.0;\n}\n" }, { "alpha_fraction": 0.7340425252914429, "alphanum_fraction": 0.7765957713127136, "avg_line_length": 14.666666984558105, "blob_id": "181aa90ef9b4b9da3f6c5632569c98e5e721db03", "content_id": "d7c6bf49501711d04401083c9a45d5727fecf92e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 94, "license_type": "no_license", "max_line_length": 37, "num_lines": 6, "path": "/r0vert/CMakeLists.txt", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8.3)\nproject(r0vert)\n\nfind_package(catkin)\n\ncatkin_package()\n" }, { "alpha_fraction": 0.6407886743545532, "alphanum_fraction": 0.6608133316040039, "avg_line_length": 24.96923065185547, "blob_id": "2f0a1bb60beb1b6ac2e64ae8e7afec9fbacd8ccb", "content_id": "602eb026bb08a81836de858d7539ac1ab9e24c3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3246, "license_type": "no_license", "max_line_length": 90, "num_lines": 130, "path": "/r0vert_firmware/src/motor.cpp", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "/* Copyright (c) 2016, Malte Splietker\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n * * Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n * * The names of contributors may not be used to endorse or promote\n * products derived from this software without specific prior written\n * permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n\n#include \"motor.h\"\n\n#include <Arduino.h>\n\n/* Min duty cycle depending on voltage:\n * Voltage | Min Duty Cycle\n * --------+----------------\n * 5V | 0.48\n * 7.4V | 0.28\n * 9V | 0.21\n * 11.1V | 0.16\n * 12V | 0.15\n */\n\nconst float Motor::min_duty_cycle = 0.16;\n\nconst float Motor::max_duty_cycle = 0.9;\n\nMotor::Motor(int pin_a, int pin_b) :\n pin_a_(pin_a), pin_b_(pin_b), speed_(0), direction_(Motor::FORWARD)\n{\n Write();\n}\n\nMotor::~Motor()\n{}\n\nvoid Motor::SetSpeed(float speed)\n{\n float abs_speed = fabs(speed);\n if (abs_speed > 1)\n {\n abs_speed = 1;\n }\n else if (speed >= 0)\n {\n direction_ = Motor::FORWARD;\n }\n else\n {\n direction_ = Motor::BACKWARD;\n }\n speed_ = abs_speed;\n\n Write();\n}\n\nvoid Motor::Write()\n{\n float actual_duty_cycle = (min_duty_cycle + speed_ * (max_duty_cycle - min_duty_cycle));\n uint8_t pwm_value = (uint8_t) (255 * actual_duty_cycle);\n\n if (speed_ == 0)\n {\n pwm_value = 0;\n }\n\n if (direction_ == Motor::FORWARD)\n {\n analogWrite(pin_a_, pwm_value);\n analogWrite(pin_b_, 0);\n }\n else\n {\n analogWrite(pin_a_, 0);\n analogWrite(pin_b_, pwm_value);\n }\n}\n\nPIDController::PIDController(Motor *motor)\n : motor_(motor),\n controller_(0.5, 0.55, 0.000),\n input_(0), output_(0), set_speed_(0), direction_(1)\n{\n\n}\n\nvoid PIDController::Init()\n{\n}\n\nvoid PIDController::EncoderUpdate(double value)\n{\n controller_.Update(value);\n if (fabs(set_speed_) >= 0.001)\n {\n motor_->SetSpeed(controller_.output());\n }\n else\n {\n motor_->SetSpeed(0);\n }\n}\n\nvoid PIDController::SetSpeed(double speed)\n{\n set_speed_ = speed;\n controller_.setpoint(speed * 4);\n}\n\nconst double PIDController::set_speed() const\n{\n return set_speed_;\n}\n" }, { "alpha_fraction": 0.7769784331321716, "alphanum_fraction": 0.7913669347763062, "avg_line_length": 15.352941513061523, "blob_id": "f720d1183f93f82603468ebfdc15d52e15258d90", "content_id": "f9b980525614177c59d3b1c354736d9f8482172b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 278, "license_type": "no_license", "max_line_length": 46, "num_lines": 17, "path": "/r0vert_msgs/CMakeLists.txt", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8.3)\nproject(r0vert_msgs)\n\nfind_package(catkin REQUIRED COMPONENTS\n message_generation\n)\n\nadd_message_files(\n FILES\n BatteryVoltage.msg\n WheelVelocity.msg\n)\n\ngenerate_messages(\n DEPENDENCIES\n)\ncatkin_package(CATKIN_DEPENDS message_runtime)\n" }, { "alpha_fraction": 0.7436363697052002, "alphanum_fraction": 0.7454545497894287, "avg_line_length": 32, "blob_id": "afca24c56af66cbc7e124fd84a1cc0962e9d4fc1", "content_id": "4088b6dad5b84da2624da16d65bd13a36568201c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3300, "license_type": "no_license", "max_line_length": 92, "num_lines": 100, "path": 
"/r0vert_sequence_recorder/include/sequence_recorder/still_sequence_recorder.h", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "/* Copyright (c) 2017, Malte Splietker\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n * * Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n * * The names of contributors may not be used to endorse or promote\n * products derived from this software without specific prior written\n * permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n\n#ifndef CAMERA_RECORDER_STILL_SEQUENCE_RECORDER_H\n#define CAMERA_RECORDER_STILL_SEQUENCE_RECORDER_H\n\n\n#include <atomic>\n\n#include <ros/subscriber.h>\n#include <ros/publisher.h>\n#include <ros/ros.h>\n#include <boost/property_tree/ptree.hpp>\n#include <r0vert_msgs/WheelVelocity.h>\n#include <sensor_msgs/Joy.h>\n#include <turtle_actionlib/Velocity.h>\n#include <thread>\n#include \"sequence.h\"\n\nnamespace pt = boost::property_tree;\n\nnamespace sequence_recorder\n{\n\n/**\n * Sequence recorder that stops the robot before taking a picture.\n *\n * The Recorder tracks the traveled distance (odometry). 
If the distance exceeds a threshold\n * the robot is stopped, a picture is taken and the robot continues.\n */\nclass StillSequenceRecorder\n{\npublic:\n  StillSequenceRecorder(ros::NodeHandle &nh);\n\nprivate:\n  void JoyCallback(const sensor_msgs::Joy::ConstPtr &ptr);\n\n  void WheelVelocityCallback(const r0vert_msgs::WheelVelocity::ConstPtr &ptr);\n\n  /**\n   * Callback used to pass messages from \"/velocity_intercept\" to \"/velocity\"\n   * @param ptr\n   */\n  void VelocityInterceptCallback(const turtle_actionlib::Velocity::ConstPtr &ptr);\n\n  void ImageRecordingThread();\n\n  void TakePicture();\n\n  ros::NodeHandle &n_;\n\n  ros::Subscriber joystick_sub_;\n  ros::Subscriber wheel_velocity_sub_;\n  ros::Subscriber velocity_intercept_sub_;\n  ros::Publisher velocity_pub_;\n\n  cv::VideoCapture capture_;\n  std::unique_ptr<Sequence> sequence_;\n\n  std::thread recording_thread_;\n  std::atomic_bool recording_;\n  std::atomic_bool robot_stopped_;\n\n  double traveled_distance_;\n  double turned_angle_;\n\n  int capture_device_;\n  std::string output_directory_;\n  int image_width_;\n  int image_height_;\n  bool grayscale_;\n};\n\n} // namespace sequence_recorder\n\n#endif //CAMERA_RECORDER_STILL_SEQUENCE_RECORDER_H\n" }, { "alpha_fraction": 0.5997328758239746, "alphanum_fraction": 0.606856644153595, "avg_line_length": 38.63529586791992, "blob_id": "d92b0e8bfbcb0324445f2d8fdad3d7f760172efe", "content_id": "d39a243b0447f1d2c0f470cb4a8b2d0ed499c631", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6738, "license_type": "no_license", "max_line_length": 118, "num_lines": 170, "path": "/r0vert_panel/src/r0vert_panel/menu.py", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "# Copyright (c) 2016, Malte Splietker\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * The names of contributors may not be used to endorse or promote\n# products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nfrom threading import Timer, Event\n\nfrom rotary_encoder import EventType, Direction\n\n\nclass MenuItem(object):\n def __init__(self, name, value=None):\n self.name = name\n self.value = value\n\n def value_string(self):\n return str(self.value)\n\n def update(self):\n pass\n\n\nclass Menu(MenuItem):\n def __init__(self, name, is_main_menu=False):\n # super(Menu, self).__init__(name)\n self.name = name\n self.items = list()\n\n self.is_main_menu = is_main_menu\n\n def add(self, menu_item):\n self.items.append(menu_item)\n\n\nclass MenuViewer:\n def __init__(self, lcd, rotary_encoder, menu, scroll_overflow=False):\n self.__lcd = lcd\n self.__menu = menu\n self.__current_menu = self.__menu\n self.__scroll_overflow = scroll_overflow\n\n self.__selected_item = 0\n self.__selected_row = 0\n\n self.__max_name_length = 8\n self.__max_value_length = 6\n\n self.__name_scroll_index = 0\n self.__name_scroll_direction = 1\n self.__name_scroll_skip = 3\n self.__value_scroll_index = 0\n self.__value_scroll_direction = 1\n self.__value_scroll_skip = 3\n\n self.__rotary_encoder = rotary_encoder\n self.__rotary_encoder.register_callback(EventType.ROTATION, self.__rotary_encoder_rotation)\n self.__rotary_encoder.register_callback(EventType.BUTTON, self.__rotary_encoder_button)\n\n self.__stopped = False\n\n Timer(1.0, self.scroll).start()\n\n def stop(self):\n self.__stopped = True\n\n def __rotary_encoder_rotation(self, direction):\n prior_item = self.__selected_item\n if len(self.__current_menu.items) == 0:\n return\n self.__selected_row += direction\n self.__selected_row = min(self.__selected_row, self.__lcd._lines - 1)\n self.__selected_row = max(self.__selected_row, 0)\n\n self.__selected_item += direction\n if self.__scroll_overflow:\n if self.__selected_item >= len(self.__current_menu.items):\n self.__selected_item = 0\n self.__selected_row = 0\n elif self.__selected_item < 0:\n self.__selected_item = len(self.__current_menu.items) - 1\n self.__selected_row = self.__lcd._lines - 1\n else:\n self.__selected_item = min(self.__selected_item, len(self.__current_menu.items) - 1)\n self.__selected_item = max(self.__selected_item, 0)\n\n if prior_item != self.__selected_item:\n # Reset scrolling\n self.__name_scroll_index = 0\n self.__name_scroll_direction = 1\n self.__name_scroll_skip = 3\n self.__value_scroll_index = 0\n self.__value_scroll_direction = 1\n self.__value_scroll_skip = 3\n\n self.show()\n\n def __rotary_encoder_button(self, state):\n self.show()\n\n def scroll(self):\n item = self.__menu.items[self.__selected_item]\n name_length = len(item.name)\n if self.__name_scroll_skip > 0:\n self.__name_scroll_skip -= 1\n elif name_length > self.__max_name_length:\n self.__name_scroll_index += self.__name_scroll_direction\n if self.__max_name_length + self.__name_scroll_index >= name_length or self.__name_scroll_index == 0:\n self.__name_scroll_direction *= -1\n self.__name_scroll_skip = 3\n name = item.name[self.__name_scroll_index:self.__max_name_length + self.__name_scroll_index]\n 
self.__lcd.set_cursor(1, self.__selected_row)\n self.__lcd.message(name)\n\n value_length = len(item.value_string())\n if self.__value_scroll_skip > 0:\n self.__value_scroll_skip -= 1\n elif value_length > self.__max_value_length:\n self.__value_scroll_index += self.__value_scroll_direction\n if self.__max_value_length + self.__value_scroll_index >= value_length or self.__value_scroll_index == 0:\n self.__value_scroll_direction *= -1\n self.__value_scroll_skip = 3\n value = item.value_string()[self.__value_scroll_index:self.__max_value_length + self.__value_scroll_index]\n self.__lcd.set_cursor(self.__lcd._cols - self.__max_value_length, self.__selected_row)\n self.__lcd.message(value)\n\n if not self.__stopped:\n Timer(0.5, self.scroll).start()\n\n def show(self):\n self.__menu.update()\n self.__lcd.clear()\n lower_index = self.__selected_item - self.__selected_row\n row = 0\n for index in range(lower_index, lower_index + self.__lcd._lines):\n item = self.__menu.items[index]\n if row == self.__selected_row:\n self.__lcd.message(\"~\") # ~ equals an arrow\n else:\n self.__lcd.message(\" \")\n\n self.__lcd.message(item.name[0:self.__max_name_length])\n\n value_string = str(item.value_string())\n value_length = min(len(value_string), self.__max_value_length)\n self.__lcd.set_cursor(self.__lcd._cols - value_length, row)\n self.__lcd.message(value_string)\n\n self.__lcd.message(\"\\n\")\n row += 1\n" }, { "alpha_fraction": 0.7477954030036926, "alphanum_fraction": 0.7504408955574036, "avg_line_length": 34.4375, "blob_id": "fa86ace286926c35ef2889ae059c260b58d390b2", "content_id": "ba1d008f0742627b21dcad263c21e9b887e27377", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2268, "license_type": "no_license", "max_line_length": 80, "num_lines": 64, "path": "/r0vert_sequence_recorder/include/sequence_recorder/sequence.h", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "/* Copyright (c) 2017, Malte Splietker\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n * * Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n * * The names of contributors may not be used to endorse or promote\n * products derived from this software without specific prior written\n * permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n\n#ifndef CAMERA_RECORDER_SEQUENCE_H\n#define CAMERA_RECORDER_SEQUENCE_H\n\n#include <boost/filesystem.hpp>\n#include <boost/property_tree/ptree.hpp>\n#include <opencv/cv.hpp>\n#include <r0vert_msgs/WheelVelocity.h>\n\nnamespace fs = boost::filesystem;\nnamespace pt = boost::property_tree;\n\nnamespace sequence_recorder\n{\n\nclass Sequence\n{\npublic:\n Sequence(const std::string &output_directory, bool grayscale=true);\n\n void AddFrame(const cv::Mat &frame);\n\n void AddWheelVelocity(const r0vert_msgs::WheelVelocity::ConstPtr &velocity);\n\n void WriteMetadata();\n\nprivate:\n fs::path output_path_;\n\n bool grayscale_;\n\n pt::ptree wheel_velocity_tree_;\n\n pt::ptree frames_tree_;\n};\n\n} // namespace sequence_recorder\n\n#endif //CAMERA_RECORDER_SEQUENCE_H\n" }, { "alpha_fraction": 0.6694074869155884, "alphanum_fraction": 0.6798065304756165, "avg_line_length": 27.91608428955078, "blob_id": "77909f4ab934982df2f94e3c76407f03c47d7f5d", "content_id": "217ff5c1794795f286bf620fc7c5de4a681a8c58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4135, "license_type": "no_license", "max_line_length": 79, "num_lines": 143, "path": "/r0vert_panel/nodes/panel.py", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# Copyright (c) 2016, Malte Splietker\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * The names of contributors may not be used to endorse or promote\n# products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nfrom threading import Thread, Event\nimport rospy\nimport RPi.GPIO as GPIO\n\nfrom r0vert_msgs.msg import BatteryVoltage\nfrom r0vert_panel import RotaryEncoder, EventType\nfrom r0vert_panel.menu import MenuViewer\nfrom r0vert_panel.status import Status\n\nimport Adafruit_CharLCD as LCD\n\nlcd_config = {\n \"rs\": 23,\n \"en\": 24,\n \"d4\": 25,\n \"d5\": 8,\n \"d6\": 7,\n \"d7\": 12,\n \"cols\": 16,\n \"lines\": 2,\n \"backlight\": 18,\n \"enable_pwm\": True\n}\n\nrotenc_config = {\n \"pin_a\": 13,\n \"pin_b\": 19,\n \"pin_sw\": 26\n}\n\nstatus = Status()\nviewer = None\nrotenc = None\nlcd = None\nbacklight_timer = None\n\n\nclass BackLightTimer(Thread):\n def __init__(self, timeout, lcd):\n Thread.__init__(self)\n self.timeout = timeout\n self.lcd = lcd\n self.timeout_event = Event()\n self.stop_event = Event()\n\n def stop(self):\n self.stop_event.set()\n self.timeout_event.set()\n\n def reset(self):\n lcd.set_backlight(0)\n self.timeout_event.set()\n\n def run(self):\n while True:\n self.timeout_event.wait(self.timeout)\n if self.stop_event.is_set():\n break\n if not self.timeout_event.is_set():\n lcd.set_backlight(1)\n else:\n self.timeout_event.clear()\n\n\ndef rotenc_callback(value):\n lcd.set_backlight(0)\n backlight_timer.reset()\n\n\ndef battery_callback(data):\n status.battery1.value = data.battery1\n status.battery2.value = data.battery2\n viewer.show()\n\n\ndef shutdown():\n lcd.clear()\n lcd.set_backlight(1)\n backlight_timer.stop()\n viewer.stop()\n GPIO.cleanup()\n\n\ndef main():\n # In ROS, nodes are uniquely named. If two nodes with the same\n # name are launched, the previous one is kicked off. 
The\n # anonymous=True flag means that rospy will choose a unique\n # name for our 'panel' node so that multiple instances can\n # run simultaneously.\n rospy.init_node('panel', anonymous=True)\n rospy.on_shutdown(shutdown)\n\n global rotenc\n rotenc = RotaryEncoder(**rotenc_config)\n rotenc.register_callback(EventType.BUTTON, rotenc_callback)\n rotenc.register_callback(EventType.ROTATION, rotenc_callback)\n\n global lcd, backlight_timer\n lcd = LCD.Adafruit_CharLCD(**lcd_config)\n lcd.clear()\n lcd.set_backlight(0)\n backlight_timer = BackLightTimer(5, lcd)\n backlight_timer.start()\n\n global status, viewer\n viewer = MenuViewer(lcd, rotenc, status)\n viewer.show()\n\n rospy.Subscriber(\"battery\", BatteryVoltage, battery_callback)\n\n rospy.spin()\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6508684158325195, "alphanum_fraction": 0.6649985313415527, "avg_line_length": 25.33333396911621, "blob_id": "7938546c52c5ffac821d6d94d2bbcb1c21c58af9", "content_id": "ecb093d38c9908dac3b2daa42022a43a6d6591f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3397, "license_type": "no_license", "max_line_length": 125, "num_lines": 129, "path": "/r0vert_sequence_recorder/src/image_sequence_recorder.cpp", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "#include <ctime>\n#include <chrono>\n#include <string>\n#include <boost/filesystem.hpp>\n#include <boost/format.hpp>\n#include <boost/property_tree/json_parser.hpp>\n#include <opencv/cv.hpp>\n\n#include <sequence_recorder/image_sequence_recorder.h>\n\nnamespace fs = boost::filesystem;\nusing namespace cv;\n\nnamespace sequence_recorder\n{\n\nImageSequenceRecorder::ImageSequenceRecorder(ros::NodeHandle &nh) :\n n_(nh), is_recording_(false)\n{\n n_.param<int>(\"capture_device\", capture_device_, 0);\n n_.param<std::string>(\"output_directory\", output_directory_, \"/tmp/recordings\");\n n_.param<int>(\"image_width\", image_width_, 640);\n n_.param<int>(\"image_height\", image_height_, 480);\n n_.param<int>(\"framerate\", framerate_, 15);\n n_.param<bool>(\"grayscale\", grayscale_, false);\n\n joystick_sub_ = n_.subscribe<sensor_msgs::Joy>(\n \"/joy\", 10, &ImageSequenceRecorder::JoyCallback, this);\n wheel_velocity_sub_ = n_.subscribe<r0vert_msgs::WheelVelocity>(\n \"/wheel_velocity\", 10, &ImageSequenceRecorder::WheelVelocityCallback, this);\n\n fs::path output_path(output_directory_);\n fs::create_directories(output_path);\n}\n\nvoid ImageSequenceRecorder::JoyCallback(const sensor_msgs::Joy::ConstPtr &ptr)\n{\n if (ptr->buttons[14])\n {\n StartRecording();\n }\n else if (ptr->buttons[15])\n {\n StopRecording();\n }\n}\n\nvoid ImageSequenceRecorder::StartRecording()\n{\n if (is_recording_)\n {\n return;\n }\n recording_thread_ = std::thread(&ImageSequenceRecorder::Record, this);\n ROS_INFO(\"Recording started\");\n}\n\nvoid ImageSequenceRecorder::StopRecording()\n{\n if (not is_recording_)\n {\n return;\n }\n is_recording_ = false;\n recording_thread_.join();\n ROS_INFO(\"Recording stopped\");\n}\n\nvoid ImageSequenceRecorder::Record()\n{\n is_recording_ = true;\n\n sequence_ = std::make_unique<Sequence>(output_directory_, grayscale_);\n\n std::string gst_pipeline = \"v4l2src ! video/x-raw,width=320,height=240,framerate=%d/1,format=RGB ! videoconvert ! 
appsink\";\n VideoCapture capture(boost::str(boost::format(gst_pipeline) % framerate_));\n assert(capture.isOpened());\n\n start_time_ = system_clock::now();\n frame_time_min_ = milliseconds::max();\n frame_time_max_ = milliseconds::min();\n unsigned long frame_count = 0;\n Mat frame;\n while (is_recording_)\n {\n capture >> frame;\n system_clock::time_point new_frame_time = system_clock::now();\n\n try\n {\n sequence_->AddFrame(frame);\n }\n catch (std::runtime_error &e)\n {\n ROS_ERROR(\"Error: exception while writing image: %s\", e.what());\n }\n\n milliseconds diff = duration_cast<milliseconds>(new_frame_time - last_frame_time_);\n if (diff < frame_time_min_)\n {\n frame_time_min_ = diff;\n }\n else if (diff > frame_time_max_)\n {\n frame_time_max_ = diff;\n }\n last_frame_time_ = new_frame_time;\n frame_count += 1;\n };\n\n milliseconds total_time = duration_cast<milliseconds>(system_clock::now() - start_time_);\n milliseconds frame_time_avg = total_time / frame_count;\n ROS_INFO(\"FPS avg: %.2f, min: %.2f, max: %.2f\",\n 1000.0 / frame_time_avg.count(),\n 1000.0 / frame_time_max_.count(),\n 1000.0 / frame_time_min_.count());\n\n sequence_->WriteMetadata();\n}\n\nvoid ImageSequenceRecorder::WheelVelocityCallback(const r0vert_msgs::WheelVelocity::ConstPtr &ptr)\n{\n if (is_recording_)\n {\n sequence_->AddWheelVelocity(ptr);\n }\n}\n\n} // namespace sequence_recorder\n" }, { "alpha_fraction": 0.6532917618751526, "alphanum_fraction": 0.6790027022361755, "avg_line_length": 25.929292678833008, "blob_id": "1dbc00e1ab92db269a62d410fc7d9f560982c675", "content_id": "c68f7044b32414b3b70960419d0a93abb070a98a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2567, "license_type": "no_license", "max_line_length": 80, "num_lines": 99, "path": "/r0vert_firmware/src/as5040.h", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "/* Copyright (c) 2017, Malte Splietker\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n * * Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n * * The names of contributors may not be used to endorse or promote\n * products derived from this software without specific prior written\n * permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n\n#ifndef AS5040_AS5040_H\n#define AS5040_AS5040_H\n\n\n#include \"Arduino.h\"\n\nclass AS5040\n{\npublic:\n AS5040(uint8_t pin_DO, uint8_t pin_CLK, uint8_t pin_CS);\n\n void Init();\n\n uint16_t Read();\n\n uint16_t Value();\n\n bool IsValid();\n\n bool GetStatus(uint8_t field);\n\n enum Status\n {\n /**\n * Magnitude decreased (magnet pushed towards IC)\n */\n STATUS_MAG_DEC = 0,\n /**\n * Magnitude increased (magnet pulled away from IC)\n */\n STATUS_MAG_INC,\n /**\n * Linearity alarm (output linearity critical)\n */\n STATUS_LIN,\n /**\n * CORDIC Overflow (data is invalid)\n */\n STATUS_COF,\n /**\n * Offset Compensation Finished (AS5040 is ready)\n */\n STATUS_OCF\n };\n\nprivate:\n inline bool ParityValid(uint16_t data);\n\n /**\n * Last 16 bits read from the AS5040.\n */\n uint16_t last_data_;\n\n /**\n * Data output of the AS5040.\n */\n uint8_t pin_DO_;\n\n /**\n * Clock pin.\n */\n uint8_t pin_CLK_;\n\n /**\n * Chip select pin.\n */\n uint8_t pin_CS_;\n\n\n};\n\n\n#endif //AS5040_AS5040_H\n" }, { "alpha_fraction": 0.5769890546798706, "alphanum_fraction": 0.5859053730964661, "avg_line_length": 36.38461685180664, "blob_id": "cfa34dd0426fda78558728285e712b1c48443052", "content_id": "a0469667abef240c76a4f64f24c13f01621f4faf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5832, "license_type": "no_license", "max_line_length": 108, "num_lines": 156, "path": "/r0vert_panel/src/r0vert_panel/rotary_encoder.py", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "# Copyright (c) 2016, Malte Splietker\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * The names of contributors may not be used to endorse or promote\n# products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nimport RPi.GPIO as GPIO\nimport time\n\n\nclass Flank:\n NONE = 0\n RISING = 1\n FALLING = 2\n\n\nclass Direction:\n CCW = -1\n CW = 1\n\n\nclass EventType:\n ROTATION = 0\n BUTTON = 1\n\n\nclass RotaryEncoder:\n def __init__(self, pin_a, pin_b, pin_sw):\n self.__pin_a = pin_a\n self.__pin_b = pin_b\n self.__pin_sw = pin_sw\n\n self.__switch_state = 0\n\n self.__direction = 0\n self.__state = 0\n self.__fail_count = 0\n self.__state_pin_a = 0\n self.__state_pin_b = 0\n\n self.__callbacks = {\n EventType.ROTATION: [],\n EventType.BUTTON: []\n }\n\n GPIO.setmode(GPIO.BCM) # Numbers GPIOs by physical location\n GPIO.setup(self.__pin_a, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n GPIO.setup(self.__pin_b, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n GPIO.setup(self.__pin_sw, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n GPIO.add_event_detect(self.__pin_a, GPIO.BOTH, callback=self.__rotation_callback, bouncetime=2)\n GPIO.add_event_detect(self.__pin_b, GPIO.BOTH, callback=self.__rotation_callback, bouncetime=2)\n GPIO.add_event_detect(self.__pin_sw, GPIO.BOTH, callback=self.__push_button_callback, bouncetime=20)\n\n def register_callback(self, event_type, callback):\n self.__callbacks[event_type].append(callback)\n\n def __perform_callback(self, event_type, value):\n for function in self.__callbacks[event_type]:\n function(value)\n\n def __update_state(self, channel, flank):\n if self.__fail_count >= 20:\n self.__state = 0\n self.__fail_count = 0\n\n if self.__state == 0 and flank == Flank.RISING:\n if channel == self.__pin_a:\n self.__direction = Direction.CW\n else:\n self.__direction = Direction.CCW\n elif self.__state == 1 and flank == Flank.RISING:\n if channel == self.__pin_a and self.__direction == Direction.CCW:\n pass\n elif channel == self.__pin_b and self.__direction == Direction.CW:\n pass\n else: # Error\n self.__fail_count += 1\n return\n elif self.__state == 2 and flank == Flank.FALLING:\n if channel == self.__pin_a and self.__direction == Direction.CW:\n pass\n elif channel == self.__pin_b and self.__direction == Direction.CCW:\n pass\n else: # Error\n self.__fail_count += 1\n return\n elif self.__state == 3 and flank == Flank.FALLING:\n if channel == self.__pin_a and self.__direction == Direction.CCW:\n pass\n elif channel == self.__pin_b and self.__direction == Direction.CW:\n pass\n else: # Error\n self.__fail_count += 1\n return\n else:\n self.__fail_count += 1\n return\n self.__state += 1\n\n if self.__state == 4:\n self.__state = 0\n self.__perform_callback(EventType.ROTATION, self.__direction)\n\n def __rotation_callback(self, channel):\n # Additional debounce time\n time.sleep(0.002)\n value = GPIO.input(channel)\n\n # Flank detection\n flank = Flank.NONE\n if channel == self.__pin_a:\n if self.__state_pin_a - value == 1: # Pin A RISE\n flank = Flank.RISING\n elif self.__state_pin_a - value == -1: # Pin A FALL\n flank = Flank.FALLING\n self.__state_pin_a = value\n elif channel == self.__pin_b:\n if self.__state_pin_b - value == 1: # Pin B RISE\n flank = Flank.RISING\n elif 
self.__state_pin_b - value == -1: # Pin B FALL\n flank = Flank.FALLING\n self.__state_pin_b = value\n\n if flank == Flank.NONE:\n return\n\n self.__update_state(channel, flank)\n\n def __push_button_callback(self, channel):\n value = GPIO.input(channel)\n if value == 0 and self.__switch_state == 0:\n self.__switch_state = 1\n self.__perform_callback(EventType.BUTTON, 1)\n elif value == 1 and self.__switch_state == 1:\n self.__switch_state = 0\n self.__perform_callback(EventType.BUTTON, 0)\n" }, { "alpha_fraction": 0.7265182733535767, "alphanum_fraction": 0.7333081960678101, "avg_line_length": 33.42856979370117, "blob_id": "2cea035bb6c1b339608f022a4b6f4a9d5f08f538", "content_id": "39bad38e8eede219a98540a85428c00323766b01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2651, "license_type": "no_license", "max_line_length": 112, "num_lines": 77, "path": "/r0vert_firmware/src/wheel_encoder.h", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "/* Copyright (c) 2017, Malte Splietker\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n * * Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n * * The names of contributors may not be used to endorse or promote\n * products derived from this software without specific prior written\n * permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n\n#ifndef R0VERT_FIRMWARE_WHEEL_ENCODER_H\n#define R0VERT_FIRMWARE_WHEEL_ENCODER_H\n\n\n#include <Arduino.h>\n#include \"as5040.h\"\n\nclass WheelEncoder\n{\npublic:\n WheelEncoder(uint8_t pin_DO, uint8_t pin_CLK, uint8_t pin_CS, bool inverted=false);\n\n void Init();\n\n void Update();\n\n /**\n * Calculates the current rotation velocity (in turns/s).\n *\n * Calculates the rotation velocity as observed since the last call. The time between consecutive calls should\n * not be too long as the readings could be erroneous otherwise.\n * @return Current rotation velocity.\n */\n double Velocity();\n\n /**\n * Calculates the diff since the last time this method was called (in turns)\n *\n * @return Rotational diff since last call.\n */\n double IncrementalDiff();\n\nprivate:\n AS5040 sensor_;\n\n /**\n * Denotes if the velocity reading should be reversed (e.g. 
if mounted on the other side)\n */\n bool inverted_;\n\n double last_incremental_diff_sensor_value_;\n\n double last_velocity_sensor_value_;\n\n unsigned long last_velocity_calculation_time_;\n\n double velocity_turn_sum_;\n};\n\n\n#endif //R0VERT_FIRMWARE_WHEEL_ENCODER_H\n" }, { "alpha_fraction": 0.650689423084259, "alphanum_fraction": 0.6825344562530518, "avg_line_length": 26.44144058227539, "blob_id": "fd0aaf1bb854fa69ed54b9a3203dcdc9936bf3dd", "content_id": "e5e880ec0336fa2b6ca344aa771d6d0cefd8ecc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3046, "license_type": "no_license", "max_line_length": 117, "num_lines": 111, "path": "/r0vert_firmware/src/as5040.cpp", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "/* Copyright (c) 2017, Malte Splietker\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n * * Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n * * The names of contributors may not be used to endorse or promote\n * products derived from this software without specific prior written\n * permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n\n#ifndef R0VERT_FIRMWARE_AS5040_H\n#define R0VERT_FIRMWARE_AS5040_H\n\n#include \"as5040.h\"\n\nAS5040::AS5040(uint8_t pin_DO, uint8_t pin_CLK, uint8_t pin_CS) : pin_DO_(pin_DO), pin_CLK_(pin_CLK), pin_CS_(pin_CS)\n{}\n\nvoid AS5040::Init()\n{\n pinMode(pin_DO_, INPUT_PULLUP);\n pinMode(pin_CLK_, OUTPUT);\n pinMode(pin_CS_, OUTPUT);\n\n // Set high to reset AS5040 output_left\n digitalWrite(pin_CS_, HIGH);\n digitalWrite(pin_CLK_, HIGH);\n}\n\nuint16_t AS5040::Read()\n{\n digitalWrite(pin_CS_, LOW);\n\n // Update upper and lower bytes\n uint16_t data = shiftIn(pin_DO_, pin_CLK_, MSBFIRST) << 8;\n data |= shiftIn(pin_DO_, pin_CLK_, MSBFIRST);\n\n digitalWrite(pin_CS_, HIGH);\n\n last_data_ = data;\n\n return Value();\n}\n\nuint16_t AS5040::Value()\n{\n return last_data_ >> 6;\n}\n\nbool AS5040::IsValid()\n{\n if (not ParityValid(last_data_))\n {\n Serial.println();\n Serial.print(\"R1\");\n return false;\n }\n else if (GetStatus(STATUS_MAG_DEC) && GetStatus(STATUS_MAG_INC))\n {\n Serial.println();\n Serial.print(\"R2\");\n return false;\n }\n else if (GetStatus(STATUS_COF))\n {\n Serial.println();\n Serial.print(\"R3\");\n return false;\n }\n\n return true;\n}\n\nbool AS5040::GetStatus(uint8_t field)\n{\n return (last_data_ & (1 << (1 + field))) != 0;\n}\n\nbool AS5040::ParityValid(uint16_t data)\n{\n uint16_t mask = 0x8000;\n uint8_t one_bits = 0;\n while (mask >= 0x0001)\n {\n if (data & mask)\n {\n one_bits += 1;\n }\n mask = mask >> 1;\n }\n\n return (one_bits % 2 == 0);\n}\n\n#endif //R0VERT_FIRMWARE_AS5040_H\n" }, { "alpha_fraction": 0.7231612801551819, "alphanum_fraction": 0.7263590693473816, "avg_line_length": 33.74603271484375, "blob_id": "12ed61b693e69a8585a5ec39b7c17c4919262bcc", "content_id": "aa4636df5e818ad3bb04f6674f697ee5579b8fab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2189, "license_type": "no_license", "max_line_length": 81, "num_lines": 63, "path": "/r0vert_firmware/src/timer.h", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "/* Copyright (c) 2017, Malte Splietker\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n * * Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n * * The names of contributors may not be used to endorse or promote\n * products derived from this software without specific prior written\n * permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 
PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n\n#ifndef R0VERT_FIRMWARE_TIMER_H\n#define R0VERT_FIRMWARE_TIMER_H\n\ntypedef void (*callback_ptr_)(void);\n\n/**\n * A timer for repeated callbacks at fixed time intervals.\n */\nclass Timer\n{\npublic:\n Timer(unsigned long time_interval, callback_ptr_ callback);\n\n /**\n * Repeatedly call (in short intervals) to update timers and trigger callbacks.\n */\n void Update();\n\nprivate:\n /**\n * Time interval between callbacks (in ms).\n */\n unsigned long time_interval_;\n\n /**\n * Pointer to the callback function.\n */\n callback_ptr_ callback_;\n\n /**\n * Time of last callback (in ms).\n */\n unsigned long last_callback_;\n};\n\n\n#endif //R0VERT_FIRMWARE_TIMER_H\n" }, { "alpha_fraction": 0.7167721390724182, "alphanum_fraction": 0.7278481125831604, "avg_line_length": 24.280000686645508, "blob_id": "3747aa4c3b6f6dc56a46d99206abe8cfb4c7d9a3", "content_id": "22d26b296176a5575b23f4e2716ba21174c3c11e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 1264, "license_type": "no_license", "max_line_length": 74, "num_lines": 50, "path": "/r0vert_sequence_recorder/CMakeLists.txt", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8.3)\nproject(r0vert_sequence_recorder)\n\nset(CMAKE_CXX_FLAGS \"-std=c++14 ${CMAKE_CXX_FLAGS}\")\n\nfind_package(catkin REQUIRED COMPONENTS\n joy\n r0vert_msgs\n roscpp\n turtle_actionlib\n )\n\nfind_package(Boost 1.54 REQUIRED\n COMPONENTS system filesystem)\nfind_package(OpenCV 3 REQUIRED)\nfind_package(Threads REQUIRED)\n\ncatkin_package(\n # INCLUDE_DIRS include\n # LIBRARIES sequence_recorder\n CATKIN_DEPENDS joy roscpp turtle_actionlib r0vert_msgs\n DEPENDS OpenCV Threads Boost GStreamer\n)\n\n###########\n## Build ##\n###########\n\ninclude_directories(\n ${catkin_INCLUDE_DIRS}\n ${Boost_INCLUDE_DIRS}\n ${OpenCV_INCLUDE_DIRS}\n include\n)\n\nadd_executable(sequence_recorder_node\n include/sequence_recorder/sequence.h\n src/sequence.cpp\n include/sequence_recorder/image_sequence_recorder.h\n src/image_sequence_recorder.cpp\n include/sequence_recorder/still_sequence_recorder.h\n src/still_sequence_recorder.cpp\n src/sequence_recorder_node.cpp\n )\nadd_dependencies(sequence_recorder_node r0vert_msgs_generate_messages_cpp)\ntarget_link_libraries(sequence_recorder_node\n ${catkin_LIBRARIES}\n ${CMAKE_THREAD_LIBS_INIT}\n opencv_core opencv_highgui opencv_imgproc opencv_calib3d\n )\n" }, { "alpha_fraction": 0.7079176902770996, "alphanum_fraction": 0.7197631001472473, "avg_line_length": 33.869564056396484, "blob_id": "49144f8bcea92366e7820efe4f485af8165cb5a6", "content_id": "972bfdf1fdc5965f040716249a8119b6d2d6e3d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3208, "license_type": "no_license", "max_line_length": 114, "num_lines": 92, "path": "/r0vert_firmware/src/wheel_encoder.cpp", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", 
"text": "/* Copyright (c) 2017, Malte Splietker\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n * * Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n * * The names of contributors may not be used to endorse or promote\n * products derived from this software without specific prior written\n * permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n\n#include <ros.h>\n#include \"wheel_encoder.h\"\n\nWheelEncoder::WheelEncoder(uint8_t pin_DO, uint8_t pin_CLK, uint8_t pin_CS, bool inverted)\n : sensor_(pin_DO, pin_CLK, pin_CS), inverted_(inverted)\n{\n\n}\n\nvoid WheelEncoder::Init()\n{\n sensor_.Init();\n}\n\n/**\n * Calculates the minimum distance between two turn values in [-1,1].\n *\n * The distance between both turn value in [0, 1] is calculated by keeping the minimum difference of both possible\n * directions. The direction of the minimum distance is encoded as follows:\n * > 0 means CW\n * < 0 means CCW\n * @param value1\n * @param value2\n * @return\n */\ndouble turn_distance(double value1, double value2)\n{\n double diff = value1 - value2;\n // Return the minimum of both directions\n if (diff > 0.5)\n {\n return 1 - diff;\n }\n else if (diff < -0.5)\n {\n return -1 - diff;\n }\n return -diff;\n}\n\nvoid WheelEncoder::Update()\n{\n double sensor_value = sensor_.Read() / 1024.0;\n velocity_turn_sum_ += turn_distance(sensor_value, last_velocity_sensor_value_);\n last_velocity_sensor_value_ = sensor_value;\n}\n\ndouble WheelEncoder::Velocity()\n{\n extern ros::NodeHandle nh;\n unsigned long current_time = micros();\n\n double current_velocity = velocity_turn_sum_ / ((current_time - last_velocity_calculation_time_) * 1e-6);\n\n last_velocity_calculation_time_ = current_time;\n velocity_turn_sum_ = 0;\n return inverted_ ? -current_velocity : current_velocity;\n}\n\ndouble WheelEncoder::IncrementalDiff()\n{\n double sensor_value = sensor_.Read() / 1024.0;\n double result = turn_distance(sensor_value, last_incremental_diff_sensor_value_);\n last_incremental_diff_sensor_value_ = sensor_value;\n return inverted_ ? 
-result : result;\n}\n" }, { "alpha_fraction": 0.5801749229431152, "alphanum_fraction": 0.5991253852844238, "avg_line_length": 24.407407760620117, "blob_id": "4ba8d818cdc62033fca03441fa5927f729204feb", "content_id": "f7d121ac75d1ae4c0e60f02a4c0bf4d5ef25a9be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 686, "license_type": "no_license", "max_line_length": 82, "num_lines": 27, "path": "/r0vert_panel/src/r0vert_panel/status.py", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "import glob\nfrom r0vert_panel.menu import Menu, MenuItem\n\n\nclass BatteryMenuItem(MenuItem):\n def value_string(self):\n return \"%.2fV\" % self.value\n\n\nclass Status(Menu):\n def __init__(self):\n super(Status, self).__init__(\"status\", True)\n\n self.battery1 = BatteryMenuItem(\"Bat1\", 0.0)\n self.battery2 = BatteryMenuItem(\"Bat2\", 0.0)\n\n self.joy = MenuItem(\"Joy\", False)\n\n self.add(self.battery1)\n self.add(self.battery2)\n self.add(self.joy)\n\n def update(self):\n if len(glob.glob(\"/sys/class/power_supply/sony_controller_battery*\")) > 0:\n self.joy.value = True\n else:\n self.joy.value = False\n" }, { "alpha_fraction": 0.7129455804824829, "alphanum_fraction": 0.7176360487937927, "avg_line_length": 30.303030014038086, "blob_id": "57fadd748c1a7f90191c1cd7bedfec8ed7774f59", "content_id": "b934ca732e57df73f14322abc191cd32d1b20fa9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 1066, "license_type": "no_license", "max_line_length": 102, "num_lines": 33, "path": "/r0vert_firmware/CMakeLists.txt", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 3.0)\r\nproject(r0vert_firmware)\r\n\r\n# Must stay at the top for proper ordering of include directories\r\n\r\nfind_package(catkin REQUIRED COMPONENTS\r\n rosserial_arduino\r\n rosserial_msgs\r\n r0vert_msgs\r\n )\r\n\r\ncatkin_package(\r\n CATKIN_DEPENDS rosserial_arduino rosserial_msgs r0vert_msgs\r\n)\r\n\r\ninclude_directories(\r\n ${catkin_INCLUDE_DIRS}\r\n)\r\n\r\n# Custom target for generating ros_lib. Automatically executed during catkin_make.\r\nadd_custom_command(\r\n OUTPUT ${PROJECT_NAME}/ros_lib\r\n COMMAND rm -r ${PROJECT_SOURCE_DIR}/lib/ros_lib \\;\r\n ${CATKIN_DEVEL_PREFIX}/env.sh rosrun rosserial_arduino make_libraries.py ${PROJECT_SOURCE_DIR}/lib\r\n)\r\nadd_custom_target(${PROJECT_NAME}_ros_lib ALL # ALL to always execute\r\n DEPENDS ${PROJECT_NAME}/ros_lib rosserial_msgs_genpy)\r\n\r\n# Dirty Hack! 
Only enable executable target when in clion (needs this line for code completion)\r\nif (${CMAKE_ROOT} MATCHES ^.*clion.*$)\r\n include(CMakeListsPrivate.txt)\r\n add_executable(${PROJECT_NAME} ${INCLUDE_LIST} ${SRC_LIST})\r\nendif ()\r\n" }, { "alpha_fraction": 0.7456790208816528, "alphanum_fraction": 0.760493814945221, "avg_line_length": 18.33333396911621, "blob_id": "0220e9a2ddd76a53f3b8d0fb9d67306c692537cb", "content_id": "f0650210b35200f5e2a72eaa52e3ed6fbe1c16c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 405, "license_type": "no_license", "max_line_length": 53, "num_lines": 21, "path": "/r0vert_teleop/CMakeLists.txt", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8.3)\nproject(r0vert_teleop)\n\nset(CMAKE_CXX_FLAGS \"-std=c++14 ${CMAKE_CXX_FLAGS}\")\n\nfind_package(catkin REQUIRED COMPONENTS\n joy\n roscpp\n turtle_actionlib\n)\n\ncatkin_package(\n CATKIN_DEPENDS joy roscpp turtle_actionlib\n)\n\ninclude_directories(\n ${catkin_INCLUDE_DIRS}\n)\n\nadd_executable(teleop_joy src/teleop_joy.cpp)\ntarget_link_libraries(teleop_joy ${catkin_LIBRARIES})" }, { "alpha_fraction": 0.7212088108062744, "alphanum_fraction": 0.7244759202003479, "avg_line_length": 35.37623596191406, "blob_id": "8deec8f0ee7b775187a054c44459f69134380673", "content_id": "a71e77c4e1d9e4fce18985cefa9fedc2d8fa9fe0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3673, "license_type": "no_license", "max_line_length": 103, "num_lines": 101, "path": "/r0vert_sequence_recorder/src/sequence.cpp", "repo_name": "splietker/r0vert_ros", "src_encoding": "UTF-8", "text": "/* Copyright (c) 2017, Malte Splietker\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n * * Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n * * The names of contributors may not be used to endorse or promote\n * products derived from this software without specific prior written\n * permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n\n#include <chrono>\n#include <boost/format.hpp>\n#include <boost/property_tree/json_parser.hpp>\n\n#include <sequence_recorder/sequence.h>\n\nusing namespace std::chrono;\n\nnamespace sequence_recorder\n{\n\nstd::string GetTimeStamp()\n{\n static const std::string TIME_FORMAT = \"%F_%X\";\n\n time_t rawtime;\n time(&rawtime);\n struct tm *timeinfo = localtime(&rawtime);\n char buffer[50];\n strftime(buffer, 50, TIME_FORMAT.c_str(), timeinfo);\n\n return std::string(buffer);\n}\n\nSequence::Sequence(const std::string &output_directory, bool grayscale)\n : grayscale_(grayscale), output_path_(output_directory)\n{\n output_path_ /= GetTimeStamp();\n fs::create_directories(output_path_);\n}\n\nvoid Sequence::AddFrame(const cv::Mat &frame)\n{\n static const std::vector<int> IMAGE_OUTPUT_PARAMETERS({CV_IMWRITE_PXM_BINARY});\n static const std::string filename_format_string = \"%04d.bmp\";\n\n high_resolution_clock::time_point frame_time = high_resolution_clock::now();\n\n if (grayscale_)\n {\n// cvtColor(frame, frame, cv::COLOR_BGR2GRAY);\n }\n\n size_t frame_count = frames_tree_.size();\n std::string filename = boost::str(boost::format(filename_format_string) % frame_count);\n\n fs::path image_path = output_path_ / filename;\n imwrite(image_path.string(), frame, IMAGE_OUTPUT_PARAMETERS);\n\n pt::ptree entry;\n entry.put(\"\", filename);\n frames_tree_.push_back(std::make_pair(std::to_string(frame_time.time_since_epoch().count()), entry));\n}\n\nvoid Sequence::AddWheelVelocity(const r0vert_msgs::WheelVelocity::ConstPtr &velocity)\n{\n nanoseconds now = high_resolution_clock::now().time_since_epoch();\n pt::ptree entry;\n entry.put(\"left\", velocity->left);\n entry.put(\"right\", velocity->right);\n entry.put(\"time\", velocity->time);\n wheel_velocity_tree_.push_back(std::make_pair(std::to_string(now.count()), entry));\n\n}\n\nvoid Sequence::WriteMetadata()\n{\n fs::path wheel_velocity_file_path = output_path_ / \"wheel_velocity.json\";\n pt::write_json(wheel_velocity_file_path.string(), wheel_velocity_tree_);\n\n fs::path frames_file_path = output_path_ / \"frames.json\";\n pt::write_json(frames_file_path.string(), frames_tree_);\n}\n\n} // namespace sequence_recorder" } ]
28
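The doc comment on turn_distance in wheel_encoder.cpp above explains the shortest signed distance between two wheel-angle readings normalized to [0, 1), with the wrap past 1.0 or 0.0 handled explicitly. A minimal Python sketch of the same rule follows, useful for checking the wrap-around cases; it mirrors the C++ helper's sign convention, and the test values are illustrative, not taken from the repository.

def turn_distance(value1, value2):
    # Mirrors the C++ helper in wheel_encoder.cpp: both arguments are
    # wheel angles in [0, 1); the result is the change with the smaller
    # magnitude of the two possible rotation directions.
    diff = value1 - value2
    if diff > 0.5:        # shorter to wrap forward through 1.0
        return 1 - diff
    elif diff < -0.5:     # shorter to wrap backward through 0.0
        return -1 - diff
    return -diff          # no wrap: sign flipped, as in the original

# Wrap-around checks (values are illustrative): a reading that jumps
# between 0.95 and 0.05 is a short 0.10 step across the zero point,
# not a 0.90 step the long way around.
assert abs(turn_distance(0.95, 0.05) - 0.10) < 1e-9
assert abs(turn_distance(0.05, 0.95) - (-0.10)) < 1e-9
assert abs(turn_distance(0.10, 0.05) - (-0.05)) < 1e-9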
stacy-eliz/git_lesson_repository
https://github.com/stacy-eliz/git_lesson_repository
933e5ef24ec810368cda52520967907b929eb300
b8804636bbaaa4e3c358a5f763f7e457cd001790
2261ead6f6e2100029414e364352bb8f1e2bb4b3
refs/heads/master
2021-08-23T20:11:44.334642
2017-12-02T13:12:19
2017-12-02T13:12:19
112,830,562
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5849056839942932, "alphanum_fraction": 0.6792452931404114, "avg_line_length": 16.66666603088379, "blob_id": "406729e7ed05bd2da8ef3372765c3d2a7ffd5438", "content_id": "e28fb67efa7da1314bdb86a4689c945cf54f7312", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 53, "license_type": "no_license", "max_line_length": 29, "num_lines": 3, "path": "/program.py", "repo_name": "stacy-eliz/git_lesson_repository", "src_encoding": "UTF-8", "text": "#!/bin/python3\nprint(\"My first Git program\")\n# 2017!!!\n" } ]
1
Esperanto-Arcade/Teniso
https://github.com/Esperanto-Arcade/Teniso
c1b7b44e74b1470dbc8c143e13c9d01b3442647f
8bce7fcdd0ce2faf8ea746e1fd7621387bf9186f
7fe33b6340d28cad01848c08622f53cbf82090a2
refs/heads/master
2016-08-07T08:23:34.584382
2014-05-14T23:20:03
2014-05-14T23:20:03
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5897867679595947, "alphanum_fraction": 0.6369248032569885, "avg_line_length": 26, "blob_id": "a317ed703dcc6874f524de4d9e1624ac3940015e", "content_id": "d6fd0ad2bf05933dc3d12a4a933434d1f68d8033", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1782, "license_type": "permissive", "max_line_length": 99, "num_lines": 66, "path": "/ludanto.py", "repo_name": "Esperanto-Arcade/Teniso", "src_encoding": "UTF-8", "text": "# Rachel J. Morris \t\t2013-07-24\t\tWTFPL CopyFree license http://copyfree.org/licenses/wtfpl2/license.txt\n\nimport pygame\n\nclass Ludanto:\n\tdef __init__( self, playerNumber ):\n\t\tself.m_dimensioj = [ 10, 50 ]\n\t\t\n\t\tself.m_koloro = pygame.Color( 255, 255, 255 )\n\t\t\n\t\tself.m_koordinatoj \t= [ 0, 0 ]\n\t\tself.m_koordinatoj[1] = 480/2 - self.m_dimensioj[1]/2\n\t\t\n\t\tself.m_rapideco = 0\n\t\tself.m_poentaro \t= 0\n\t\t\n\t\tself.m_suprenKlavo \t= pygame.K_UP\n\t\tself.m_subenKlavo \t= pygame.K_DOWN\n\t\t\n\t\tif ( playerNumber == 1 ):\n\t\t\tself.m_koordinatoj[0] = 10\n\t\t\tself.m_koloro = pygame.Color( 200, 100, 0 )\n\t\telse:\n\t\t\tself.m_koordinatoj[0] = 640 - 10 - self.m_dimensioj[0]\n\t\t\tself.m_koloro = pygame.Color( 0, 100, 200 )\n\t\t\tself.m_suprenKlavo = pygame.K_w\n\t\t\tself.m_subenKlavo = pygame.K_s\n\t# __init__ \n\t\n\tdef Movi( self, klavoj ):\n\t\t# Player Velocity adjustment\n\t\tif \t\t( klavoj[ self.m_suprenKlavo ] ):\n\t\t\tself.m_rapideco -= 1\t\n\t\telif \t( klavoj[ self.m_subenKlavo ] ):\n\t\t\tself.m_rapideco += 1\n\t\telse:\n\t\t\tif ( self.m_rapideco < 0 ):\n\t\t\t\tself.m_rapideco += 1\n\t\t\telif ( self.m_rapideco > 0 ):\n\t\t\t\tself.m_rapideco -= 1\n\t\t\t\n\t\t# Move player \n\t\tself.m_koordinatoj[1] += self.m_rapideco\n\t\t\n\t\t# Adjust coordinates\n\t\tif ( self.m_koordinatoj[1] < 0 ):\n\t\t\tself.m_koordinatoj[1] = 0\n\t\t\tself.m_rapideco = -self.m_rapideco\n\t\telif ( self.m_koordinatoj[1] + self.m_dimensioj[1] > 480 ):\n\t\t\tself.m_koordinatoj[1] = 480 - self.m_dimensioj[1]\n\t\t\tself.m_rapideco = -self.m_rapideco\n\t# Move\n\t\n\tdef Vidigi( self, fenestro ):\n\t\trect = ( self.m_koordinatoj[0], self.m_koordinatoj[1], self.m_dimensioj[0], self.m_dimensioj[1] )\n\t\tpygame.draw.rect( fenestro, self.m_koloro, rect )\n\t# Draw\n\t\n\tdef PreniPoentaro( self ):\n\t\treturn self.m_poentaro\n\t# PreniPoentaro\n\t\n\tdef AldoniAlPoentaro( self ):\n\t\tself.m_poentaro += 1\n\t# AldoniAlPoentaro\n# Player\n" }, { "alpha_fraction": 0.5872358083724976, "alphanum_fraction": 0.6348943114280701, "avg_line_length": 28.072288513183594, "blob_id": "c5b8f571461baf0760c1e52f3f6c76e6a91ea360", "content_id": "1f51fe282fc1acfcf2f51ec6c88dbee03daf2531", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2413, "license_type": "permissive", "max_line_length": 99, "num_lines": 83, "path": "/pilko.py", "repo_name": "Esperanto-Arcade/Teniso", "src_encoding": "UTF-8", "text": "# Rachel J. 
Morris \t\t2013-07-24\t\tWTFPL CopyFree license http://copyfree.org/licenses/wtfpl2/license.txt\n\nimport pygame\n\nclass Pilko:\n\tdef __init__( self ):\n\t\tself.m_dimensioj = [ 25, 25 ]\n\t\t\n\t\tself.m_koloro = pygame.Color( 255, 255, 255 )\n\t\t\n\t\tself.m_koordinatoj \t= [ 0, 0 ]\n\t\tself.Resxargu( 1 )\n\t# __init__ \n\t\n\tdef Resxargu( self, lundantoNombro ):\n\t\tself.m_koordinatoj[0] = 640/2 - self.m_dimensioj[0]/2\n\t\tself.m_koordinatoj[1] = 480/2 - self.m_dimensioj[1]/2\n\t\t\n\t\tif ( lundantoNombro == 1 ):\n\t\t\tself.m_rapideco = [ 2, 0 ]\n\t\telse:\n\t\t\tself.m_rapideco = [ -2, 0 ]\n\t# Resxargu\n\t\n\tdef CxuBatas( self, player ):\n\t\tif ( \tself.m_koordinatoj[0]\t\t\t\t\t< player.m_koordinatoj[0] + player.m_dimensioj[0] and\n\t\t\t\tself.m_koordinatoj[0] + self.m_dimensioj[0] > player.m_koordinatoj[0] and\n\t\t\t\tself.m_koordinatoj[1]\t\t\t\t\t< player.m_koordinatoj[1] + player.m_dimensioj[1] and\n\t\t\t\tself.m_koordinatoj[1] + self.m_dimensioj[1] > player.m_koordinatoj[1] ):\n\t\t\t# Collision\n\t\t\t\n\t\t\tself.m_rapideco[0] = -self.m_rapideco[0]\n\t\t\t\n\t\t\tif ( self.m_rapideco[0] < 0 ):\n\t\t\t\tself.m_rapideco[0] -= 1\n\t\t\telif ( self.m_rapideco[0] > 0 ):\n\t\t\t\tself.m_rapideco[0] += 1\n\t\t\t\n\t\t\t# What part of the paddle was hit?\n\t\t\tmyCenterY \t\t= self.m_koordinatoj[1] + self.m_dimensioj[1]/2\n\t\t\tpaddleCenterY \t= player.m_koordinatoj[1] + player.m_dimensioj[1]/2\n\t\t\t\n\t\t\tif ( myCenterY > paddleCenterY ):\n\t\t\t\tself.m_rapideco[1] += 2\n\t\t\telif ( myCenterY < paddleCenterY ):\n\t\t\t\tself.m_rapideco[1] -= 2\n\t\t\telse:\n\t\t\t\tself.m_rapideco[1] = 0\n\t\t\n\t\t\n\t# CxuBatas\n\t\n\tdef Movi( self ):\t\t\t\n\t\t# Move player \n\t\tself.m_koordinatoj[0] += self.m_rapideco[0]\n\t\tself.m_koordinatoj[1] += self.m_rapideco[1]\n\t\t\n\t\t# Adjust coordinates\n\t\tif ( self.m_koordinatoj[1] < 0 ):\n\t\t\tself.m_koordinatoj[1] = 0\n\t\t\tself.m_rapideco[1] = -self.m_rapideco[1]\n\t\telif ( self.m_koordinatoj[1] + self.m_dimensioj[1] > 480 ):\n\t\t\tself.m_koordinatoj[1] = 480 - self.m_dimensioj[1]\n\t\t\tself.m_rapideco[1] = -self.m_rapideco[1]\n\t# Move\n\t\n\tdef CxuLundanto2Poentas( self ):\n\t\treturn ( self.m_koordinatoj[0] + self.m_dimensioj[0] < 0 )\n\t# CxuLundanto2Poentas\n\t\n\tdef CxuLundanto1Poentas( self ):\n\t\treturn ( self.m_koordinatoj[0] > 640 )\n\t# CxuLundanto1Poentas\n\t\n\tdef Vidigi( self, fenestro ):\n\t\trect = ( self.m_koordinatoj[0], self.m_koordinatoj[1], self.m_dimensioj[0], self.m_dimensioj[1] )\n\t\tpygame.draw.rect( fenestro, self.m_koloro, rect )\n\t# Draw\n\t\n\tdef PreniPoentaro( self ):\n\t\treturn self.m_poentaro\n\t# PreniPoentaro\n# Player\n" }, { "alpha_fraction": 0.6713468432426453, "alphanum_fraction": 0.7197088003158569, "avg_line_length": 24.986486434936523, "blob_id": "3f9c05a3119ca9332ccea118ecbd004b993f9139", "content_id": "a4dfd570b3689c9eacfe5b1a5fb1b6a134c1b6a1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1923, "license_type": "permissive", "max_line_length": 122, "num_lines": 74, "path": "/main.py", "repo_name": "Esperanto-Arcade/Teniso", "src_encoding": "UTF-8", "text": "# Rachel J. 
Morris \t\t2013-07-24\t\tWTFPL CopyFree license http://copyfree.org/licenses/wtfpl2/license.txt\n\nimport pygame\nimport sys\n\nfrom ludanto import Ludanto\nfrom pilko import Pilko\n\npygame.init()\nfenestro = pygame.display.set_mode( ( 640, 480 ) )\n\nfontObj = pygame.font.Font( \"fonts/Averia-Bold.ttf\", 24 )\nfpsTempo = pygame.time.Clock()\n\nludantoUnu = Ludanto( 1 )\nludantoDu = Ludanto( 2 )\n\npilko = Pilko()\n\nwhile True:\n\tfor okazo in pygame.event.get():\n\t\tif ( okazo.type == pygame.QUIT ):\n\t\t\tpygame.quit()\n\t\t\tsys.exit()\n\t# for okazo\n\t\n\tklavoj = pygame.key.get_pressed()\n\tludantoUnu.Movi( klavoj )\n\tludantoDu.Movi( klavoj )\n\t\n\t# Pilko movement\n\tpilko.Movi()\n\tpilko.CxuBatas( ludantoUnu )\n\tpilko.CxuBatas( ludantoDu )\n\t\n\tif ( pilko.CxuLundanto2Poentas() ):\n\t\tpilko.Resxargu( 1 )\n\t\tludantoUnu.AldoniAlPoentaro()\n\telif ( pilko.CxuLundanto1Poentas() ):\n\t\tpilko.Resxargu( 2 )\n\t\tludantoDu.AldoniAlPoentaro()\n\t\n\tpygame.display.update()\n\tfpsTempo.tick( 60 )\n\t\n\t# Draw\n\tfenestro.fill( pygame.Color( 50, 50, 50 ) )\n\n\tludantoUnu.Vidigi( fenestro )\n\tludantoDu.Vidigi( fenestro )\n\tpilko.Vidigi( fenestro )\n\t\n\tpoentaro1teksto = fontObj.render( \"Ludanto 1: \" + str( ludantoUnu.PreniPoentaro() ), False, pygame.Color( 200, 100, 0 ) )\n\tpoentaro2teksto = fontObj.render( \"Ludanto 2: \" + str( ludantoDu.PreniPoentaro() ), False, pygame.Color( 0, 100, 200 ) )\n\t\n\tpoentaro1rektangulo = poentaro1teksto.get_rect()\n\tpoentaro2rektangulo = poentaro2teksto.get_rect()\n\t\n\tpoentaro1rektangulo.x = 10\n\tpoentaro2rektangulo.x = 640 - 170\n\tpoentaro1rektangulo.y = 10\n\tpoentaro2rektangulo.y = 10\n\t\n\tfenestro.blit( poentaro1teksto, poentaro1rektangulo )\n\tfenestro.blit( poentaro2teksto, poentaro2rektangulo )\n\t\n\ttitoloTeksto = fontObj.render( \"Teniso\", False, pygame.Color( 255, 255, 255 ) )\n\ttitoloRektangulo = titoloTeksto.get_rect()\n\ttitoloRektangulo.x = 640/2 - 40\n\ttitoloRektangulo.y = 480 - 50\n\t\n\tfenestro.blit( titoloTeksto, titoloRektangulo )\n\t\n# while True\n" } ]
3
vpalex999/ebilock
https://github.com/vpalex999/ebilock
cf6314693f9493e12e4cad822ab6a9f6b8e9cd07
b27b943ceebbf823da5827ed1b74571feb55546a
40ba217badfcfdab03d2de5c337484219890fd94
refs/heads/master
2020-05-23T01:18:01.480068
2017-03-17T05:26:58
2017-03-17T05:26:58
84,738,907
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.49542030692100525, "alphanum_fraction": 0.5092722177505493, "avg_line_length": 40.81698226928711, "blob_id": "535a174fbbe16434650e1a08a7891c0c02c8fa02", "content_id": "496c7e31a332a3d81eedff899db80a77c30f8b1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24060, "license_type": "no_license", "max_line_length": 136, "num_lines": 530, "path": "/sources/ebilockcmain.py", "repo_name": "vpalex999/ebilock", "src_encoding": "UTF-8", "text": "from crccheck.crc import Crc16CcittFalse\nfrom sources.crc8 import check_crc_8 as crc8\nfrom sources.error import EbException\n\n\nclass Edilock(object):\n \"\"\" class Ebilock\n \"\"\"\n def __init__(self, telegramm):\n self.telegramm = telegramm.split(' ')\n if not self._check_byte_flow():\n raise EbException(\"Error check flow\")\n\n\n# Описание структуры Заголовка и тела сетевого пакета\n desc_header_packet = {\n \"size\": 8, # количество 2х байтовых слов в заголовке - константа\n \"ID_SOURCE_IND\": 0, # ID отправителя\n \"ID_DEST_IND\": 1, # ID получателя\n \"TYPE_PACKET_IND\": 2, # Тип пакета\n \"START_DATA_IND\": 3, # Длинна пакета 4 2х байтовых слова - начало\n \"END_DATA_IND\": 6, # Длинна пакета 4 2х байтовых слова - конец\n \"NUL_BYTE_IND\": 7, # Нулевой байт - всегда 00\n \"PACKET_COUNT_A_IND\": 8, # Счётчик А пакета\n \"PACKET_COUNT_B_IND\": 9, # Счётчик В пакета\n \"START_SIZE_AB_IND\": 10, # Размер блока телеграм - начала\n \"END_SIZE_AB_IND\": 12, # Размер блока телеграм - конец\n \"ID\": # ID идентификатор отправителя или получателя\n {\"0\": \"IPU_GATE_RF\", # Ebilock940\n \"1\": \"EHA\" # Внешняя система\n },\n \"TYPE_ID\": # Тип телеграммы\n {2: \"2 - накачка\",\n 3: \"3 - передача статусов\",\n 4: \"4 - пустая накачка\",\n 5: \"5 - IPU_GATE_RF -> OK\",\n 6: \"6 - OK -> IPU_GATE_RF\"\n },\n \"TLG_AB\":\n {\n \"OK_START\": 0,\n \"OK_END\": 1,\n \"ML_CO\": 2,\n \"COUNT_AB\": 3,\n \"co\":\n {\n 4: \"4 - приказ, телеграмма А (отправитель Ebilock950 R4)\",\n 6: \"6 - приказ, телеграмма B (отправитель Ebilock950 R4)\",\n 8: \"8 - статус, телеграмма А (отправитель EHA)\",\n \"C\": \"C - статус, телеграмма B (отправитель EHA)\"\n },\n },\n }\n\n # Описание структуры блока телеграм\n desc_telegramm_ab = {\n \"pass\": \"\"\n }\n\n telegramm_decode = {\n \"PACKET\": \"\",\n \"ID_SOURCE\": \"\",\n \"ID_DEST\": \"\",\n \"TYPE_PACKET\": \"\",\n \"LENGTH_PACKET\": \"\",\n \"PACKET_COUNT_A\": \"\",\n \"PACKET_COUNT_B\": \"\",\n \"SIZE_AB\": \"\",\n \"TELEGRAMM_AB\": \"\",\n \"RC\": \"\",\n \"TLG_A\": {\n \"BODY_TLG\": \"\",\n \"ADDR_OK\": \"\",\n \"LOOP_OK\": \"\",\n \"AREA_OK\": \"\",\n \"HUB_OK\": \"\",\n \"NUMBER_OK\": \"\",\n \"ML_CO\": \"\",\n \"SIZE\": \"\",\n \"type_co\": \"\",\n \"COUNT\": \"\",\n \"DATA\": \"\",\n \"RC\": \"\"\n },\n \"TLG_B\": {},\n \"STATUS_ZONE\": \"\"\n }\n\n def _check_byte_flow(self):\n \"\"\" Verifying bytes in the packet stream\\\n and writing a package to a dictionary.\\n\n check_byte_flow(\"00, ff\")\\n\n ARG: String of bytes in hex.\n \"\"\"\n\n status = True\n sources = self.telegramm\n if len(sources) < 20:\n print(\"Invalid package '{}' 2xByte, min = 26 2xByte\".format(len(sources)))\n #return False\n for item in sources:\n if item == '':\n status = False\n print(\"Empty value by index '{}'\".format(sources.index(\"\")))\n break\n if len(item) != 2:\n status = False\n print(\"Length value '{}' is not equal to 2\".format(item))\n break\n self.telegramm_decode[\"PACKET\"] = sources\n return status\n\n # Декодирование заголовка пакета 
приказа\n def check_header_packet(self):\n \"\"\" Decoding the packet header\\\n and writing data to a dictionary.\\n\n check_header_packet()\\n\n ARG: String of bytes in hex.\n \"\"\"\n\n status = True\n sources = self.telegramm\n\n # Проверка, сохранение ID отправителя\n tmp = int(sources[self.desc_header_packet[\"ID_SOURCE_IND\"]], 16)\n if tmp > 1:\n print(\"Error! ID_SOURCE = '{}' should be between 0 or 1\".format(tmp))\n status = False\n else:\n # self.telegramm_decode[\"ID_SOURCE\"] = self.desc_header_packet[\"ID\"][str(tmp)]\n self.telegramm_decode[\"ID_SOURCE\"] = tmp\n # Проверка, сохранение ID получателя\n tmp = int(sources[self.desc_header_packet[\"ID_DEST_IND\"]], 16)\n if tmp > 1:\n print(\"Error! ID_DEST = '{}' should be between 0 or 1\".format(tmp))\n status = False\n else:\n # self.telegramm_decode[\"ID_DEST\"] = self.desc_header_packet[\"ID\"][str(tmp)]\n self.telegramm_decode[\"ID_DEST\"] = tmp\n\n # Проверка, сохранение типа пакета\n tmp = int(sources[self.desc_header_packet[\"TYPE_PACKET_IND\"]], 16)\n key_stat = False\n type_id = self.desc_header_packet[\"TYPE_ID\"]\n for key, val in type_id.items():\n if int(key) == tmp:\n self.telegramm_decode[\"TYPE_PACKET\"] = key\n key_stat = True\n break\n if not key_stat:\n print(\"Value '{}' out of range type telegramm\".format(tmp))\n status = False\n\n # Проверка, сохранение соответствия указанной длинны пакета\n tmp = int(''.join(sources[self.desc_header_packet[\"START_DATA_IND\"]:self.desc_header_packet[\"END_DATA_IND\"] + 1]), 16)\n if tmp != len(sources):\n print(\"Error Checking length packet!!! data length = '{0}', actual length = '{1}'\".format(tmp, len(sources)))\n status = False\n else:\n self.telegramm_decode[\"LENGTH_PACKET\"] = tmp\n # Проверка на максимально допустимое значение длинны пакета приказа\n tmp = int(''.join(sources[self.desc_header_packet[\"START_SIZE_AB_IND\"]:self.desc_header_packet[\"END_SIZE_AB_IND\"]]), 16)\n if tmp > 4096:\n print(\"Too long data > 4096 bytes - '{}'\".format(tmp))\n status = False\n # Проверка NULL байта\n tmp = int(sources[self.desc_header_packet[\"NUL_BYTE_IND\"]])\n if tmp != 0:\n print(\"Invalid header structure, Zero byte value = '{}', must be 0\".format(tmp))\n status = False\n return status\n\n # Чтение и проверка согласованности счётчиков A/B пакета приказа\n def _check_count_ab_packet(self):\n \"\"\" Reading and checking the consistency\\\n of counters A / B order package\\n\n check_count_ab_packet()\\n\n ARG: String of bytes in hex.\n \"\"\"\n\n ct_A = self.telegramm_decode[\"PACKET_COUNT_A\"]\n if ct_A == 0 or ct_A == 255:\n print(\"The value can not be 0 or 255: '{}'\".format(ct_A))\n return False\n ct_B = self.telegramm_decode[\"PACKET_COUNT_B\"]\n if ct_B == 0 or ct_B == 255:\n print(\"The value can not be 0 or 255: '{}'\".format(ct_B))\n return False\n ct_a = self.telegramm_decode[\"TLG_A\"][\"COUNT\"]\n if ct_a == 0 or ct_a == 255:\n print(\"The value can not be 0 or 255: '{}'\".format(ct_a))\n return False\n ct_b = self.telegramm_decode[\"TLG_B\"][\"COUNT\"]\n if ct_b == 0 or ct_b == 255:\n print(\"The value can not be 0 or 255: '{}'\".format(ct_b))\n return False\n if ct_A + ct_B == 255:\n if ct_a + ct_b == 255:\n if ct_A - ct_a == 0 and ct_B - ct_b == 0:\n return True\n else:\n print(\"Sum values count packet and count telegramm are not equal\")\n else:\n if ct_A - ct_a == 0:\n print(\"Error_ctb\")\n return False\n else:\n print(\"Error_cta\")\n return False\n else:\n if ct_A - ct_a == 0:\n print(\"Error_ctb_gl\")\n return False\n else:\n 
print(\"Error_cta_gl\")\n return False\n\n # Проверка длинны блока телеграмм A/B\n def check_body_telegramm_ab(self):\n \"\"\" Check the length of the block of telegrams A / B\\n\n check_telegramm_ab(\"00, ff\")\\n\n ARG: String of bytes in hex.\n \"\"\"\n sources = self.telegramm\n # Читаем байт с длинной телеграммы A/B\n size_ab = int(''.join(sources[self.desc_header_packet[\"START_SIZE_AB_IND\"]:self.desc_header_packet[\"END_SIZE_AB_IND\"]]), 16)\n if size_ab == 0:\n print(\"Empty data A/B - '{}'\".format(size_ab))\n return False\n # сохраняем размер телеграммы\n len_tlg_ab = self.telegramm_decode[\"LENGTH_PACKET\"] - 14\n if not len_tlg_ab == size_ab:\n print(\"Error len A/B\")\n return False\n self.telegramm_decode[\"SIZE_AB\"] = size_ab\n start_ab = self.desc_header_packet[\"END_SIZE_AB_IND\"]\n end_ab = size_ab + start_ab\n tlg_ab = sources[start_ab:end_ab]\n if not size_ab == len(tlg_ab):\n print(\"packet length '{0}' is not equal to the value size A/B '{}'\".format(len(tlg_ab), size_ab))\n return False\n else:\n self.telegramm_decode[\"TELEGRAMM_AB\"] = tlg_ab\n #len_tlg_ab = self.telegramm_decode[\"LENGTH_PACKET\"] - 14\n if size_ab == len_tlg_ab:\n return True\n else:\n print(\"packet length '{0}' is not equal to the value size A/B '{}'\".format(len_tlg_ab, size_ab))\n return False\n\n # Проверка контрольной суммы пакета CRC-16\n def _check_rc_16(self):\n \"\"\" checksum packet CRC-16\\n\n _check_rc_16(\"00, ff\")\\n\n ARG: String of bytes in hex.\n \"\"\"\n sources = self.telegramm\n\n r_c = ''.join(sources[len(sources)-2:])\n self.telegramm_decode[\"RC\"] = r_c\n body_packet = bytearray.fromhex(''.join(sources[:len(sources)-2]))\n get_check_rc = Crc16CcittFalse.calchex(body_packet)\n if r_c == get_check_rc.upper():\n return True\n else:\n print(\"Wrong checksum CRC-16 !!!\")\n return False\n\n # битовый сдвиг вправо\n def _bit_shift_right(self, string_byte):\n tmp = int(string_byte, 16)\n return tmp >> 1\n\n # Делает инверсию бит в байте. 
На вход получает список\n def _inversion_byte(self, hex_list):\n tmp = []\n for item in hex_list:\n str_up = \"{:02x}\".format(int(item, 16).__xor__(255))\n tmp.append(str_up.upper())\n return tmp\n\n def decode_telegram(self, dsc_tel, telegramm_dec, type=None):\n \"\"\"\n type = TLG_A or TLG_B\n \"\"\"\n _dsc_tlg = dsc_tel[\"TLG_AB\"]\n telegramm = telegramm_dec[type][\"BODY_TLG\"]\n # Читаем общий адрес ОК\n _ok = ''.join(telegramm[_dsc_tlg[\"OK_START\"]:_dsc_tlg[\"OK_END\"]+1])\n # Запишем общий адрес ОК\n telegramm_dec[type][\"ADDR_OK\"] = _ok\n # Читаем и запишем loop - петля\n telegramm_dec[type][\"LOOP_OK\"] = _ok[0]\n # Читаем и запишем area - область\n telegramm_dec[type][\"AREA_OK\"] = self._bit_shift_right(_ok[1])\n # Читаем и запишем hub - концентратор\n telegramm_dec[type][\"HUB_OK\"] = _ok[2]\n # Читаем и запишем number_ok - номер ОК\n telegramm_dec[type][\"NUMBER_OK\"] = self._bit_shift_right(_ok[3])\n # Читаем и запишем ML/CO\n telegramm_dec[type][\"ML_CO\"] = telegramm[_dsc_tlg[\"ML_CO\"]]\n # Читаем и запишем длинну телеграммы\n telegramm_dec[type][\"SIZE\"] = int(telegramm_dec[type][\"ML_CO\"][0], 16)\n # Читаем и запишем тип телеграммы\n telegramm_dec[type][\"type_co\"] = int(telegramm_dec[type][\"ML_CO\"][1], 16)\n # Читаем и запишем счетчик телеграммы\n telegramm_dec[type][\"COUNT\"] = int(telegramm[_dsc_tlg[\"COUNT_AB\"]], 16)\n # Читаем и запишем блок DATA\n telegramm_dec[type][\"DATA\"] = telegramm[_dsc_tlg[\"COUNT_AB\"]+1:telegramm_dec[type][\"SIZE\"]-1]\n # Читаем и запишем контрольную сумму\n telegramm_dec[type][\"RC\"] = telegramm[telegramm_dec[type][\"SIZE\"]-1]\n # Проверка CRC8\n block_crc = str(telegramm_dec[type][\"ADDR_OK\"]) + \\\n str(telegramm_dec[type][\"ML_CO\"]) + \\\n str(''.join(self.telegramm_decode[type][\"DATA\"]))\n if not telegramm_dec[type][\"RC\"] == crc8(block_crc):\n #print(\"Wrong checksum CRC-8 !!!\")\n return False\n else:\n return True\n\n # Декодируем блок DATA\n def decode_zone_status(self, data_list):\n status_zone = {}\n zon = data_list[::-1]\n try:\n key_zone_ = 1\n for zone in zon:\n bin_zones = \"{:08b}\".format(int(zone, 16))\n zon_offset = -2\n zon_offset_str = 8\n print(\"\")\n for key in range(0, 4):\n status_zone[key_zone_+key] = int(bin_zones[zon_offset:zon_offset_str], 2)\n print(\"Zona_{} = {}\".format(\n key_zone_ + key, int(bin_zones[zon_offset:zon_offset_str], 2)))\n zon_offset += -2\n zon_offset_str -= 2\n key_zone_ += key + 1\n self.telegramm_decode[\"STATUS_ZONE\"] = status_zone\n return True\n except:\n print(\"Error decode block DATA\")\n return False\n\n def _check_id_packet(self):\n type_packet = self.telegramm_decode[\"TYPE_PACKET\"] # ID отправителя\n source_id = self.telegramm_decode[\"ID_SOURCE\"]\n dest_id = self.telegramm_decode[\"ID_DEST\"]\n # Если ID источника и ID получателя равны\n if source_id == dest_id:\n # Если тип передачи IPU_GATE_RF => EHA\n if type_packet == 2 or type_packet == 4 or type_packet == 5:\n if source_id == 0:\n print(\"Error ID resive.\")\n return False\n else:\n print(\"Error ID Send.\")\n return False\n else:\n return True\n\n def _check_type_packet(self):\n type_packet = self.telegramm_decode[\"TYPE_PACKET\"] # ID отправителя\n source_id = self.telegramm_decode[\"ID_SOURCE\"]\n dest_id = self.telegramm_decode[\"ID_DEST\"]\n if source_id == 0 and dest_id == 1 and type_packet == 3 or\\\n source_id == 0 and dest_id == 1 and type_packet == 6 or\\\n source_id == 1 and dest_id == 0 and type_packet == 2 or\\\n source_id == 1 and dest_id == 0 and type_packet == 4 or\\\n source_id == 1 
and dest_id == 0 and type_packet == 6:\n print(\"Error TYPE_ID\")\n return False\n else:\n return True\n\n # Декодируем тело пакета - телеграммы A/B\n def check_decode_ab(self):\n # Описание байтов телеграммы A/B\n\n _desc_tlg = self.desc_header_packet[\"TLG_AB\"]\n # Тело пакета телеграмм A/B\n _telegramm_ab = self.telegramm_decode[\"TELEGRAMM_AB\"]\n # Определяем тип телеграмм\n mlco = _telegramm_ab[_desc_tlg[\"ML_CO\"]]\n type_co = int(mlco[1], 16)\n type_packet = self.telegramm_decode[\"TYPE_PACKET\"] # ID отправителя\n source_id = self.telegramm_decode[\"ID_SOURCE\"]\n dest_id = self.telegramm_decode[\"ID_DEST\"]\n # Проверка направления\n # Если это приказ\n\n if (type_packet == 2 and type_co == 4 or type_packet == 2 and type_co == 6):\n if source_id != 0:\n print(\"Error ID Send.\")\n return False\n if dest_id == 0:\n print(\"Error ID Resive.\")\n return False\n if type_co == 6: # Если первым пришёл приказ, телеграмма B\n print(\"There is no telegram A\")\n return False\n elif type_co == 4: # Приказ, телеграмма A\n self.telegramm_decode[\"TLG_A\"][\"ML_CO\"] = _telegramm_ab[_desc_tlg[\"ML_CO\"]]\n # Вычисляем размер телеграммы А\n self.telegramm_decode[\"TLG_A\"][\"SIZE\"] = int(self.telegramm_decode[\"TLG_A\"][\"ML_CO\"][0], 16)\n # Вычисляем блок телеграммы A\n self.telegramm_decode[\"TLG_A\"][\"BODY_TLG\"] = _telegramm_ab[:self.telegramm_decode[\"TLG_A\"][\"SIZE\"]]\n # Вычисляем блок телеграммы B\n self.telegramm_decode[\"TLG_B\"][\"BODY_TLG\"] = _telegramm_ab[self.telegramm_decode[\"TLG_A\"][\"SIZE\"]:]\n\n # Сравниваем телеграм A и B по длинне\n len_a = len(self.telegramm_decode[\"TLG_A\"][\"BODY_TLG\"])\n len_b = len(self.telegramm_decode[\"TLG_B\"][\"BODY_TLG\"])\n # Если нет телеграммы B\n if len_b == 0:\n print(\"There is no telegram B\")\n return False\n # Если размер телеграмм не совпадает\n if not len_a == len_b:\n # Пишем ошибку\n print(\"The length telegramm A({0}) - is not equal to the length telegramm B({1})\".format(len_a, len_b))\n # Прерываем работу\n return False\n else:\n # Обработка телеграммы А. Проверка CRC\n crc_a_status = self.decode_telegram(self.desc_header_packet, self.telegramm_decode, \"TLG_A\")\n crc_b_status = self.decode_telegram(self.desc_header_packet, self.telegramm_decode, \"TLG_B\")\n if not crc_a_status and not crc_b_status:\n print(\"Wrong checksum CRC-8 of the telegramms A and B!!!\")\n return False\n if not crc_a_status:\n print(\"Wrong checksum CRC-8 of the telegramm A!!!\")\n return False\n # Обработка телеграммы B. 
Проверка CRC\n if not crc_b_status:\n print(\"Wrong checksum CRC-8 of the telegramm B!!!\")\n return False\n # Проверка идентичности телеграмм A/B\n if not self.telegramm_decode[\"TLG_A\"][\"DATA\"] == self._inversion_byte(self.telegramm_decode[\"TLG_B\"][\"DATA\"]):\n print(\"The data telegramm A is not equal to the data telegramm B\")\n return False\n else:\n self.telegramm_decode[\"PACKET_COUNT_A\"] = int(self.telegramm[self.desc_header_packet[\"PACKET_COUNT_A_IND\"]], 16)\n self.telegramm_decode[\"PACKET_COUNT_B\"] = int(self.telegramm[self.desc_header_packet[\"PACKET_COUNT_B_IND\"]], 16)\n ## Проверка счётчиков телеграмм A/B\n #count_ab = self.telegramm_decode[\"TLG_A\"][\"COUNT\"] + self.telegramm_decode[\"TLG_B\"][\"COUNT\"]\n #if not count_ab == 255:\n # print(\"The sum of the values count A/B of telegramm A/B '{}'\\\n # is not equal to the value '255'\".format(count_ab))\n # return False\n #else:\n # if not self.decode_zone_status(self.telegramm_decode[\"TLG_A\"][\"DATA\"]):\n # print(\"Wrong decode block Data\")\n # return False\n # else:\n return True\n\n # Если это передача статусов\n if (type_packet == 3 and type_co == 8 or type_packet == 3 and type_co == 8):\n print(\"This send status\")\n else:\n print(\"Error checking type CO of telegramm. CO = '{}, TYPE_PACKET = '{}'\".format(type_co, type_packet))\n return False\n\n def check_global_count_order(self):\n \"\"\" Reading and checking the consistency\\\n of counters A / B order package\\n\n check_count_ab_packet()\\n\n ARG: String of bytes in hex.\n \"\"\"\n\n ct_A = self.telegramm_decode[\"PACKET_COUNT_A\"]\n if ct_A == 0 or ct_A == 255:\n print(\"The value can not be 0 or 255: '{}'\".format(ct_A))\n return False\n ct_B = self.telegramm_decode[\"PACKET_COUNT_B\"]\n if ct_B == 0 or ct_B == 255:\n print(\"The value can not be 0 or 255: '{}'\".format(ct_B))\n return False\n ct_a = self.telegramm_decode[\"TLG_A\"][\"COUNT\"]\n if ct_a == 0 or ct_a == 255:\n print(\"The value can not be 0 or 255: '{}'\".format(ct_a))\n return False\n ct_b = self.telegramm_decode[\"TLG_B\"][\"COUNT\"]\n if ct_b == 0 or ct_b == 255:\n print(\"The value can not be 0 or 255: '{}'\".format(ct_b))\n return False\n if ct_A + ct_B == 255:\n if ct_a + ct_b == 255:\n if ct_A - ct_a == 0 and ct_B - ct_b == 0:\n return True\n else:\n print(\"Sum values count packet and count telegramm are not equal\")\n else:\n if ct_A - ct_a == 0:\n print(\"Error_ctb\")\n return False\n else:\n print(\"Error_cta\")\n return False\n else:\n if ct_A - ct_a == 0:\n print(\"Error_ctb_gl\")\n return False\n else:\n print(\"Error_cta_gl\")\n return False\n\n ## Проверка счётчиков телеграмм A/B\n #if self.telegramm_decode[\"TLG_A\"][\"COUNT\"] == self.telegramm_decode[\"PACKET_COUNT_A\"] and\\\n # self.telegramm_decode[\"TLG_B\"][\"COUNT\"] == self.telegramm_decode[\"PACKET_COUNT_B\"]:\n # return True\n #else:\n # print(\"Summ Count A/B of packet is not equal to the summ count telegramm A/B\")\n # return False\n\n # Проверка правильности принятой телеграммы\n def check_telegramm(self):\n if not self._check_rc_16() or not\\\n self.check_header_packet() or not\\\n self.check_body_telegramm_ab() or not\\\n self._check_id_packet() or not\\\n self._check_type_packet() or not\\\n self.check_decode_ab() or not\\\n self.check_global_count_order() or not\\\n self.decode_zone_status(''.join(self.telegramm_decode['TLG_A']['DATA'])):\n return False\n else:\n return True\n" } ]
1
tomasatdatabricks/mlflow
https://github.com/tomasatdatabricks/mlflow
62fe70fd735da729b58cbbd04da39db49e260711
4aa102018805eff33e5c176658f59fd116343dd5
aff66fa9f3509949cf5368b963ec050db8d5137f
refs/heads/master
2022-10-15T14:28:45.620117
2019-02-25T23:24:20
2019-02-25T23:24:20
136,542,352
2
0
Apache-2.0
2018-06-07T23:36:27
2019-02-25T23:25:20
2019-05-09T23:19:06
Python
[ { "alpha_fraction": 0.671342670917511, "alphanum_fraction": 0.6833667159080505, "avg_line_length": 36, "blob_id": "29e5f161dc3b1b4344a063091bc319bb4dd99ce8", "content_id": "f224e89e63305f70564d9cb60da1e25bb0e54b1d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 998, "license_type": "permissive", "max_line_length": 87, "num_lines": 27, "path": "/tests/utils/test_utils.py", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom mlflow.utils import get_unique_resource_id\n\n\ndef test_get_unique_resource_id_respects_max_length():\n for max_length in range(5, 30, 5):\n for _ in range(10000):\n assert len(get_unique_resource_id(max_length=max_length)) <= max_length\n\n\ndef test_get_unique_resource_id_with_invalid_max_length_throws_exception():\n with pytest.raises(ValueError):\n get_unique_resource_id(max_length=-50)\n\n with pytest.raises(ValueError):\n get_unique_resource_id(max_length=0)\n\ndef test_get_jsonnable_obj():\n import json\n from mlflow.utils import get_jsonable_obj\n from mlflow.utils.rest_utils import NumpyEncoder\n py_ary = [[\"a\", \"b\", \"c\"],[\"e\", \"f\", \"g\"]]\n np_ary = get_jsonable_obj(np.array(py_ary))\n assert json.dumps(py_ary, cls=NumpyEncoder) == json.dumps(np_ary, cls=NumpyEncoder)\n np_ary = get_jsonable_obj(np.array(py_ary, dtype=type(str)))\n assert json.dumps(py_ary, cls=NumpyEncoder) == json.dumps(np_ary, cls=NumpyEncoder)" }, { "alpha_fraction": 0.6936339735984802, "alphanum_fraction": 0.700596809387207, "avg_line_length": 34.069766998291016, "blob_id": "b7d546f3c6d6e8d31c17ed01cdb80ed8cf0fb643", "content_id": "d2609cec53132acaf862069b308e2b66e04d029d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3016, "license_type": "permissive", "max_line_length": 118, "num_lines": 86, "path": "/README.rst", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "===================\nMLflow Beta Release\n===================\n\n**Note:** The current version of MLflow is a beta release. This means that APIs and data formats\nare subject to change!\n\n**Note 2:** We do not currently support running MLflow on Windows. Despite this, we would appreciate any contributions\nto make MLflow work better on Windows.\n\nInstalling\n----------\nInstall MLflow from PyPi via ``pip install mlflow``\n\nMLflow requires ``conda`` to be on the ``PATH`` for the projects feature.\n\nNightly snapshots of MLflow master are also available `here <https://mlflow-snapshots.s3-us-west-2.amazonaws.com/>`_.\n\nDocumentation\n-------------\nOfficial documentation for MLflow can be found at https://mlflow.org/docs/latest/index.html.\n\nCommunity\n---------\nTo discuss MLflow or get help, please subscribe to our mailing list (mlflow-users@googlegroups.com) or\njoin us on Slack at https://tinyurl.com/mlflow-slack.\n\nTo report bugs, please use GitHub issues.\n\nRunning a Sample App With the Tracking API\n------------------------------------------\nThe programs in ``examples`` use the MLflow Tracking API. For instance, run::\n\n python examples/quickstart/mlflow_tracking.py\n\nThis program will use `MLflow Tracking API <https://mlflow.org/docs/latest/tracking.html>`_,\nwhich logs tracking data in ``./mlruns``. 
This can then be viewed with the Tracking UI.\n\n\nLaunching the Tracking UI\n-------------------------\nThe MLflow Tracking UI will show runs logged in ``./mlruns`` at `<http://localhost:5000>`_.\nStart it with::\n\n mlflow ui\n\n**Note:** Running ``mlflow ui`` from within a clone of MLflow is not recommended - doing so will\nrun the dev UI from source. We recommend running the UI from a different working directory, using the\n``--file-store`` option to specify which log directory to run against. Alternatively, see instructions\nfor running the dev UI in the `contributor guide <CONTRIBUTING.rst>`_.\n\n\nRunning a Project from a URI\n----------------------------\nThe ``mlflow run`` command lets you run a project packaged with a MLproject file from a local path\nor a Git URI::\n\n mlflow run examples/sklearn_elasticnet_wine -P alpha=0.4\n\n mlflow run https://github.com/mlflow/mlflow-example.git -P alpha=0.4\n\nSee ``examples/sklearn_elasticnet_wine`` for a sample project with an MLproject file.\n\n\nSaving and Serving Models\n-------------------------\nTo illustrate managing models, the ``mlflow.sklearn`` package can log scikit-learn models as\nMLflow artifacts and then load them again for serving. There is an example training application in\n``examples/sklearn_logisitic_regression/train.py`` that you can run as follows::\n\n $ python examples/sklearn_logisitic_regression/train.py\n Score: 0.666\n Model saved in run <run-id>\n\n $ mlflow sklearn serve -r <run-id> -m model\n\n $ curl -d '[{\"x\": 1}, {\"x\": -1}]' -H 'Content-Type: application/json' -X POST localhost:5000/invocations\n\n\n\n\n\nContributing\n------------\nWe happily welcome contributions to MLflow. Please see our `contribution guide <CONTRIBUTING.rst>`_\nfor details.\n" }, { "alpha_fraction": 0.6187922954559326, "alphanum_fraction": 0.6214732527732849, "avg_line_length": 40.44444274902344, "blob_id": "89b0f9b49155e7361d970f77b157e83cf423791c", "content_id": "c0a1af758c421722e4059af77d067fc6a19a0ae4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7833, "license_type": "permissive", "max_line_length": 98, "num_lines": 189, "path": "/mlflow/pyfunc/scoring_server.py", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "\"\"\"\nScoring server for python model format.\nThe passed int model is expected to have function:\n predict(pandas.Dataframe) -> pandas.DataFrame\n\nInput, expected intext/csv or application/json format,\nis parsed into pandas.DataFrame and passed to the model.\n\nDefines two endpoints:\n /ping used for health check\n /invocations used for scoring\n\"\"\"\nfrom __future__ import print_function\n\nimport json\nimport traceback\nimport logging\n\nimport pandas as pd\nimport flask\nfrom six import reraise\n\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.protos.databricks_pb2 import MALFORMED_REQUEST, BAD_REQUEST\nfrom mlflow.utils.rest_utils import NumpyEncoder\nfrom mlflow.server.handlers import catch_mlflow_exception\n\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\nfrom mlflow.utils import get_jsonable_obj\n\nCONTENT_TYPE_CSV = \"text/csv\"\nCONTENT_TYPE_JSON = \"application/json\"\nCONTENT_TYPE_JSON_RECORDS_ORIENTED = \"application/json; format=pandas-records\"\nCONTENT_TYPE_JSON_SPLIT_ORIENTED = \"application/json; format=pandas-split\"\n\nCONTENT_TYPES = [\n CONTENT_TYPE_CSV,\n CONTENT_TYPE_JSON,\n CONTENT_TYPE_JSON_RECORDS_ORIENTED,\n 
CONTENT_TYPE_JSON_SPLIT_ORIENTED\n]\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef parse_json_input(json_input, orient=\"split\"):\n \"\"\"\n :param json_input: A JSON-formatted string representation of a Pandas DataFrame, or a stream\n containing such a string representation.\n :param orient: The Pandas DataFrame orientation of the JSON input. This is either 'split'\n or 'records'.\n \"\"\"\n # pylint: disable=broad-except\n try:\n return pd.read_json(json_input, orient=orient)\n except Exception:\n _handle_serving_error(\n error_message=(\n \"Failed to parse input as a Pandas DataFrame. Ensure that the input is\"\n \" a valid JSON-formatted Pandas DataFrame with the `{orient}` orient\"\n \" produced using the `pandas.DataFrame.to_json(..., orient='{orient}')`\"\n \" method.\".format(orient=orient)),\n error_code=MALFORMED_REQUEST)\n\n\ndef parse_csv_input(csv_input):\n \"\"\"\n :param csv_input: A CSV-formatted string representation of a Pandas DataFrame, or a stream\n containing such a string representation.\n \"\"\"\n # pylint: disable=broad-except\n try:\n return pd.read_csv(csv_input)\n except Exception:\n _handle_serving_error(\n error_message=(\n \"Failed to parse input as a Pandas DataFrame. Ensure that the input is\"\n \" a valid CSV-formatted Pandas DataFrame produced using the\"\n \" `pandas.DataFrame.to_csv()` method.\"),\n error_code=MALFORMED_REQUEST)\n\n\ndef _handle_serving_error(error_message, error_code):\n \"\"\"\n Logs information about an exception thrown by model inference code that is currently being\n handled and reraises it with the specified error message. The exception stack trace\n is also included in the reraised error message.\n\n :param error_message: A message for the reraised exception.\n :param error_code: An appropriate error code for the reraised exception. This should be one of\n the codes listed in the `mlflow.protos.databricks_pb2` proto.\n \"\"\"\n traceback_buf = StringIO()\n traceback.print_exc(file=traceback_buf)\n reraise(MlflowException,\n MlflowException(\n message=error_message,\n error_code=error_code,\n stack_trace=traceback_buf.getvalue()))\n\n\nlogged_pandas_records_format_warning = False\n\n\ndef init(model):\n \"\"\"\n Initialize the server. Loads pyfunc model from the path.\n \"\"\"\n app = flask.Flask(__name__)\n\n @app.route('/ping', methods=['GET'])\n def ping(): # pylint: disable=unused-variable\n \"\"\"\n Determine if the container is working and healthy.\n We declare it healthy if we can load the model successfully.\n \"\"\"\n health = model is not None\n status = 200 if health else 404\n return flask.Response(response='\\n', status=status, mimetype='application/json')\n\n @app.route('/invocations', methods=['POST'])\n @catch_mlflow_exception\n def transformation(): # pylint: disable=unused-variable\n \"\"\"\n Do an inference on a single batch of data. 
In this sample server,\n we take data as CSV or json, convert it to a Pandas DataFrame,\n generate predictions and convert them back to CSV.\n \"\"\"\n # Convert from CSV to pandas\n if flask.request.content_type == CONTENT_TYPE_CSV:\n data = flask.request.data.decode('utf-8')\n csv_input = StringIO(data)\n data = parse_csv_input(csv_input=csv_input)\n elif flask.request.content_type == CONTENT_TYPE_JSON:\n global logged_pandas_records_format_warning\n if not logged_pandas_records_format_warning:\n _logger.warning(\n \"**IMPORTANT UPDATE**: Starting in MLflow 0.9.0, requests received with a\"\n \" `Content-Type` header value of `%s` will be interpreted\"\n \" as JSON-serialized Pandas DataFrames with the `split` orient, instead\"\n \" of the `records` orient. The `records` orient is unsafe because\"\n \" it may not preserve column ordering. Client code should be updated to\"\n \" either send serialized DataFrames with the `split` orient and the\"\n \" `%s` content type (recommended) or use the `%s` content type with the\"\n \" `records` orient. For more information, see\"\n \" https://www.mlflow.org/docs/latest/models.html#pyfunc-deployment.\\n\",\n CONTENT_TYPE_JSON,\n CONTENT_TYPE_JSON_SPLIT_ORIENTED,\n CONTENT_TYPE_JSON_RECORDS_ORIENTED)\n logged_pandas_records_format_warning = True\n data = parse_json_input(json_input=flask.request.data.decode('utf-8'),\n orient=\"records\")\n elif flask.request.content_type == CONTENT_TYPE_JSON_RECORDS_ORIENTED:\n data = parse_json_input(json_input=flask.request.data.decode('utf-8'),\n orient=\"records\")\n elif flask.request.content_type == CONTENT_TYPE_JSON_SPLIT_ORIENTED:\n data = parse_json_input(json_input=flask.request.data.decode('utf-8'),\n orient=\"split\")\n else:\n return flask.Response(\n response=(\"This predictor only supports the following content types,\"\n \" {supported_content_types}. Got '{received_content_type}'.\".format(\n supported_content_types=CONTENT_TYPES,\n received_content_type=flask.request.content_type)),\n status=415,\n mimetype='text/plain')\n\n # Do the prediction\n # pylint: disable=broad-except\n try:\n raw_predictions = model.predict(data)\n except Exception:\n _handle_serving_error(\n error_message=(\n \"Encountered an unexpected error while evaluating the model. 
Verify\"\n \" that the serialized input Dataframe is compatible with the model for\"\n \" inference.\"),\n error_code=BAD_REQUEST)\n\n predictions = get_jsonable_obj(raw_predictions, pandas_orient=\"records\")\n result = json.dumps(predictions, cls=NumpyEncoder)\n return flask.Response(response=result, status=200, mimetype='application/json')\n\n return app\n" }, { "alpha_fraction": 0.6436018943786621, "alphanum_fraction": 0.6450237035751343, "avg_line_length": 44.869564056396484, "blob_id": "341eba965bd2f01a8a6b285fb1b508220ace4815", "content_id": "92d22fb45ceaf84ed23d01043bf6a6af21392b44", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2110, "license_type": "permissive", "max_line_length": 99, "num_lines": 46, "path": "/tests/test_cli.py", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "from click.testing import CliRunner\nfrom mock import mock\n\nfrom mlflow.cli import server, run\n\n\ndef test_server_static_prefix_validation():\n with mock.patch(\"mlflow.cli._run_server\") as run_server_mock:\n CliRunner().invoke(server)\n run_server_mock.assert_called_once()\n with mock.patch(\"mlflow.cli._run_server\") as run_server_mock:\n CliRunner().invoke(server, [\"--static-prefix\", \"/mlflow\"])\n run_server_mock.assert_called_once()\n with mock.patch(\"mlflow.cli._run_server\") as run_server_mock:\n result = CliRunner().invoke(server, [\"--static-prefix\", \"mlflow/\"])\n assert \"--static-prefix must begin with a '/'.\" in result.output\n run_server_mock.assert_not_called()\n with mock.patch(\"mlflow.cli._run_server\") as run_server_mock:\n result = CliRunner().invoke(server, [\"--static-prefix\", \"/mlflow/\"])\n assert \"--static-prefix should not end with a '/'.\" in result.output\n run_server_mock.assert_not_called()\n\n\ndef test_mlflow_run():\n with mock.patch(\"mlflow.cli.projects\") as mock_projects:\n result = CliRunner().invoke(run)\n mock_projects.run.assert_not_called()\n assert 'Missing argument \"URI\"' in result.output\n\n with mock.patch(\"mlflow.cli.projects\") as mock_projects:\n CliRunner().invoke(run, [\"project_uri\"])\n mock_projects.run.assert_called_once()\n\n with mock.patch(\"mlflow.cli.projects\") as mock_projects:\n CliRunner().invoke(run, [\"--experiment-id\", \"5\", \"project_uri\"])\n mock_projects.run.assert_called_once()\n\n with mock.patch(\"mlflow.cli.projects\") as mock_projects:\n CliRunner().invoke(run, [\"--experiment-name\", \"random name\", \"project_uri\"])\n mock_projects.run.assert_called_once()\n\n with mock.patch(\"mlflow.cli.projects\") as mock_projects:\n result = CliRunner().invoke(run, [\"--experiment-id\", \"51\",\n \"--experiment-name\", \"name blah\", \"uri\"])\n mock_projects.run.assert_not_called()\n assert \"Specify only one of 'experiment-name' or 'experiment-id' options.\" in result.output\n" }, { "alpha_fraction": 0.6629781126976013, "alphanum_fraction": 0.6639403700828552, "avg_line_length": 38.21697998046875, "blob_id": "3b4f1911e754c06070ff5c5e004abd58d38df88f", "content_id": "1eaaa887a31d6555fd05cfcdaac9d4592685e727", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12471, "license_type": "permissive", "max_line_length": 100, "num_lines": 318, "path": "/mlflow/tracking/utils.py", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nimport os\nimport sys\nimport warnings\n\nimport entrypoints\nfrom 
six.moves import urllib\n\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE\nfrom mlflow.store import DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH\nfrom mlflow.store.dbmodels.db_types import DATABASE_ENGINES\nfrom mlflow.store.file_store import FileStore\nfrom mlflow.store.rest_store import RestStore\nfrom mlflow.store.artifact_repo import ArtifactRepository\nfrom mlflow.utils import env, rest_utils\nfrom mlflow.utils.databricks_utils import get_databricks_host_creds\n\n\n_TRACKING_URI_ENV_VAR = \"MLFLOW_TRACKING_URI\"\n_LOCAL_FS_URI_PREFIX = \"file:///\"\n_REMOTE_URI_PREFIX = \"http://\"\n\n# Extra environment variables which take precedence for setting the basic/bearer\n# auth on http requests.\n_TRACKING_USERNAME_ENV_VAR = \"MLFLOW_TRACKING_USERNAME\"\n_TRACKING_PASSWORD_ENV_VAR = \"MLFLOW_TRACKING_PASSWORD\"\n_TRACKING_TOKEN_ENV_VAR = \"MLFLOW_TRACKING_TOKEN\"\n_TRACKING_INSECURE_TLS_ENV_VAR = \"MLFLOW_TRACKING_INSECURE_TLS\"\n\n_tracking_uri = None\n\n\ndef is_tracking_uri_set():\n \"\"\"Returns True if the tracking URI has been set, False otherwise.\"\"\"\n if _tracking_uri or env.get_env(_TRACKING_URI_ENV_VAR):\n return True\n return False\n\n\ndef set_tracking_uri(uri):\n \"\"\"\n Set the tracking server URI. This does not affect the\n currently active run (if one exists), but takes effect for successive runs.\n\n :param uri:\n\n - An empty string, or a local file path, prefixed with ``file:/``. Data is stored\n locally at the provided file (or ``./mlruns`` if empty).\n - An HTTP URI like ``https://my-tracking-server:5000``.\n - A Databricks workspace, provided as the string \"databricks\" or, to use a\n Databricks CLI\n `profile <https://github.com/databricks/databricks-cli#installation>`_,\n \"databricks://<profileName>\".\n \"\"\"\n global _tracking_uri\n _tracking_uri = uri\n\n\ndef get_tracking_uri():\n \"\"\"\n Get the current tracking URI. This may not correspond to the tracking URI of\n the currently active run, since the tracking URI can be updated via ``set_tracking_uri``.\n\n :return: The tracking URI.\n \"\"\"\n global _tracking_uri\n if _tracking_uri is not None:\n return _tracking_uri\n elif env.get_env(_TRACKING_URI_ENV_VAR) is not None:\n return env.get_env(_TRACKING_URI_ENV_VAR)\n else:\n return os.path.abspath(DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH)\n\n\ndef get_artifact_uri(run_id, artifact_path=None):\n \"\"\"\n Get the absolute URI of the specified artifact in the specified run. If `path` is not specified,\n the artifact root URI of the specified run will be returned; calls to ``log_artifact``\n and ``log_artifacts`` write artifact(s) to subdirectories of the artifact root URI.\n\n :param run_id: The ID of the run for which to obtain an absolute artifact URI.\n :param artifact_path: The run-relative artifact path. For example,\n ``path/to/artifact``. If unspecified, the artifact root URI for the\n specified run will be returned.\n :return: An *absolute* URI referring to the specified artifact or the specified run's artifact\n root. For example, if an artifact path is provided and the specified run uses an\n S3-backed store, this may be a uri of the form\n ``s3://<bucket_name>/path/to/artifact/root/path/to/artifact``. 
If an artifact path\n is not provided and the specified run uses an S3-backed store, this may be a URI of\n the form ``s3://<bucket_name>/path/to/artifact/root``.\n \"\"\"\n if not run_id:\n raise MlflowException(\n message=\"A run_id must be specified in order to obtain an artifact uri!\",\n error_code=INVALID_PARAMETER_VALUE)\n\n store = _get_store()\n run = store.get_run(run_id)\n if artifact_path is None:\n return run.info.artifact_uri\n else:\n # Path separators may not be consistent across all artifact repositories. Therefore, when\n # joining the run's artifact root directory with the artifact's relative path, we use the\n # path module defined by the appropriate artifact repository\n artifact_path_module =\\\n ArtifactRepository.from_artifact_uri(run.info.artifact_uri, store).get_path_module()\n return artifact_path_module.join(run.info.artifact_uri, artifact_path)\n\n\ndef _download_artifact_from_uri(artifact_uri, output_path=None):\n \"\"\"\n :param artifact_uri: The *absolute* URI of the artifact to download.\n :param output_path: The local filesystem path to which to download the artifact. If unspecified,\n a local output path will be created.\n \"\"\"\n store = _get_store(artifact_uri=artifact_uri)\n artifact_path_module =\\\n ArtifactRepository.from_artifact_uri(artifact_uri, store).get_path_module()\n artifact_src_dir = artifact_path_module.dirname(artifact_uri)\n artifact_src_relative_path = artifact_path_module.basename(artifact_uri)\n artifact_repo = ArtifactRepository.from_artifact_uri(\n artifact_uri=artifact_src_dir, store=store)\n return artifact_repo.download_artifacts(\n artifact_path=artifact_src_relative_path, dst_path=output_path)\n\n\ndef _is_local_uri(uri):\n scheme = urllib.parse.urlparse(uri).scheme\n return uri != 'databricks' and (scheme == '' or scheme == 'file')\n\n\ndef _is_http_uri(uri):\n scheme = urllib.parse.urlparse(uri).scheme\n return scheme == 'http' or scheme == 'https'\n\n\ndef _is_databricks_uri(uri):\n \"\"\"Databricks URIs look like 'databricks' (default profile) or 'databricks://profile'\"\"\"\n scheme = urllib.parse.urlparse(uri).scheme\n return scheme == 'databricks' or uri == 'databricks'\n\n\ndef _get_file_store(store_uri, **_):\n path = urllib.parse.urlparse(store_uri).path if store_uri else None\n return FileStore(path, path)\n\n\ndef _is_database_uri(uri):\n if urllib.parse.urlparse(uri).scheme not in DATABASE_ENGINES:\n return False\n return True\n\n\ndef _get_sqlalchemy_store(store_uri, artifact_uri):\n from mlflow.store.sqlalchemy_store import SqlAlchemyStore\n if artifact_uri is None:\n artifact_uri = DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH\n return SqlAlchemyStore(store_uri, artifact_uri)\n\n\ndef _get_rest_store(store_uri, **_):\n def get_default_host_creds():\n return rest_utils.MlflowHostCreds(\n host=store_uri,\n username=os.environ.get(_TRACKING_USERNAME_ENV_VAR),\n password=os.environ.get(_TRACKING_PASSWORD_ENV_VAR),\n token=os.environ.get(_TRACKING_TOKEN_ENV_VAR),\n ignore_tls_verification=os.environ.get(_TRACKING_INSECURE_TLS_ENV_VAR) == 'true',\n )\n return RestStore(get_default_host_creds)\n\n\ndef get_db_profile_from_uri(uri):\n \"\"\"\n Get the Databricks profile specified by the tracking URI (if any), otherwise\n returns None.\n \"\"\"\n parsed_uri = urllib.parse.urlparse(uri)\n if parsed_uri.scheme == \"databricks\":\n return parsed_uri.netloc\n return None\n\n\ndef _get_databricks_rest_store(store_uri, **_):\n profile = get_db_profile_from_uri(store_uri)\n return RestStore(lambda: 
get_databricks_host_creds(profile))\n\n\nclass TrackingStoreRegistry:\n \"\"\"Scheme-based registry for tracking store implementations\n\n This class allows the registration of a function or class to provide an\n implementation for a given scheme of `store_uri` through the `register`\n methods. Implementations declared though the entrypoints\n `mlflow.tracking_store` group can be automatically registered through the\n `register_entrypoints` method.\n\n When instantiating a store through the `get_store` method, the scheme of\n the store URI provided (or inferred from environment) will be used to\n select which implementation to instantiate, which will be called with same\n arguments passed to the `get_store` method.\n \"\"\"\n\n def __init__(self):\n self._registry = {}\n\n def register(self, scheme, store_builder):\n self._registry[scheme] = store_builder\n\n def register_entrypoints(self):\n \"\"\"Register tracking stores provided by other packages\"\"\"\n for entrypoint in entrypoints.get_group_all(\"mlflow.tracking_store\"):\n try:\n self.register(entrypoint.name, entrypoint.load())\n except (AttributeError, ImportError) as exc:\n warnings.warn(\n 'Failure attempting to register tracking store for scheme \"{}\": {}'.format(\n entrypoint.name, str(exc)\n ),\n stacklevel=2\n )\n\n def get_store(self, store_uri=None, artifact_uri=None):\n \"\"\"Get a store from the registry based on the scheme of store_uri\n\n :param store_uri: The store URI. If None, it will be inferred from the environment. This URI\n is used to select which tracking store implementation to instantiate and\n is passed to the constructor of the implementation.\n :param artifact_uri: Artifact repository URI. Passed through to the tracking store\n implementation.\n\n :return: An instance of `mlflow.store.AbstractStore` that fulfills the store URI\n requirements.\n \"\"\"\n store_uri = store_uri if store_uri is not None else get_tracking_uri()\n\n if store_uri == 'databricks':\n # Add colon so databricks is parsed as scheme\n store_uri += ':'\n\n scheme = urllib.parse.urlparse(store_uri).scheme\n try:\n store_builder = self._registry[scheme]\n except KeyError:\n raise MlflowException(\n \"Could not find a registered tracking store for: {}. 
\"\n \"Currently registered schemes are: {}\".format(\n store_uri, list(self._registry.keys())\n )\n )\n return store_builder(store_uri=store_uri, artifact_uri=artifact_uri)\n\n\n_tracking_store_registry = TrackingStoreRegistry()\n_tracking_store_registry.register('', _get_file_store)\n_tracking_store_registry.register('file', _get_file_store)\n_tracking_store_registry.register('databricks', _get_databricks_rest_store)\n\nfor scheme in ['http', 'https']:\n _tracking_store_registry.register(scheme, _get_rest_store)\n\nfor scheme in DATABASE_ENGINES:\n _tracking_store_registry.register(scheme, _get_sqlalchemy_store)\n\n_tracking_store_registry.register_entrypoints()\n\n\ndef _get_store(store_uri=None, artifact_uri=None):\n\n return _tracking_store_registry.get_store(store_uri, artifact_uri)\n\n\ndef _get_model_log_dir(model_name, run_id):\n if not run_id:\n raise Exception(\"Must specify a run_id to get logging directory for a model.\")\n store = _get_store()\n run = store.get_run(run_id)\n artifact_repo = ArtifactRepository.from_artifact_uri(run.info.artifact_uri, store)\n return artifact_repo.download_artifacts(model_name)\n\n\ndef _get_git_url_if_present(uri):\n \"\"\"\n Return the path git_uri#sub_directory if the URI passed is a local path that's part of\n a Git repo, or returns the original URI otherwise.\n :param uri: The expanded uri\n :return: The git_uri#sub_directory if the uri is part of a Git repo,\n otherwise return the original uri\n \"\"\"\n if '#' in uri:\n # Already a URI in git repo format\n return uri\n try:\n from git import Repo, InvalidGitRepositoryError, GitCommandNotFound, NoSuchPathError\n except ImportError as e:\n print(\"Notice: failed to import Git (the git executable is probably not on your PATH),\"\n \" so Git SHA is not available. 
Error: %s\" % e, file=sys.stderr)\n return uri\n try:\n # Check whether this is part of a git repo\n repo = Repo(uri, search_parent_directories=True)\n\n # Repo url\n repo_url = \"file://%s\" % repo.working_tree_dir\n\n # Sub directory\n rlpath = uri.replace(repo.working_tree_dir, '')\n if (rlpath == ''):\n git_path = repo_url\n elif (rlpath[0] == '/'):\n git_path = repo_url + '#' + rlpath[1:]\n else:\n git_path = repo_url + '#' + rlpath\n return git_path\n except (InvalidGitRepositoryError, GitCommandNotFound, ValueError, NoSuchPathError):\n return uri\n" }, { "alpha_fraction": 0.6515693068504333, "alphanum_fraction": 0.657528817653656, "avg_line_length": 34.45070266723633, "blob_id": "6e3e4b994af8ce0d580da2019287d221459b6d57", "content_id": "db20198ea6b761c9cc1bd20d284e319f89cee202", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2517, "license_type": "permissive", "max_line_length": 85, "num_lines": 71, "path": "/tests/tracking/test_fluent.py", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "import os\nimport random\n\nimport mlflow\nfrom mlflow.entities import Experiment\nfrom mlflow.tracking.fluent import _get_experiment_id, _get_experiment_id_from_env, \\\n _EXPERIMENT_NAME_ENV_VAR, _EXPERIMENT_ID_ENV_VAR\nfrom mlflow.utils.file_utils import TempDir\n\n\nclass HelperEnv:\n @classmethod\n def assert_values(cls, exp_id, name):\n assert os.environ.get(_EXPERIMENT_NAME_ENV_VAR) == name\n assert os.environ.get(_EXPERIMENT_ID_ENV_VAR) == exp_id\n\n @classmethod\n def set_values(cls, id=None, name=None):\n if id:\n os.environ[_EXPERIMENT_ID_ENV_VAR] = str(id)\n elif os.environ.get(_EXPERIMENT_ID_ENV_VAR):\n del os.environ[_EXPERIMENT_ID_ENV_VAR]\n\n if name:\n os.environ[_EXPERIMENT_NAME_ENV_VAR] = str(name)\n elif os.environ.get(_EXPERIMENT_NAME_ENV_VAR):\n del os.environ[_EXPERIMENT_NAME_ENV_VAR]\n\n\ndef test_get_experiment_id_from_env():\n # When no env variables are set\n HelperEnv.assert_values(None, None)\n assert _get_experiment_id_from_env() is None\n\n # set only ID\n random_id = random.randint(1, 1e6)\n HelperEnv.set_values(id=random_id)\n HelperEnv.assert_values(str(random_id), None)\n assert _get_experiment_id_from_env() == str(random_id)\n\n # set only name\n with TempDir(chdr=True):\n name = \"random experiment %d\" % random.randint(1, 1e6)\n exp_id = mlflow.create_experiment(name)\n assert exp_id is not None\n HelperEnv.set_values(name=name)\n HelperEnv.assert_values(None, name)\n assert _get_experiment_id_from_env() == exp_id\n\n # set both: assert that name variable takes precedence\n with TempDir(chdr=True):\n name = \"random experiment %d\" % random.randint(1, 1e6)\n exp_id = mlflow.create_experiment(name)\n assert exp_id is not None\n random_id = random.randint(1, 1e6)\n HelperEnv.set_values(name=name, id=random_id)\n HelperEnv.assert_values(str(random_id), name)\n assert _get_experiment_id_from_env() == exp_id\n\n\ndef test_get_experiment_id():\n # When no experiment is active should return default\n assert _get_experiment_id() == Experiment.DEFAULT_EXPERIMENT_ID\n\n # Create a new experiment and set that as active experiment\n with TempDir(chdr=True):\n name = \"Random experiment %d\" % random.randint(1, 1e6)\n exp_id = mlflow.create_experiment(name)\n assert exp_id is not None\n mlflow.set_experiment(name)\n assert _get_experiment_id() == exp_id\n" }, { "alpha_fraction": 0.6088466644287109, "alphanum_fraction": 0.6199924349784851, "avg_line_length": 42.494144439697266, 
"blob_id": "ea824bb159ff3c1d3a78bbc67d4bc2c22fab4bf1", "content_id": "0d3b9f6346a277edfb2e24a906f86937a87ade88", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37144, "license_type": "permissive", "max_line_length": 98, "num_lines": 854, "path": "/tests/store/test_sqlalchemy_store.py", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "import unittest\nimport warnings\n\nimport sqlalchemy\nimport time\nimport mlflow\nimport uuid\n\nfrom mlflow.entities import ViewType, RunTag, SourceType, RunStatus\nfrom mlflow.protos.service_pb2 import SearchExpression\nfrom mlflow.store.dbmodels import models\nfrom mlflow import entities\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.store.sqlalchemy_store import SqlAlchemyStore\nfrom mlflow.utils.file_utils import TempDir\n\nDB_URI = 'sqlite://'\nARTIFACT_URI = 'file://fake file'\n\n\nclass TestSqlAlchemyStoreSqliteInMemory(unittest.TestCase):\n def _setup_database(self, filename=''):\n # use a static file name to initialize sqllite to test retention.\n self.store = SqlAlchemyStore(DB_URI + filename, ARTIFACT_URI)\n self.session = self.store.session\n\n def setUp(self):\n self.maxDiff = None # print all differences on assert failures\n self.store = None\n self.session = None\n self._setup_database()\n\n def tearDown(self):\n if self.store:\n models.Base.metadata.drop_all(self.store.engine)\n\n def _experiment_factory(self, names):\n if type(names) is list:\n return [self.store.create_experiment(name=name) for name in names]\n\n return self.store.create_experiment(name=names)\n\n def test_default_experiment(self):\n experiments = self.store.list_experiments()\n self.assertEqual(len(experiments), 1)\n\n first = experiments[0]\n self.assertEqual(first.experiment_id, 0)\n self.assertEqual(first.name, \"Default\")\n\n def test_default_experiment_lifecycle(self):\n with TempDir(chdr=True) as tmp:\n tmp_file_name = \"sqlite_file_to_lifecycle_test_{}.db\".format(int(time.time()))\n self._setup_database(\"/\" + tmp.path(tmp_file_name))\n default = self.session.query(models.SqlExperiment).filter_by(name='Default').first()\n self.assertEqual(default.experiment_id, 0)\n self.assertEqual(default.lifecycle_stage, entities.LifecycleStage.ACTIVE)\n\n self._experiment_factory('aNothEr')\n all_experiments = [e.name for e in self.store.list_experiments()]\n\n self.assertSequenceEqual(set(['aNothEr', 'Default']), set(all_experiments))\n\n self.store.delete_experiment(0)\n\n self.assertSequenceEqual(['aNothEr'], [e.name for e in self.store.list_experiments()])\n another = self.store.get_experiment(1)\n self.assertEqual('aNothEr', another.name)\n\n default = self.session.query(models.SqlExperiment).filter_by(name='Default').first()\n self.assertEqual(default.experiment_id, 0)\n self.assertEqual(default.lifecycle_stage, entities.LifecycleStage.DELETED)\n\n # destroy SqlStore and make a new one\n del self.store\n self._setup_database(\"/\" + tmp.path(tmp_file_name))\n\n # test that default experiment is not reactivated\n default = self.session.query(models.SqlExperiment).filter_by(name='Default').first()\n self.assertEqual(default.experiment_id, 0)\n self.assertEqual(default.lifecycle_stage, entities.LifecycleStage.DELETED)\n\n self.assertSequenceEqual(['aNothEr'], [e.name for e in self.store.list_experiments()])\n all_experiments = [e.name for e in self.store.list_experiments(ViewType.ALL)]\n self.assertSequenceEqual(set(['aNothEr', 'Default']), 
set(all_experiments))\n\n # ensure that experiment ID dor active experiment is unchanged\n another = self.store.get_experiment(1)\n self.assertEqual('aNothEr', another.name)\n\n self.session.close()\n self.store = None\n\n def test_raise_duplicate_experiments(self):\n with self.assertRaises(Exception):\n self._experiment_factory(['test', 'test'])\n\n def test_raise_experiment_dont_exist(self):\n with self.assertRaises(Exception):\n self.store.get_experiment(experiment_id=100)\n\n def test_delete_experiment(self):\n experiments = self._experiment_factory(['morty', 'rick', 'rick and morty'])\n\n all_experiments = self.store.list_experiments()\n self.assertEqual(len(all_experiments), len(experiments) + 1) # default\n\n exp = experiments[0]\n self.store.delete_experiment(exp)\n\n actual = self.session.query(models.SqlExperiment).get(exp)\n self.assertEqual(len(self.store.list_experiments()), len(all_experiments) - 1)\n\n self.assertEqual(actual.lifecycle_stage, entities.LifecycleStage.DELETED)\n\n def test_get_experiment(self):\n name = 'goku'\n experiment_id = self._experiment_factory(name)\n actual = self.store.get_experiment(experiment_id)\n self.assertEqual(actual.name, name)\n self.assertEqual(actual.experiment_id, experiment_id)\n\n actual_by_name = self.store.get_experiment_by_name(name)\n self.assertEqual(actual_by_name.name, name)\n self.assertEqual(actual_by_name.experiment_id, experiment_id)\n\n def test_list_experiments(self):\n testnames = ['blue', 'red', 'green']\n\n experiments = self._experiment_factory(testnames)\n actual = self.store.list_experiments()\n\n self.assertEqual(len(experiments) + 1, len(actual)) # default\n\n for experiment_id in experiments:\n res = self.session.query(models.SqlExperiment).filter_by(\n experiment_id=experiment_id).first()\n self.assertIn(res.name, testnames)\n self.assertEqual(res.experiment_id, experiment_id)\n\n def test_create_experiments(self):\n result = self.session.query(models.SqlExperiment).all()\n self.assertEqual(len(result), 1)\n\n experiment_id = self.store.create_experiment(name='test exp')\n result = self.session.query(models.SqlExperiment).all()\n self.assertEqual(len(result), 2)\n\n test_exp = self.session.query(models.SqlExperiment).filter_by(name='test exp').first()\n\n self.assertEqual(test_exp.experiment_id, experiment_id)\n self.assertEqual(test_exp.name, 'test exp')\n\n actual = self.store.get_experiment(experiment_id)\n self.assertEqual(actual.experiment_id, experiment_id)\n self.assertEqual(actual.name, 'test exp')\n\n def test_run_tag_model(self):\n run_data = models.SqlTag(run_uuid='tuuid', key='test', value='val')\n self.session.add(run_data)\n self.session.commit()\n tags = self.session.query(models.SqlTag).all()\n self.assertEqual(len(tags), 1)\n\n actual = tags[0].to_mlflow_entity()\n\n self.assertEqual(actual.value, run_data.value)\n self.assertEqual(actual.key, run_data.key)\n\n def test_metric_model(self):\n run_data = models.SqlMetric(run_uuid='testuid', key='accuracy', value=0.89)\n self.session.add(run_data)\n self.session.commit()\n metrics = self.session.query(models.SqlMetric).all()\n self.assertEqual(len(metrics), 1)\n\n actual = metrics[0].to_mlflow_entity()\n\n self.assertEqual(actual.value, run_data.value)\n self.assertEqual(actual.key, run_data.key)\n\n def test_param_model(self):\n run_data = models.SqlParam(run_uuid='test', key='accuracy', value='test param')\n self.session.add(run_data)\n self.session.commit()\n params = self.session.query(models.SqlParam).all()\n self.assertEqual(len(params), 
1)\n\n actual = params[0].to_mlflow_entity()\n\n self.assertEqual(actual.value, run_data.value)\n self.assertEqual(actual.key, run_data.key)\n\n def test_run_needs_uuid(self):\n run = models.SqlRun()\n self.session.add(run)\n\n with self.assertRaises(sqlalchemy.exc.IntegrityError):\n warnings.simplefilter(\"ignore\")\n with warnings.catch_warnings():\n self.session.commit()\n warnings.resetwarnings()\n\n def test_run_data_model(self):\n m1 = models.SqlMetric(key='accuracy', value=0.89)\n m2 = models.SqlMetric(key='recal', value=0.89)\n p1 = models.SqlParam(key='loss', value='test param')\n p2 = models.SqlParam(key='blue', value='test param')\n\n self.session.add_all([m1, m2, p1, p2])\n\n run_data = models.SqlRun(run_uuid=uuid.uuid4().hex)\n run_data.params.append(p1)\n run_data.params.append(p2)\n run_data.metrics.append(m1)\n run_data.metrics.append(m2)\n\n self.session.add(run_data)\n self.session.commit()\n\n run_datums = self.session.query(models.SqlRun).all()\n actual = run_datums[0]\n self.assertEqual(len(run_datums), 1)\n self.assertEqual(len(actual.params), 2)\n self.assertEqual(len(actual.metrics), 2)\n\n def test_run_info(self):\n experiment_id = self._experiment_factory('test exp')\n config = {\n 'experiment_id': experiment_id,\n 'name': 'test run',\n 'user_id': 'Anderson',\n 'run_uuid': 'test',\n 'status': RunStatus.to_string(RunStatus.SCHEDULED),\n 'source_type': SourceType.to_string(SourceType.LOCAL),\n 'source_name': 'Python application',\n 'entry_point_name': 'main.py',\n 'start_time': int(time.time()),\n 'end_time': int(time.time()),\n 'source_version': mlflow.__version__,\n 'lifecycle_stage': entities.LifecycleStage.ACTIVE,\n 'artifact_uri': '//'\n }\n run = models.SqlRun(**config).to_mlflow_entity()\n\n for k, v in config.items():\n v2 = getattr(run.info, k)\n if k == 'source_type':\n self.assertEqual(v, SourceType.to_string(v2))\n elif k == 'status':\n self.assertEqual(v, RunStatus.to_string(v2))\n else:\n self.assertEqual(v, v2)\n\n def _get_run_configs(self, name='test', experiment_id=None):\n return {\n 'experiment_id': experiment_id,\n 'name': name,\n 'user_id': 'Anderson',\n 'run_uuid': uuid.uuid4().hex,\n 'status': RunStatus.to_string(RunStatus.SCHEDULED),\n 'source_type': SourceType.to_string(SourceType.NOTEBOOK),\n 'source_name': 'Python application',\n 'entry_point_name': 'main.py',\n 'start_time': int(time.time()),\n 'end_time': int(time.time()),\n 'source_version': mlflow.__version__,\n 'lifecycle_stage': entities.LifecycleStage.ACTIVE,\n 'artifact_uri': '//'\n }\n\n def _run_factory(self, config=None):\n if not config:\n config = self._get_run_configs()\n\n experiment_id = config.get(\"experiment_id\", None)\n if not experiment_id:\n experiment_id = self._experiment_factory('test exp')\n config[\"experiment_id\"] = experiment_id\n\n run = models.SqlRun(**config)\n self.session.add(run)\n\n return run\n\n def test_create_run(self):\n experiment_id = self._experiment_factory('test_create_run')\n expected = self._get_run_configs('booyya', experiment_id=experiment_id)\n\n tags = [RunTag('3', '4'), RunTag('1', '2')]\n actual = self.store.create_run(expected[\"experiment_id\"], expected[\"user_id\"],\n expected[\"name\"],\n SourceType.from_string(expected[\"source_type\"]),\n expected[\"source_name\"], expected[\"entry_point_name\"],\n expected[\"start_time\"], expected[\"source_version\"],\n tags, None)\n\n self.assertEqual(actual.info.experiment_id, expected[\"experiment_id\"])\n self.assertEqual(actual.info.user_id, expected[\"user_id\"])\n 
self.assertEqual(actual.info.name, 'booyya')\n self.assertEqual(actual.info.source_type, SourceType.from_string(expected[\"source_type\"]))\n self.assertEqual(actual.info.source_name, expected[\"source_name\"])\n self.assertEqual(actual.info.source_version, expected[\"source_version\"])\n self.assertEqual(actual.info.entry_point_name, expected[\"entry_point_name\"])\n self.assertEqual(actual.info.start_time, expected[\"start_time\"])\n self.assertEqual(len(actual.data.tags), 3)\n\n name_tag = models.SqlTag(key='mlflow.runName', value='booyya').to_mlflow_entity()\n self.assertListEqual(actual.data.tags, tags + [name_tag])\n\n def test_create_run_with_parent_id(self):\n exp = self._experiment_factory('test_create_run_with_parent_id')\n expected = self._get_run_configs('booyya', experiment_id=exp)\n\n tags = [RunTag('3', '4'), RunTag('1', '2')]\n actual = self.store.create_run(expected[\"experiment_id\"], expected[\"user_id\"],\n expected[\"name\"],\n SourceType.from_string(expected[\"source_type\"]),\n expected[\"source_name\"], expected[\"entry_point_name\"],\n expected[\"start_time\"], expected[\"source_version\"],\n tags, \"parent_uuid_5\")\n\n self.assertEqual(actual.info.experiment_id, expected[\"experiment_id\"])\n self.assertEqual(actual.info.user_id, expected[\"user_id\"])\n self.assertEqual(actual.info.name, 'booyya')\n self.assertEqual(actual.info.source_type, SourceType.from_string(expected[\"source_type\"]))\n self.assertEqual(actual.info.source_name, expected[\"source_name\"])\n self.assertEqual(actual.info.source_version, expected[\"source_version\"])\n self.assertEqual(actual.info.entry_point_name, expected[\"entry_point_name\"])\n self.assertEqual(actual.info.start_time, expected[\"start_time\"])\n self.assertEqual(len(actual.data.tags), 4)\n\n name_tag = models.SqlTag(key='mlflow.runName', value='booyya').to_mlflow_entity()\n parent_id_tag = models.SqlTag(key='mlflow.parentRunId',\n value='parent_uuid_5').to_mlflow_entity()\n self.assertListEqual(actual.data.tags, tags + [parent_id_tag, name_tag])\n\n def test_to_mlflow_entity(self):\n run = self._run_factory()\n run = run.to_mlflow_entity()\n\n self.assertIsInstance(run.info, entities.RunInfo)\n self.assertIsInstance(run.data, entities.RunData)\n\n for metric in run.data.metrics:\n self.assertIsInstance(metric, entities.Metric)\n\n for param in run.data.params:\n self.assertIsInstance(param, entities.Param)\n\n for tag in run.data.tags:\n self.assertIsInstance(tag, entities.RunTag)\n\n def test_delete_run(self):\n run = self._run_factory()\n self.session.commit()\n\n run_uuid = run.run_uuid\n self.store.delete_run(run_uuid)\n actual = self.session.query(models.SqlRun).filter_by(run_uuid=run_uuid).first()\n self.assertEqual(actual.lifecycle_stage, entities.LifecycleStage.DELETED)\n\n deleted_run = self.store.get_run(run_uuid)\n self.assertEqual(actual.run_uuid, deleted_run.info.run_uuid)\n\n def test_log_metric(self):\n run = self._run_factory()\n\n self.session.commit()\n\n tkey = 'blahmetric'\n tval = 100.0\n metric = entities.Metric(tkey, tval, int(time.time()))\n metric2 = entities.Metric(tkey, tval, int(time.time()) + 2)\n self.store.log_metric(run.run_uuid, metric)\n self.store.log_metric(run.run_uuid, metric2)\n\n actual = self.session.query(models.SqlMetric).filter_by(key=tkey, value=tval)\n\n self.assertIsNotNone(actual)\n\n run = self.store.get_run(run.run_uuid)\n\n # SQL store _get_run method returns full history of recorded metrics.\n # Should return duplicates as well\n # MLflow RunData contains only the last 
reported values for metrics.\n sql_run_metrics = self.store._get_run(run.info.run_uuid).metrics\n self.assertEqual(2, len(sql_run_metrics))\n self.assertEqual(1, len(run.data.metrics))\n\n found = False\n for m in run.data.metrics:\n if m.key == tkey and m.value == tval:\n found = True\n\n self.assertTrue(found)\n\n def test_log_metric_uniqueness(self):\n run = self._run_factory()\n\n self.session.commit()\n\n tkey = 'blahmetric'\n tval = 100.0\n metric = entities.Metric(tkey, tval, int(time.time()))\n metric2 = entities.Metric(tkey, 1.02, int(time.time()))\n self.store.log_metric(run.run_uuid, metric)\n\n with self.assertRaises(MlflowException) as e:\n self.store.log_metric(run.run_uuid, metric2)\n self.assertIn(\"must be unique. Metric already logged value\", e.exception.message)\n\n def test_log_null_metric(self):\n run = self._run_factory()\n\n self.session.commit()\n\n tkey = 'blahmetric'\n tval = None\n metric = entities.Metric(tkey, tval, int(time.time()))\n\n with self.assertRaises(MlflowException) as e:\n self.store.log_metric(run.run_uuid, metric)\n self.assertIn(\"Log metric request failed for run ID=\", e.exception.message)\n self.assertIn(\"IntegrityError\", e.exception.message)\n\n def test_log_param(self):\n run = self._run_factory()\n\n self.session.commit()\n\n tkey = 'blahmetric'\n tval = '100.0'\n param = entities.Param(tkey, tval)\n param2 = entities.Param('new param', 'new key')\n self.store.log_param(run.run_uuid, param)\n self.store.log_param(run.run_uuid, param2)\n\n actual = self.session.query(models.SqlParam).filter_by(key=tkey, value=tval)\n self.assertIsNotNone(actual)\n\n run = self.store.get_run(run.run_uuid)\n self.assertEqual(2, len(run.data.params))\n\n found = False\n for m in run.data.params:\n if m.key == tkey and m.value == tval:\n found = True\n\n self.assertTrue(found)\n\n def test_log_param_uniqueness(self):\n run = self._run_factory()\n\n self.session.commit()\n\n tkey = 'blahmetric'\n tval = '100.0'\n param = entities.Param(tkey, tval)\n param2 = entities.Param(tkey, 'newval')\n self.store.log_param(run.run_uuid, param)\n\n with self.assertRaises(MlflowException) as e:\n self.store.log_param(run.run_uuid, param2)\n self.assertIn(\"Changing param value is not allowed. 
Param with key=\", e.exception.message)\n\n def test_log_null_param(self):\n run = self._run_factory()\n\n self.session.commit()\n\n tkey = 'blahmetric'\n tval = None\n param = entities.Param(tkey, tval)\n\n with self.assertRaises(MlflowException) as e:\n self.store.log_param(run.run_uuid, param)\n self.assertIn(\"Log param request failed for run ID=\", e.exception.message)\n self.assertIn(\"IntegrityError\", e.exception.message)\n\n def test_set_tag(self):\n run = self._run_factory()\n\n self.session.commit()\n\n tkey = 'test tag'\n tval = 'a boogie'\n tag = entities.RunTag(tkey, tval)\n self.store.set_tag(run.run_uuid, tag)\n\n actual = self.session.query(models.SqlTag).filter_by(key=tkey, value=tval)\n\n self.assertIsNotNone(actual)\n\n run = self.store.get_run(run.run_uuid)\n\n found = False\n for m in run.data.tags:\n if m.key == tkey and m.value == tval:\n found = True\n\n self.assertTrue(found)\n\n def test_get_metric_history(self):\n run = self._run_factory()\n self.session.commit()\n key = 'test'\n expected = [\n models.SqlMetric(key=key, value=0.6, timestamp=1).to_mlflow_entity(),\n models.SqlMetric(key=key, value=0.7, timestamp=2).to_mlflow_entity()\n ]\n\n for metric in expected:\n self.store.log_metric(run.run_uuid, metric)\n\n actual = self.store.get_metric_history(run.run_uuid, key)\n\n self.assertSequenceEqual([(m.key, m.value, m.timestamp) for m in expected],\n [(m.key, m.value, m.timestamp) for m in actual])\n\n def test_list_run_infos(self):\n experiment_id = self._experiment_factory('test_exp')\n r1 = self._run_factory(self._get_run_configs('t1', experiment_id)).run_uuid\n r2 = self._run_factory(self._get_run_configs('t2', experiment_id)).run_uuid\n\n def _runs(experiment_id, view_type):\n return [r.run_uuid for r in self.store.list_run_infos(experiment_id, view_type)]\n\n self.assertSequenceEqual([r1, r2], _runs(experiment_id, ViewType.ALL))\n self.assertSequenceEqual([r1, r2], _runs(experiment_id, ViewType.ACTIVE_ONLY))\n self.assertEqual(0, len(_runs(experiment_id, ViewType.DELETED_ONLY)))\n\n self.store.delete_run(r1)\n self.assertSequenceEqual([r1, r2], _runs(experiment_id, ViewType.ALL))\n self.assertSequenceEqual([r2], _runs(experiment_id, ViewType.ACTIVE_ONLY))\n self.assertSequenceEqual([r1], _runs(experiment_id, ViewType.DELETED_ONLY))\n\n def test_rename_experiment(self):\n new_name = 'new name'\n experiment_id = self._experiment_factory('test name')\n self.store.rename_experiment(experiment_id, new_name)\n\n renamed_experiment = self.store.get_experiment(experiment_id)\n\n self.assertEqual(renamed_experiment.name, new_name)\n\n def test_update_run_info(self):\n run = self._run_factory()\n new_status = entities.RunStatus.FINISHED\n endtime = int(time.time())\n\n actual = self.store.update_run_info(run.run_uuid, new_status, endtime)\n\n self.assertEqual(actual.status, new_status)\n self.assertEqual(actual.end_time, endtime)\n\n def test_restore_experiment(self):\n experiment_id = self._experiment_factory('helloexp')\n exp = self.store.get_experiment(experiment_id)\n self.assertEqual(exp.lifecycle_stage, entities.LifecycleStage.ACTIVE)\n\n experiment_id = exp.experiment_id\n self.store.delete_experiment(experiment_id)\n\n deleted = self.store.get_experiment(experiment_id)\n self.assertEqual(deleted.experiment_id, experiment_id)\n self.assertEqual(deleted.lifecycle_stage, entities.LifecycleStage.DELETED)\n\n self.store.restore_experiment(exp.experiment_id)\n restored = self.store.get_experiment(exp.experiment_id)\n self.assertEqual(restored.experiment_id, 
experiment_id)\n self.assertEqual(restored.lifecycle_stage, entities.LifecycleStage.ACTIVE)\n\n def test_delete_restore_run(self):\n run = self._run_factory()\n self.assertEqual(run.lifecycle_stage, entities.LifecycleStage.ACTIVE)\n\n run_uuid = run.run_uuid\n\n with self.assertRaises(MlflowException) as e:\n self.store.restore_run(run_uuid)\n self.assertIn(\"must be in 'deleted' state\", e.exception.message)\n\n self.store.delete_run(run_uuid)\n with self.assertRaises(MlflowException) as e:\n self.store.delete_run(run_uuid)\n self.assertIn(\"must be in 'active' state\", e.exception.message)\n\n deleted = self.store.get_run(run_uuid)\n self.assertEqual(deleted.info.run_uuid, run_uuid)\n self.assertEqual(deleted.info.lifecycle_stage, entities.LifecycleStage.DELETED)\n\n self.store.restore_run(run_uuid)\n with self.assertRaises(MlflowException) as e:\n self.store.restore_run(run_uuid)\n self.assertIn(\"must be in 'deleted' state\", e.exception.message)\n restored = self.store.get_run(run_uuid)\n self.assertEqual(restored.info.run_uuid, run_uuid)\n self.assertEqual(restored.info.lifecycle_stage, entities.LifecycleStage.ACTIVE)\n\n def test_error_logging_to_deleted_run(self):\n exp = self._experiment_factory('error_logging')\n run_uuid = self._run_factory(self._get_run_configs(experiment_id=exp)).run_uuid\n\n self.store.delete_run(run_uuid)\n self.assertEqual(self.store.get_run(run_uuid).info.lifecycle_stage,\n entities.LifecycleStage.DELETED)\n with self.assertRaises(MlflowException) as e:\n self.store.log_param(run_uuid, entities.Param(\"p1345\", \"v1\"))\n self.assertIn(\"must be in 'active' state\", e.exception.message)\n\n with self.assertRaises(MlflowException) as e:\n self.store.log_metric(run_uuid, entities.Metric(\"m1345\", 1.0, 123))\n self.assertIn(\"must be in 'active' state\", e.exception.message)\n\n with self.assertRaises(MlflowException) as e:\n self.store.set_tag(run_uuid, entities.RunTag(\"t1345\", \"tv1\"))\n self.assertIn(\"must be in 'active' state\", e.exception.message)\n\n # restore this run and try again\n self.store.restore_run(run_uuid)\n self.assertEqual(self.store.get_run(run_uuid).info.lifecycle_stage,\n entities.LifecycleStage.ACTIVE)\n self.store.log_param(run_uuid, entities.Param(\"p1345\", \"v22\"))\n self.store.log_metric(run_uuid, entities.Metric(\"m1345\", 34.0, 85)) # earlier timestamp\n self.store.set_tag(run_uuid, entities.RunTag(\"t1345\", \"tv44\"))\n\n run = self.store.get_run(run_uuid)\n assert len(run.data.params) == 1\n p = run.data.params[0]\n self.assertEqual(p.key, \"p1345\")\n self.assertEqual(p.value, \"v22\")\n assert len(run.data.metrics) == 1\n m = run.data.metrics[0]\n self.assertEqual(m.key, \"m1345\")\n self.assertEqual(m.value, 34.0)\n run = self.store.get_run(run_uuid)\n self.assertEqual([(\"p1345\", \"v22\")],\n [(p.key, p.value) for p in run.data.params if p.key == \"p1345\"])\n self.assertEqual([(\"m1345\", 34.0, 85)],\n [(m.key, m.value, m.timestamp)\n for m in run.data.metrics if m.key == \"m1345\"])\n self.assertEqual([(\"t1345\", \"tv44\")],\n [(t.key, t.value) for t in run.data.tags if t.key == \"t1345\"])\n\n # Tests for Search API\n def _search(self, experiment_id, metrics_expressions=None, param_expressions=None,\n run_view_type=ViewType.ALL):\n conditions = (metrics_expressions or []) + (param_expressions or [])\n return [r.info.run_uuid\n for r in self.store.search_runs([experiment_id], conditions, run_view_type)]\n\n def _param_expression(self, key, comparator, val):\n expr = SearchExpression()\n expr.parameter.key = 
key\n expr.parameter.string.comparator = comparator\n expr.parameter.string.value = val\n return expr\n\n def _metric_expression(self, key, comparator, val):\n expr = SearchExpression()\n expr.metric.key = key\n expr.metric.double.comparator = comparator\n expr.metric.double.value = val\n return expr\n\n def test_search_vanilla(self):\n exp = self._experiment_factory('search_vanilla')\n runs = [self._run_factory(self._get_run_configs('r_%d' % r, exp)).run_uuid\n for r in range(3)]\n\n self.assertSequenceEqual(runs, self._search(exp, run_view_type=ViewType.ALL))\n self.assertSequenceEqual(runs, self._search(exp, run_view_type=ViewType.ACTIVE_ONLY))\n self.assertSequenceEqual([], self._search(exp, run_view_type=ViewType.DELETED_ONLY))\n\n first = runs[0]\n\n self.store.delete_run(first)\n self.assertSequenceEqual(runs, self._search(exp, run_view_type=ViewType.ALL))\n self.assertSequenceEqual(runs[1:], self._search(exp, run_view_type=ViewType.ACTIVE_ONLY))\n self.assertSequenceEqual([first], self._search(exp, run_view_type=ViewType.DELETED_ONLY))\n\n self.store.restore_run(first)\n self.assertSequenceEqual(runs, self._search(exp, run_view_type=ViewType.ALL))\n self.assertSequenceEqual(runs, self._search(exp, run_view_type=ViewType.ACTIVE_ONLY))\n self.assertSequenceEqual([], self._search(exp, run_view_type=ViewType.DELETED_ONLY))\n\n def test_search_params(self):\n experiment_id = self._experiment_factory('search_params')\n r1 = self._run_factory(self._get_run_configs('r1', experiment_id)).run_uuid\n r2 = self._run_factory(self._get_run_configs('r2', experiment_id)).run_uuid\n\n self.store.log_param(r1, entities.Param('generic_param', 'p_val'))\n self.store.log_param(r2, entities.Param('generic_param', 'p_val'))\n\n self.store.log_param(r1, entities.Param('generic_2', 'some value'))\n self.store.log_param(r2, entities.Param('generic_2', 'another value'))\n\n self.store.log_param(r1, entities.Param('p_a', 'abc'))\n self.store.log_param(r2, entities.Param('p_b', 'ABC'))\n\n # test search returns both runs\n expr = self._param_expression(\"generic_param\", \"=\", \"p_val\")\n self.assertSequenceEqual([r1, r2], self._search(experiment_id, param_expressions=[expr]))\n\n # test search returns appropriate run (same key different values per run)\n expr = self._param_expression(\"generic_2\", \"=\", \"some value\")\n self.assertSequenceEqual([r1], self._search(experiment_id, param_expressions=[expr]))\n expr = self._param_expression(\"generic_2\", \"=\", \"another value\")\n self.assertSequenceEqual([r2], self._search(experiment_id, param_expressions=[expr]))\n\n expr = self._param_expression(\"generic_param\", \"=\", \"wrong_val\")\n self.assertSequenceEqual([], self._search(experiment_id, param_expressions=[expr]))\n\n expr = self._param_expression(\"generic_param\", \"!=\", \"p_val\")\n self.assertSequenceEqual([], self._search(experiment_id, param_expressions=[expr]))\n\n expr = self._param_expression(\"generic_param\", \"!=\", \"wrong_val\")\n self.assertSequenceEqual([r1, r2], self._search(experiment_id, param_expressions=[expr]))\n expr = self._param_expression(\"generic_2\", \"!=\", \"wrong_val\")\n self.assertSequenceEqual([r1, r2], self._search(experiment_id, param_expressions=[expr]))\n\n expr = self._param_expression(\"p_a\", \"=\", \"abc\")\n self.assertSequenceEqual([r1], self._search(experiment_id, param_expressions=[expr]))\n\n expr = self._param_expression(\"p_b\", \"=\", \"ABC\")\n self.assertSequenceEqual([r2], self._search(experiment_id, param_expressions=[expr]))\n\n def 
test_search_metrics(self):\n experiment_id = self._experiment_factory('search_params')\n r1 = self._run_factory(self._get_run_configs('r1', experiment_id)).run_uuid\n r2 = self._run_factory(self._get_run_configs('r2', experiment_id)).run_uuid\n\n self.store.log_metric(r1, entities.Metric(\"common\", 1.0, 1))\n self.store.log_metric(r2, entities.Metric(\"common\", 1.0, 1))\n\n self.store.log_metric(r1, entities.Metric(\"measure_a\", 1.0, 1))\n self.store.log_metric(r2, entities.Metric(\"measure_a\", 200.0, 2))\n self.store.log_metric(r2, entities.Metric(\"measure_a\", 400.0, 3))\n\n self.store.log_metric(r1, entities.Metric(\"m_a\", 2.0, 2))\n self.store.log_metric(r2, entities.Metric(\"m_b\", 3.0, 2))\n self.store.log_metric(r2, entities.Metric(\"m_b\", 4.0, 8)) # this is last timestamp\n self.store.log_metric(r2, entities.Metric(\"m_b\", 8.0, 3))\n\n expr = self._metric_expression(\"common\", \"=\", 1.0)\n self.assertSequenceEqual([r1, r2], self._search(experiment_id, param_expressions=[expr]))\n\n expr = self._metric_expression(\"common\", \">\", 0.0)\n self.assertSequenceEqual([r1, r2], self._search(experiment_id, param_expressions=[expr]))\n\n expr = self._metric_expression(\"common\", \">=\", 0.0)\n self.assertSequenceEqual([r1, r2], self._search(experiment_id, param_expressions=[expr]))\n\n expr = self._metric_expression(\"common\", \"<\", 4.0)\n self.assertSequenceEqual([r1, r2], self._search(experiment_id, param_expressions=[expr]))\n\n expr = self._metric_expression(\"common\", \"<=\", 4.0)\n self.assertSequenceEqual([r1, r2], self._search(experiment_id, param_expressions=[expr]))\n\n expr = self._metric_expression(\"common\", \"!=\", 1.0)\n self.assertSequenceEqual([], self._search(experiment_id, param_expressions=[expr]))\n\n expr = self._metric_expression(\"common\", \">=\", 3.0)\n self.assertSequenceEqual([], self._search(experiment_id, param_expressions=[expr]))\n\n expr = self._metric_expression(\"common\", \"<=\", 0.75)\n self.assertSequenceEqual([], self._search(experiment_id, param_expressions=[expr]))\n\n # tests for same metric name across runs with different values and timestamps\n expr = self._metric_expression(\"measure_a\", \">\", 0.0)\n self.assertSequenceEqual([r1, r2], self._search(experiment_id, param_expressions=[expr]))\n\n expr = self._metric_expression(\"measure_a\", \"<\", 50.0)\n self.assertSequenceEqual([r1], self._search(experiment_id, param_expressions=[expr]))\n\n expr = self._metric_expression(\"measure_a\", \"<\", 1000.0)\n self.assertSequenceEqual([r1, r2], self._search(experiment_id, param_expressions=[expr]))\n\n expr = self._metric_expression(\"measure_a\", \"!=\", -12.0)\n self.assertSequenceEqual([r1, r2], self._search(experiment_id, param_expressions=[expr]))\n\n expr = self._metric_expression(\"measure_a\", \">\", 50.0)\n self.assertSequenceEqual([r2], self._search(experiment_id, param_expressions=[expr]))\n\n expr = self._metric_expression(\"measure_a\", \"=\", 1.0)\n self.assertSequenceEqual([r1], self._search(experiment_id, param_expressions=[expr]))\n\n expr = self._metric_expression(\"measure_a\", \"=\", 400.0)\n self.assertSequenceEqual([r2], self._search(experiment_id, param_expressions=[expr]))\n\n # test search with unique metric keys\n expr = self._metric_expression(\"m_a\", \">\", 1.0)\n self.assertSequenceEqual([r1], self._search(experiment_id, param_expressions=[expr]))\n\n expr = self._metric_expression(\"m_b\", \">\", 1.0)\n self.assertSequenceEqual([r2], self._search(experiment_id, param_expressions=[expr]))\n\n # there is a 
recorded metric above this threshold, but not at the last timestamp\n expr = self._metric_expression(\"m_b\", \">\", 5.0)\n self.assertSequenceEqual([], self._search(experiment_id, param_expressions=[expr]))\n\n # metric matches the value at the last reported timestamp for 'm_b'\n expr = self._metric_expression(\"m_b\", \"=\", 4.0)\n self.assertSequenceEqual([r2], self._search(experiment_id, param_expressions=[expr]))\n\n def test_search_full(self):\n experiment_id = self._experiment_factory('search_params')\n r1 = self._run_factory(self._get_run_configs('r1', experiment_id)).run_uuid\n r2 = self._run_factory(self._get_run_configs('r2', experiment_id)).run_uuid\n\n self.store.log_param(r1, entities.Param('generic_param', 'p_val'))\n self.store.log_param(r2, entities.Param('generic_param', 'p_val'))\n\n self.store.log_param(r1, entities.Param('p_a', 'abc'))\n self.store.log_param(r2, entities.Param('p_b', 'ABC'))\n\n self.store.log_metric(r1, entities.Metric(\"common\", 1.0, 1))\n self.store.log_metric(r2, entities.Metric(\"common\", 1.0, 1))\n\n self.store.log_metric(r1, entities.Metric(\"m_a\", 2.0, 2))\n self.store.log_metric(r2, entities.Metric(\"m_b\", 3.0, 2))\n self.store.log_metric(r2, entities.Metric(\"m_b\", 4.0, 8))\n self.store.log_metric(r2, entities.Metric(\"m_b\", 8.0, 3))\n\n p_expr = self._param_expression(\"generic_param\", \"=\", \"p_val\")\n m_expr = self._metric_expression(\"common\", \"=\", 1.0)\n self.assertSequenceEqual([r1, r2], self._search(experiment_id,\n param_expressions=[p_expr],\n metrics_expressions=[m_expr]))\n\n # all params and metrics match\n p_expr = self._param_expression(\"generic_param\", \"=\", \"p_val\")\n m1_expr = self._metric_expression(\"common\", \"=\", 1.0)\n m2_expr = self._metric_expression(\"m_a\", \">\", 1.0)\n self.assertSequenceEqual([r1], self._search(experiment_id,\n param_expressions=[p_expr],\n metrics_expressions=[m1_expr, m2_expr]))\n\n # test with mismatch param\n p_expr = self._param_expression(\"random_bad_name\", \"=\", \"p_val\")\n m1_expr = self._metric_expression(\"common\", \"=\", 1.0)\n m2_expr = self._metric_expression(\"m_a\", \">\", 1.0)\n self.assertSequenceEqual([], self._search(experiment_id,\n param_expressions=[p_expr],\n metrics_expressions=[m1_expr, m2_expr]))\n\n # test with mismatch metric\n p_expr = self._param_expression(\"generic_param\", \"=\", \"p_val\")\n m1_expr = self._metric_expression(\"common\", \"=\", 1.0)\n m2_expr = self._metric_expression(\"m_a\", \">\", 100.0)\n self.assertSequenceEqual([], self._search(experiment_id,\n param_expressions=[p_expr],\n metrics_expressions=[m1_expr, m2_expr]))\n" }, { "alpha_fraction": 0.7816377282142639, "alphanum_fraction": 0.7816377282142639, "avg_line_length": 43.77777862548828, "blob_id": "27a1a8712ebfbc7c3c4334250bdff450fc73d9d4", "content_id": "849f1663037c03aefdc00d959fd1e18344b99975", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 806, "license_type": "permissive", "max_line_length": 70, "num_lines": 18, "path": "/mlflow/utils/mlflow_tags.py", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "\"\"\"\nFile containing all of the run tags in the mlflow. 
namespace.\n\"\"\"\nMLFLOW_DATABRICKS_NOTEBOOK_ID = \"mlflow.databricks.notebookID\"\nMLFLOW_DATABRICKS_NOTEBOOK_PATH = \"mlflow.databricks.notebookPath\"\nMLFLOW_DATABRICKS_WEBAPP_URL = \"mlflow.databricks.webappURL\"\nMLFLOW_DATABRICKS_RUN_URL = \"mlflow.databricks.runURL\"\nMLFLOW_DATABRICKS_SHELL_JOB_ID = \"mlflow.databricks.shellJobID\"\nMLFLOW_DATABRICKS_SHELL_JOB_RUN_ID = \"mlflow.databricks.shellJobRunID\"\nMLFLOW_RUN_NAME = \"mlflow.runName\"\nMLFLOW_GIT_BRANCH_NAME = \"mlflow.gitBranchName\"\nMLFLOW_GIT_REPO_URL = \"mlflow.gitRepoURL\"\nMLFLOW_PARENT_RUN_ID = \"mlflow.parentRunId\"\nMLFLOW_ENV = \"mlflow.project.env\"\nMLFLOW_DOCKER = \"docker\"\nMLFLOW_CONDA = \"conda\"\nMLFLOW_DOCKER_IMAGE_NAME = \"mlflow.docker.image.name\"\nMLFLOW_DOCKER_IMAGE_ID = \"mlflow.docker.image.id\"\n" }, { "alpha_fraction": 0.5884928107261658, "alphanum_fraction": 0.5953720808029175, "avg_line_length": 45.01438903808594, "blob_id": "0372788d14aed94825bd4d63329fc515481cdc8a", "content_id": "26075a2ec953fb43514256c24a39924b8bd2a2b4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6396, "license_type": "permissive", "max_line_length": 99, "num_lines": 139, "path": "/tests/helper_functions.py", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "import os\nimport random\nimport re\nimport requests\nimport string\nimport time\nimport signal\nfrom subprocess import Popen, PIPE, STDOUT\n\nimport pandas as pd\n\nimport mlflow.pyfunc.scoring_server as pyfunc_scoring_server\nimport mlflow.pyfunc\n\n\ndef random_int(lo=1, hi=1e10):\n return random.randint(lo, hi)\n\n\ndef random_str(size=10, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef random_file(ext):\n return \"temp_test_%d.%s\" % (random_int(), ext)\n\n\ndef score_model_in_sagemaker_docker_container(\n model_path, data, content_type, flavor=mlflow.pyfunc.FLAVOR_NAME,\n activity_polling_timeout_seconds=500):\n \"\"\"\n :param model_path: Path to the model to be served.\n :param data: The data to send to the docker container for testing. This is either a\n Pandas dataframe or string of the format specified by `content_type`.\n :param content_type: The type of the data to send to the docker container for testing. This is\n one of `mlflow.pyfunc.scoring_server.CONTENT_TYPES`.\n :param flavor: Model flavor to be deployed.\n :param activity_polling_timeout_seconds: The amount of time, in seconds, to wait before\n declaring the scoring process to have failed.\n \"\"\"\n env = dict(os.environ)\n env.update(LC_ALL=\"en_US.UTF-8\", LANG=\"en_US.UTF-8\")\n proc = _start_scoring_proc(\n cmd=['mlflow', 'sagemaker', 'run-local', '-m', model_path, '-p', \"5000\", \"-f\", flavor],\n env=env)\n return _evaluate_scoring_proc(proc, 5000, data, content_type, activity_polling_timeout_seconds)\n\n\ndef pyfunc_serve_and_score_model(\n model_path, data, content_type, activity_polling_timeout_seconds=500, extra_args=None):\n \"\"\"\n :param model_path: Path to the model to be served.\n :param data: The data to send to the pyfunc server for testing. This is either a\n Pandas dataframe or string of the format specified by `content_type`.\n :param content_type: The type of the data to send to the pyfunc server for testing. 
This is\n one of `mlflow.pyfunc.scoring_server.CONTENT_TYPES`.\n :param activity_polling_timeout_seconds: The amount of time, in seconds, to wait before\n declaring the scoring process to have failed.\n :param extra_args: A list of extra arguments to pass to the pyfunc scoring server command. For\n example, passing ``extra_args=[\"--no-conda\"]`` will pass the ``--no-conda``\n flag to the scoring server to ensure that conda environment activation\n is skipped.\n \"\"\"\n env = dict(os.environ)\n env.update(LC_ALL=\"en_US.UTF-8\", LANG=\"en_US.UTF-8\")\n scoring_cmd = ['mlflow', 'pyfunc', 'serve', '-m', model_path, \"-p\", \"0\"]\n if extra_args is not None:\n scoring_cmd += extra_args\n proc = _start_scoring_proc(cmd=scoring_cmd, env=env)\n for x in iter(proc.stdout.readline, \"\"):\n print(x)\n m = re.match(pattern=\".*Running on http://127.0.0.1:(\\\\d+).*\", string=x)\n if m:\n return _evaluate_scoring_proc(\n proc, int(m.group(1)), data, content_type, activity_polling_timeout_seconds)\n\n raise Exception(\"Failed to start server\")\n\n\ndef _start_scoring_proc(cmd, env):\n proc = Popen(cmd,\n stdout=PIPE,\n stderr=STDOUT,\n universal_newlines=True,\n env=env,\n # Assign the scoring process to a process group. All child processes of the\n # scoring process will be assigned to this group as well. This allows child\n # processes of the scoring process to be terminated successfully\n preexec_fn=os.setsid)\n return proc\n\n\ndef _evaluate_scoring_proc(proc, port, data, content_type, activity_polling_timeout_seconds=250):\n \"\"\"\n :param activity_polling_timeout_seconds: The amount of time, in seconds, to wait before\n declaring the scoring process to have failed.\n \"\"\"\n try:\n for i in range(0, int(activity_polling_timeout_seconds / 5)):\n assert proc.poll() is None, \"scoring process died\"\n time.sleep(5)\n # noinspection PyBroadException\n try:\n ping_status = requests.get(url='http://localhost:%d/ping' % port)\n print('connection attempt', i, \"server is up! 
ping status\", ping_status)\n if ping_status.status_code == 200:\n break\n except Exception: # pylint: disable=broad-except\n print('connection attempt', i, \"failed, server is not up yet\")\n\n assert proc.poll() is None, \"scoring process died\"\n ping_status = requests.get(url='http://localhost:%d/ping' % port)\n print(\"server up, ping status\", ping_status)\n if ping_status.status_code != 200:\n raise Exception(\"ping failed, server is not happy\")\n if type(data) == pd.DataFrame:\n if content_type == pyfunc_scoring_server.CONTENT_TYPE_JSON:\n data = data.to_json(orient=\"records\")\n elif content_type == pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED:\n data = data.to_json(orient=\"split\")\n elif content_type == pyfunc_scoring_server.CONTENT_TYPE_CSV:\n data = data.to_csv()\n else:\n raise Exception(\n \"Unexpected content type for Pandas dataframe input %s\" % content_type)\n response = requests.post(url='http://localhost:%d/invocations' % port,\n data=data,\n headers={\"Content-Type\": content_type})\n return response\n finally:\n if proc.poll() is None:\n # Terminate the process group containing the scoring process.\n # This will terminate all child processes of the scoring process\n pgrp = os.getpgid(proc.pid)\n os.killpg(pgrp, signal.SIGTERM)\n print(\"captured output of the scoring process\")\n print(\"-------------------------STDOUT------------------------------\")\n print(proc.stdout.read())\n print(\"==============================================================\")\n" }, { "alpha_fraction": 0.671875, "alphanum_fraction": 0.765625, "avg_line_length": 20.33333396911621, "blob_id": "00a8d317974a086fa4d1e511caa79971f582d263", "content_id": "fae81ccdde639145d8ac0cf83c80dbf8b7dd2f38", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 64, "license_type": "permissive", "max_line_length": 32, "num_lines": 3, "path": "/tests/resources/example_docker_project/Dockerfile", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "FROM continuumio/miniconda:4.5.4\n\nRUN pip install mlflow==0.8.1\n" }, { "alpha_fraction": 0.6206673383712769, "alphanum_fraction": 0.6292517185211182, "avg_line_length": 34.482757568359375, "blob_id": "f96efa196af0315b033068ace41c3c33f55e457f", "content_id": "1946d8b52062706454f1f1bbe388e8e0e7417aaf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6174, "license_type": "permissive", "max_line_length": 94, "num_lines": 174, "path": "/mlflow/store/dbmodels/models.py", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "import time\nfrom sqlalchemy.orm import relationship, backref\nfrom sqlalchemy import (\n Column, String, Float, ForeignKey, Integer, CheckConstraint,\n BigInteger, PrimaryKeyConstraint)\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom mlflow.entities import (\n Experiment, RunTag, Metric, Param, RunData, RunInfo,\n SourceType, RunStatus, Run, ViewType)\nfrom mlflow.entities.lifecycle_stage import LifecycleStage\n\nBase = declarative_base()\n\n\nSourceTypes = [\n SourceType.to_string(SourceType.NOTEBOOK),\n SourceType.to_string(SourceType.JOB),\n SourceType.to_string(SourceType.LOCAL),\n SourceType.to_string(SourceType.UNKNOWN),\n SourceType.to_string(SourceType.PROJECT)\n]\n\nRunStatusTypes = [\n RunStatus.to_string(RunStatus.SCHEDULED),\n RunStatus.to_string(RunStatus.FAILED),\n RunStatus.to_string(RunStatus.FINISHED),\n 
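# one entry per RunStatus value; SqlRun's status CheckConstraint below validates against this list\n 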
RunStatus.to_string(RunStatus.RUNNING)\n]\n\n\ndef _create_entity(base, model):\n\n # create a dict of kwarg properties for the entity and return the initialized entity\n config = {}\n for k in base._properties():\n # fetch the model attribute backing each entity property\n obj = getattr(model, k)\n\n if isinstance(model, SqlRun):\n if base is RunData:\n # RunData holds lists of metrics, params and tags,\n # so obj is a list whose items need converting\n if k == 'metrics':\n # only keep the latest recorded metric per key\n metrics = {}\n for o in obj:\n if o.key not in metrics or o.timestamp > metrics.get(o.key).timestamp:\n metrics[o.key] = Metric(o.key, o.value, o.timestamp)\n obj = metrics.values()\n elif k == 'params':\n obj = [Param(o.key, o.value) for o in obj]\n elif k == 'tags':\n obj = [RunTag(o.key, o.value) for o in obj]\n elif base is RunInfo:\n if k == 'source_type':\n obj = SourceType.from_string(obj)\n elif k == \"status\":\n obj = RunStatus.from_string(obj)\n\n config[k] = obj\n return base(**config)\n\n\nclass SqlExperiment(Base):\n __tablename__ = 'experiments'\n\n experiment_id = Column(Integer, autoincrement=True)\n name = Column(String(256), unique=True, nullable=False)\n artifact_location = Column(String(256), nullable=True)\n lifecycle_stage = Column(String(32), default=LifecycleStage.ACTIVE)\n\n __table_args__ = (\n CheckConstraint(\n lifecycle_stage.in_(LifecycleStage.view_type_to_stages(ViewType.ALL)),\n name='lifecycle_stage'),\n PrimaryKeyConstraint('experiment_id', name='experiment_pk')\n )\n\n def __repr__(self):\n return '<SqlExperiment ({}, {})>'.format(self.experiment_id, self.name)\n\n def to_mlflow_entity(self):\n return _create_entity(Experiment, self)\n\n\nclass SqlRun(Base):\n __tablename__ = 'runs'\n\n run_uuid = Column(String(32), nullable=False)\n name = Column(String(250))\n source_type = Column(String(20), default=SourceType.to_string(SourceType.LOCAL))\n source_name = Column(String(500))\n entry_point_name = Column(String(50))\n user_id = Column(String(256), nullable=True, default=None)\n status = Column(String(20), default=RunStatus.to_string(RunStatus.SCHEDULED))\n start_time = Column(BigInteger, default=int(time.time()))\n end_time = Column(BigInteger, nullable=True, default=None)\n source_version = Column(String(50))\n lifecycle_stage = Column(String(20), default=LifecycleStage.ACTIVE)\n artifact_uri = Column(String(200), default=None)\n experiment_id = Column(Integer, ForeignKey('experiments.experiment_id'))\n experiment = relationship('SqlExperiment', backref=backref('runs', cascade='all'))\n\n __table_args__ = (\n CheckConstraint(source_type.in_(SourceTypes), name='source_type'),\n CheckConstraint(status.in_(RunStatusTypes), name='status'),\n CheckConstraint(lifecycle_stage.in_(LifecycleStage.view_type_to_stages(ViewType.ALL)),\n name='lifecycle_stage'),\n PrimaryKeyConstraint('run_uuid', name='run_pk')\n )\n\n def to_mlflow_entity(self):\n # Run's __init__ uses different parameter names than _properties(), so build it manually\n info = _create_entity(RunInfo, self)\n data = _create_entity(RunData, self)\n return Run(run_info=info, run_data=data)\n\n\nclass SqlTag(Base):\n __tablename__ = 'tags'\n\n key = Column(String(250))\n value = Column(String(250), nullable=True)\n run_uuid = Column(String(32), ForeignKey('runs.run_uuid'))\n run = relationship('SqlRun', backref=backref('tags', cascade='all'))\n\n __table_args__ = (\n PrimaryKeyConstraint('key', 'run_uuid', name='tag_pk'),\n )\n\n def __repr__(self):\n return '<SqlRunTag({}, {})>'.format(self.key, 
self.value)\n\n def to_mlflow_entity(self):\n return _create_entity(RunTag, self)\n\n\nclass SqlMetric(Base):\n __tablename__ = 'metrics'\n\n key = Column(String(250))\n value = Column(Float, nullable=False)\n timestamp = Column(BigInteger, default=int(time.time()))\n run_uuid = Column(String(32), ForeignKey('runs.run_uuid'))\n run = relationship('SqlRun', backref=backref('metrics', cascade='all'))\n\n __table_args__ = (\n PrimaryKeyConstraint('key', 'timestamp', 'run_uuid', name='metric_pk'),\n )\n\n def __repr__(self):\n return '<SqlMetric({}, {}, {})>'.format(self.key, self.value, self.timestamp)\n\n def to_mlflow_entity(self):\n return _create_entity(Metric, self)\n\n\nclass SqlParam(Base):\n __tablename__ = 'params'\n\n key = Column(String(250))\n value = Column(String(250), nullable=False)\n run_uuid = Column(String(32), ForeignKey('runs.run_uuid'))\n run = relationship('SqlRun', backref=backref('params', cascade='all'))\n\n __table_args__ = (\n PrimaryKeyConstraint('key', 'run_uuid', name='param_pk'),\n )\n\n def __repr__(self):\n return '<SqlParam({}, {})>'.format(self.key, self.value)\n\n def to_mlflow_entity(self):\n return _create_entity(Param, self)\n" }, { "alpha_fraction": 0.658833384513855, "alphanum_fraction": 0.6592530608177185, "avg_line_length": 28.787500381469727, "blob_id": "330ebae779a37c3f5d43d239ab58bccc55c05d4f", "content_id": "1524abfc5f0ebe32e4028847aa3a82ddcdb880e1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2383, "license_type": "permissive", "max_line_length": 96, "num_lines": 80, "path": "/mlflow/server/js/src/components/RequestStateWrapper.js", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react';\nimport './RequestStateWrapper.css';\nimport spinner from '../static/mlflow-spinner.png';\nimport { connect } from 'react-redux';\nimport { getApis } from '../reducers/Reducers';\nimport PropTypes from 'prop-types';\n\nexport class RequestStateWrapper extends Component {\n static propTypes = {\n shouldOptimisticallyRender: PropTypes.bool,\n requests: PropTypes.arrayOf(PropTypes.object).isRequired,\n children: PropTypes.node.isRequired,\n // (requests) => undefined | React Node.\n // This function is called when all requests are complete and when one or more of them is\n // in the error state. The function can choose to render an error view depending on the\n // type of errors received. 
If undefined is returned, then render the AppErrorBoundary view.\n errorRenderFunc: PropTypes.func,\n };\n\n static defaultProps = {\n shouldOptimisticallyRender: false,\n };\n\n state = {\n shouldRender: false,\n shouldRenderError: false,\n };\n\n static getErrorRequests(requests) {\n return requests.filter((r) => {\n return r.error !== undefined;\n });\n }\n\n static getDerivedStateFromProps(nextProps) {\n const shouldRender = nextProps.requests.every((r) => {\n return r.active === false;\n });\n return {\n shouldRender,\n shouldRenderError: RequestStateWrapper.getErrorRequests(nextProps.requests).length > 0,\n };\n }\n\n render() {\n const { children, errorRenderFunc, requests } = this.props;\n const { shouldRender, shouldRenderError } = this.state;\n if (shouldRender) {\n if (shouldRenderError) {\n if (errorRenderFunc) {\n const result = errorRenderFunc(this.props.requests);\n if (result) {\n return result;\n }\n }\n // This triggers the OOPS error boundary.\n console.error(\"ERROR\", requests);\n throw Error(\"GOTO error boundary\");\n } else {\n return children;\n }\n }\n if (this.props.shouldOptimisticallyRender) {\n return children;\n }\n return (\n <div className=\"RequestStateWrapper-spinner\">\n <img alt=\"Page loading...\" src={spinner}/>\n </div>\n );\n }\n}\n\nconst mapStateToProps = (state, ownProps) => {\n return Object.assign({}, ownProps, {\n requests: getApis(ownProps.requestIds, state)\n });\n};\n\nexport default connect(mapStateToProps)(RequestStateWrapper);\n" }, { "alpha_fraction": 0.6818980574607849, "alphanum_fraction": 0.7627416253089905, "avg_line_length": 16.24242401123047, "blob_id": "99131c4343a8c82568d9e81423782f15bfb31a3e", "content_id": "80283bbc1307ed1ecc827ed013e188d98ea914ab", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 569, "license_type": "permissive", "max_line_length": 79, "num_lines": 33, "path": "/test-requirements.txt", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "# Test reqs\nazure-storage\ngoogle-cloud-storage\nh2o\nbotocore<=1.12.84\nboto3<=1.9.84\nmock==2.0.0\nmoto\npandas<=0.23.4\nprospector[with_pyroma]==0.12.7\npep8==1.7.1\npyarrow\npylint==1.8.2\npyspark\npytest==3.2.1\npytest-cov==2.6.0\nrstcheck==3.2\n# TODO: Stop pinning the version of scikit-learn when the latest version of the\n# library on Anaconda catches up to pip\nscikit-learn==0.20.0\nscipy\ntensorflow\ntorch\ntorchvision\npysftp\nkeras\nattrdict==2.0.0\nazureml-sdk; python_version >= \"3.0\"\ncloudpickle\npytest-localserver\nsqlalchemy\n# test plugin\ntests/resources/mlflow-test-plugin/\n" }, { "alpha_fraction": 0.711861789226532, "alphanum_fraction": 0.7123245596885681, "avg_line_length": 37.36094665527344, "blob_id": "de9002996ea525cd71b10eeacb8a56398ad743ea", "content_id": "2196ca0722e6059bef88803ab9c4d8913d465ddf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6483, "license_type": "permissive", "max_line_length": 99, "num_lines": 169, "path": "/mlflow/server/js/src/components/ExperimentPage.js", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react';\nimport './ExperimentPage.css';\nimport PropTypes from 'prop-types';\nimport { getExperimentApi, getUUID, searchRunsApi } from '../Actions';\nimport { connect } from 'react-redux';\nimport ExperimentView from './ExperimentView';\nimport RequestStateWrapper from 
'./RequestStateWrapper';\nimport KeyFilter from '../utils/KeyFilter';\nimport { ViewType } from '../sdk/MlflowEnums';\nimport { SearchUtils } from \"../utils/SearchUtils\";\nimport LocalStorageUtils from \"../utils/LocalStorageUtils\";\nimport { ExperimentPagePersistedState } from \"../sdk/MlflowLocalStorageMessages\";\nimport Utils from \"../utils/Utils\";\nimport ErrorCodes from \"../sdk/ErrorCodes\";\nimport PermissionDeniedView from \"./PermissionDeniedView\";\n\nexport const LIFECYCLE_FILTER = { ACTIVE: 'Active', DELETED: 'Deleted' };\n\nclass ExperimentPage extends Component {\n constructor(props) {\n super(props);\n this.onSearch = this.onSearch.bind(this);\n this.getRequestIds = this.getRequestIds.bind(this);\n const store = ExperimentPage.getLocalStore(this.props.experimentId);\n // Load state data persisted in localStorage. If data isn't present in localStorage (e.g. the\n // first time we construct this component in a browser), the default values in\n // ExperimentPagePersistedState will take precedence.\n const persistedState = new ExperimentPagePersistedState(store.loadComponentState());\n this.state = {\n ...ExperimentPage.getDefaultUnpersistedState(),\n persistedState: persistedState.toJSON(),\n };\n }\n\n static propTypes = {\n experimentId: PropTypes.number.isRequired,\n dispatchSearchRuns: PropTypes.func.isRequired,\n };\n\n /** Returns default values for state attributes that aren't persisted in local storage. */\n static getDefaultUnpersistedState() {\n return {\n // String UUID associated with a GetExperiment API request\n getExperimentRequestId: getUUID(),\n // String UUID associated with a SearchRuns API request\n searchRunsRequestId: getUUID(),\n // Last experiment, if any, displayed by this instance of ExperimentPage\n lastExperimentId: undefined,\n // Lifecycle filter of runs to display\n lifecycleFilter: LIFECYCLE_FILTER.ACTIVE,\n };\n }\n\n /**\n * Returns a LocalStorageStore instance that can be used to persist data associated with the\n * ExperimentPage component (e.g. 
component state like metric/param filter info), for the\n * specified experiment.\n */\n static getLocalStore(experimentId) {\n return LocalStorageUtils.getStoreForComponent(\"ExperimentPage\", experimentId);\n }\n\n snapshotComponentState() {\n const store = ExperimentPage.getLocalStore(this.props.experimentId);\n store.saveComponentState(new ExperimentPagePersistedState(this.state.persistedState));\n }\n\n componentDidUpdate() {\n this.snapshotComponentState();\n }\n\n componentWillUnmount() {\n // Snapshot component state on unmounts to ensure we've captured component state in cases where\n // componentDidUpdate doesn't fire.\n this.snapshotComponentState();\n }\n\n static getDerivedStateFromProps(props, state) {\n if (props.experimentId !== state.lastExperimentId) {\n const store = ExperimentPage.getLocalStore(props.experimentId);\n const loadedState = new ExperimentPagePersistedState(store.loadComponentState()).toJSON();\n const newState = {\n ...ExperimentPage.getDefaultUnpersistedState(),\n persistedState: loadedState,\n lastExperimentId: props.experimentId,\n lifecycleFilter: LIFECYCLE_FILTER.ACTIVE,\n };\n props.dispatch(getExperimentApi(props.experimentId, newState.getExperimentRequestId));\n props.dispatch(searchRunsApi(\n [props.experimentId],\n SearchUtils.parseSearchInput(newState.persistedState.searchInput),\n lifecycleFilterToRunViewType(newState.lifecycleFilter),\n newState.searchRunsRequestId));\n return newState;\n }\n return null;\n }\n\n onSearch(paramKeyFilterString, metricKeyFilterString, searchInput, lifecycleFilterInput) {\n const andedExpressions = SearchUtils.parseSearchInput(searchInput);\n this.setState({\n persistedState: new ExperimentPagePersistedState({\n paramKeyFilterString,\n metricKeyFilterString,\n searchInput,\n }).toJSON(),\n lifecycleFilter: lifecycleFilterInput,\n });\n const searchRunsRequestId = this.props.dispatchSearchRuns(\n this.props.experimentId, andedExpressions, lifecycleFilterInput);\n this.setState({ searchRunsRequestId });\n }\n\n render() {\n return (\n <div className=\"ExperimentPage runs-table-flex-container\" style={{height: \"100%\"}}>\n <RequestStateWrapper\n requestIds={this.getRequestIds()}\n errorRenderFunc={(requests) => {\n const getExperimentRequest = Utils.getRequestWithId(\n requests, this.state.getExperimentRequestId);\n if (getExperimentRequest.error.getErrorCode() === ErrorCodes.PERMISSION_DENIED) {\n return (<PermissionDeniedView\n errorMessage={getExperimentRequest.error.xhr.responseJSON.message}\n />);\n }\n return undefined;\n }}\n >\n <ExperimentView\n paramKeyFilter={new KeyFilter(this.state.persistedState.paramKeyFilterString)}\n metricKeyFilter={new KeyFilter(this.state.persistedState.metricKeyFilterString)}\n experimentId={this.props.experimentId}\n searchRunsRequestId={this.state.searchRunsRequestId}\n lifecycleFilter={this.state.lifecycleFilter}\n onSearch={this.onSearch}\n searchInput={this.state.persistedState.searchInput}\n />\n </RequestStateWrapper>\n </div>\n );\n }\n\n getRequestIds() {\n return [this.state.getExperimentRequestId, this.state.searchRunsRequestId];\n }\n}\n\nconst mapDispatchToProps = (dispatch) => {\n return {\n dispatch,\n dispatchSearchRuns: (experimentId, andedExpressions, lifecycleFilterInput) => {\n const requestId = getUUID();\n dispatch(searchRunsApi([experimentId], andedExpressions,\n lifecycleFilterToRunViewType(lifecycleFilterInput), requestId));\n return requestId;\n }\n };\n};\n\nconst lifecycleFilterToRunViewType = (lifecycleFilter) => {\n if (lifecycleFilter === 
LIFECYCLE_FILTER.ACTIVE) {\n return ViewType.ACTIVE_ONLY;\n } else {\n return ViewType.DELETED_ONLY;\n }\n};\n\nexport default connect(undefined, mapDispatchToProps)(ExperimentPage);\n" }, { "alpha_fraction": 0.6365098357200623, "alphanum_fraction": 0.6469641923904419, "avg_line_length": 39.1129035949707, "blob_id": "fb4e60cb592240727c53a37186150a87cf5dbf51", "content_id": "c704ab7696e534715d956c1b2d8ff8286664612d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2487, "license_type": "permissive", "max_line_length": 97, "num_lines": 62, "path": "/mlflow/utils/__init__.py", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "from sys import version_info\n\nimport numpy as np\nimport pandas as pd\n\n\nPYTHON_VERSION = \"{major}.{minor}.{micro}\".format(major=version_info.major,\n minor=version_info.minor,\n micro=version_info.micro)\n\n\ndef get_jsonable_obj(data, pandas_orient=\"records\"):\n \"\"\"Attempt to make the data json-able via standard library.\n Look for some commonly used types that are not jsonable and convert them into json-able ones.\n Unknown data types are returned as is.\n\n :param data: data to be converted, works with pandas and numpy, rest will be returned as is.\n :param pandas_orient: If `data` is a Pandas DataFrame, it will be converted to a JSON\n dictionary using this Pandas serialization orientation.\n \"\"\"\n if isinstance(data, np.ndarray):\n return data.tolist()\n if isinstance(data, pd.DataFrame):\n return data.to_dict(orient=pandas_orient)\n if isinstance(data, pd.Series):\n return pd.DataFrame(data).to_dict(orient=pandas_orient)\n else: # by default just return whatever this is and hope for the best\n return data\n\n\ndef get_major_minor_py_version(py_version):\n return \".\".join(py_version.split(\".\")[:2])\n\n\ndef get_unique_resource_id(max_length=None):\n \"\"\"\n Obtains a unique id that can be included in a resource name. This unique id is a valid\n DNS subname.\n\n :param max_length: The maximum length of the identifier\n :return: A unique identifier that can be appended to a user-readable resource name to avoid\n naming collisions.\n \"\"\"\n import uuid\n import base64\n if max_length is not None and max_length <= 0:\n raise ValueError(\n \"The specified maximum length for the unique resource id must be positive!\")\n\n uuid_bytes = uuid.uuid4().bytes\n # Use base64 encoding to shorten the UUID length. Note that the replacement of the\n # unsupported '+' symbol maintains uniqueness because the UUID byte string is of a fixed,\n # 16-byte length\n uuid_b64 = base64.b64encode(uuid_bytes)\n if version_info >= (3, 0):\n # In Python3, `uuid_b64` is a `bytes` object. 
It needs to be\n # converted to a string\n uuid_b64 = uuid_b64.decode(\"ascii\")\n unique_id = uuid_b64.rstrip('=\\n').replace(\"/\", \"-\").replace(\"+\", \"AB\").lower()\n if max_length is not None:\n unique_id = unique_id[:int(max_length)]\n return unique_id\n" }, { "alpha_fraction": 0.7037551999092102, "alphanum_fraction": 0.7051460146903992, "avg_line_length": 36.842105865478516, "blob_id": "dc709efc6447731019942210afe9ca97a9ea2e1f", "content_id": "75bdb1d6d9106ff41d1de8f643ffeabe620820bb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 719, "license_type": "permissive", "max_line_length": 90, "num_lines": 19, "path": "/tests/test_exceptions.py", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "import json\n\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE\n\n\nclass TestMlflowException(object):\n def test_error_code_constructor(self):\n assert MlflowException('test', error_code=INVALID_PARAMETER_VALUE).error_code == \\\n 'INVALID_PARAMETER_VALUE'\n\n def test_default_error_code(self):\n assert MlflowException('test').error_code == 'INTERNAL_ERROR'\n\n def test_serialize_to_json(self):\n mlflow_exception = MlflowException('test')\n deserialized = json.loads(mlflow_exception.serialize_as_json())\n assert deserialized['message'] == 'test'\n assert deserialized['error_code'] == 'INTERNAL_ERROR'\n" }, { "alpha_fraction": 0.6798557043075562, "alphanum_fraction": 0.6822229623794556, "avg_line_length": 37.072959899902344, "blob_id": "40eddd1abc14506412d6bd7b8ebd3d2f21a425c8", "content_id": "a8efc5898aeec1c0d361890b769397b15b34be56", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8871, "license_type": "permissive", "max_line_length": 99, "num_lines": 233, "path": "/tests/keras/test_keras_model_export.py", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "# pep8: disable=E501\n\nfrom __future__ import print_function\n\nimport os\nimport json\nimport pytest\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nimport sklearn.datasets as datasets\nimport pandas as pd\nimport numpy as np\nimport yaml\n\nimport mlflow\nimport mlflow.keras\nimport mlflow.pyfunc.scoring_server as pyfunc_scoring_server\nfrom mlflow import pyfunc\nfrom mlflow.models import Model\nfrom mlflow.tracking.utils import _get_model_log_dir\nfrom mlflow.utils.environment import _mlflow_conda_env\nfrom mlflow.utils.model_utils import _get_flavor_configuration\nfrom tests.helper_functions import pyfunc_serve_and_score_model\nfrom tests.helper_functions import score_model_in_sagemaker_docker_container\nfrom tests.pyfunc.test_spark import score_model_as_udf\nfrom tests.projects.utils import tracking_uri_mock # pylint: disable=unused-import\n\n\n@pytest.fixture(scope='module')\ndef data():\n iris = datasets.load_iris()\n data = pd.DataFrame(data=np.c_[iris['data'], iris['target']],\n columns=iris['feature_names'] + ['target'])\n y = data['target']\n x = data.drop('target', axis=1)\n return x, y\n\n\n@pytest.fixture(scope='module')\ndef model(data):\n x, y = data\n model = Sequential()\n model.add(Dense(3, input_dim=4))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='SGD')\n model.fit(x, y)\n return model\n\n\n@pytest.fixture(scope='module')\ndef predicted(model, data):\n return model.predict(data[0])\n\n\n@pytest.fixture\ndef 
model_path(tmpdir):\n return os.path.join(tmpdir.strpath, \"model\")\n\n\n@pytest.fixture\ndef keras_custom_env(tmpdir):\n conda_env = os.path.join(str(tmpdir), \"conda_env.yml\")\n _mlflow_conda_env(\n conda_env,\n additional_conda_deps=[\"keras\", \"tensorflow\", \"pytest\"])\n return conda_env\n\n\ndef test_model_save_load(model, model_path, data, predicted):\n x, y = data\n mlflow.keras.save_model(model, model_path)\n\n # Loading Keras model\n model_loaded = mlflow.keras.load_model(model_path)\n assert all(model_loaded.predict(x) == predicted)\n\n # Loading pyfunc model\n pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path)\n assert all(pyfunc_loaded.predict(x).values == predicted)\n\n # pyfunc serve\n scoring_response = pyfunc_serve_and_score_model(\n model_path=os.path.abspath(model_path),\n data=pd.DataFrame(x),\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)\n assert all(pd.read_json(scoring_response.content, orient=\"records\").values.astype(np.float32)\n == predicted)\n\n # test spark udf\n spark_udf_preds = score_model_as_udf(os.path.abspath(model_path),\n run_id=None,\n pandas_df=pd.DataFrame(x),\n result_type=\"float\")\n np.testing.assert_array_almost_equal(\n np.array(spark_udf_preds), predicted.reshape(len(spark_udf_preds)), decimal=4)\n\n\ndef test_model_log(tracking_uri_mock, model, data, predicted): # pylint: disable=unused-argument\n x, y = data\n # should_start_run tests whether or not calling log_model() automatically starts a run.\n for should_start_run in [False, True]:\n try:\n if should_start_run:\n mlflow.start_run()\n mlflow.keras.log_model(model, artifact_path=\"keras_model\")\n\n # Load model\n model_loaded = mlflow.keras.load_model(\n \"keras_model\",\n run_id=mlflow.active_run().info.run_uuid)\n assert all(model_loaded.predict(x) == predicted)\n\n # Loading pyfunc model\n pyfunc_loaded = mlflow.pyfunc.load_pyfunc(\n \"keras_model\",\n run_id=mlflow.active_run().info.run_uuid)\n assert all(pyfunc_loaded.predict(x).values == predicted)\n finally:\n mlflow.end_run()\n\n\ndef test_model_save_persists_specified_conda_env_in_mlflow_model_directory(\n model, model_path, keras_custom_env):\n mlflow.keras.save_model(keras_model=model, path=model_path, conda_env=keras_custom_env)\n\n pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)\n saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])\n assert os.path.exists(saved_conda_env_path)\n assert saved_conda_env_path != keras_custom_env\n\n with open(keras_custom_env, \"r\") as f:\n keras_custom_env_parsed = yaml.safe_load(f)\n with open(saved_conda_env_path, \"r\") as f:\n saved_conda_env_parsed = yaml.safe_load(f)\n assert saved_conda_env_parsed == keras_custom_env_parsed\n\n\ndef test_model_save_accepts_conda_env_as_dict(model, model_path):\n conda_env = dict(mlflow.keras.DEFAULT_CONDA_ENV)\n conda_env[\"dependencies\"].append(\"pytest\")\n mlflow.keras.save_model(keras_model=model, path=model_path, conda_env=conda_env)\n\n pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)\n saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])\n assert os.path.exists(saved_conda_env_path)\n\n with open(saved_conda_env_path, \"r\") as f:\n saved_conda_env_parsed = yaml.safe_load(f)\n assert saved_conda_env_parsed == conda_env\n\n\ndef test_model_log_persists_specified_conda_env_in_mlflow_model_directory(model, keras_custom_env):\n artifact_path = \"model\"\n with mlflow.start_run():\n 
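# log_model should persist a copy of the custom conda env inside the model\n # directory; the assertions below compare the copy, not the original path\n 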
mlflow.keras.log_model(\n keras_model=model, artifact_path=artifact_path, conda_env=keras_custom_env)\n run_id = mlflow.active_run().info.run_uuid\n model_path = _get_model_log_dir(artifact_path, run_id)\n\n pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)\n saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])\n assert os.path.exists(saved_conda_env_path)\n assert saved_conda_env_path != keras_custom_env\n\n with open(keras_custom_env, \"r\") as f:\n keras_custom_env_parsed = yaml.safe_load(f)\n with open(saved_conda_env_path, \"r\") as f:\n saved_conda_env_parsed = yaml.safe_load(f)\n assert saved_conda_env_parsed == keras_custom_env_parsed\n\n\ndef test_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies(\n model, model_path):\n mlflow.keras.save_model(keras_model=model, path=model_path, conda_env=None)\n pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)\n conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])\n with open(conda_env_path, \"r\") as f:\n conda_env = yaml.safe_load(f)\n\n assert conda_env == mlflow.keras.DEFAULT_CONDA_ENV\n\n\ndef test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies(\n model):\n artifact_path = \"model\"\n with mlflow.start_run():\n mlflow.keras.log_model(keras_model=model, artifact_path=artifact_path, conda_env=None)\n run_id = mlflow.active_run().info.run_uuid\n model_path = _get_model_log_dir(artifact_path, run_id)\n\n pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)\n conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])\n with open(conda_env_path, \"r\") as f:\n conda_env = yaml.safe_load(f)\n\n assert conda_env == mlflow.keras.DEFAULT_CONDA_ENV\n\n\ndef test_model_load_succeeds_with_missing_data_key_when_data_exists_at_default_path(\n model, model_path, data, predicted):\n \"\"\"\n This is a backwards compatibility test to ensure that models saved in MLflow version <= 0.8.0\n can be loaded successfully. 
These models are missing the `data` flavor configuration key.\n \"\"\"\n mlflow.keras.save_model(keras_model=model, path=model_path)\n\n model_conf_path = os.path.join(model_path, \"MLmodel\")\n model_conf = Model.load(model_conf_path)\n flavor_conf = model_conf.flavors.get(mlflow.keras.FLAVOR_NAME, None)\n assert flavor_conf is not None\n del flavor_conf['data']\n model_conf.save(model_conf_path)\n\n model_loaded = mlflow.keras.load_model(model_path)\n assert all(model_loaded.predict(data[0]) == predicted)\n\n\n@pytest.mark.release\ndef test_sagemaker_docker_model_scoring_with_default_conda_env(model, model_path, data, predicted):\n mlflow.keras.save_model(keras_model=model, path=model_path, conda_env=None)\n\n scoring_response = score_model_in_sagemaker_docker_container(\n model_path=model_path,\n data=data[0],\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,\n flavor=mlflow.pyfunc.FLAVOR_NAME,\n activity_polling_timeout_seconds=500)\n deployed_model_preds = pd.DataFrame(json.loads(scoring_response.content))\n\n np.testing.assert_array_almost_equal(\n deployed_model_preds.values,\n predicted,\n decimal=4)\n" }, { "alpha_fraction": 0.6270439028739929, "alphanum_fraction": 0.6280615925788879, "avg_line_length": 50.35540008544922, "blob_id": "c108c4a521ae94affc382f25923ada71d3581adc", "content_id": "79fa1e48d7ad4e7cf5b47ccab85c187d99c1b807", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14739, "license_type": "permissive", "max_line_length": 100, "num_lines": 287, "path": "/mlflow/tensorflow.py", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "\"\"\"\nThe ``mlflow.tensorflow`` module provides an API for logging and loading TensorFlow models.\nThis module exports TensorFlow models with the following flavors:\n\nTensorFlow (native) format\n This is the main flavor that can be loaded back into TensorFlow.\n:py:mod:`mlflow.pyfunc`\n Produced for use by generic pyfunc-based deployment tools and batch inference.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport os\nimport shutil\nimport yaml\nimport logging\n\nimport pandas\nimport tensorflow as tf\n\nimport mlflow\nfrom mlflow import pyfunc\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.models import Model\nfrom mlflow.protos.databricks_pb2 import DIRECTORY_NOT_EMPTY\nfrom mlflow.tracking.utils import _get_model_log_dir\nfrom mlflow.utils.environment import _mlflow_conda_env\nfrom mlflow.utils.file_utils import _copy_file_or_tree\nfrom mlflow.utils.model_utils import _get_flavor_configuration\n\nFLAVOR_NAME = \"tensorflow\"\n\nDEFAULT_CONDA_ENV = _mlflow_conda_env(\n additional_conda_deps=[\n \"tensorflow={}\".format(tf.__version__),\n ],\n additional_pip_deps=None,\n additional_conda_channels=None,\n)\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef log_model(tf_saved_model_dir, tf_meta_graph_tags, tf_signature_def_key, artifact_path,\n conda_env=None):\n \"\"\"\n Log a *serialized* collection of TensorFlow graphs and variables as an MLflow model\n for the current run. This method operates on TensorFlow variables and graphs that have been\n serialized in TensorFlow's ``SavedModel`` format. 
For more information about ``SavedModel``\n format, see the TensorFlow documentation:\n https://www.tensorflow.org/guide/saved_model#save_and_restore_models.\n\n :param tf_saved_model_dir: Path to the directory containing serialized TensorFlow variables and\n graphs in ``SavedModel`` format.\n :param tf_meta_graph_tags: A list of tags identifying the model's metagraph within the\n serialized ``SavedModel`` object. For more information, see the\n ``tags`` parameter of the\n ``tf.saved_model.builder.SavedModelBuilder`` method.\n :param tf_signature_def_key: A string identifying the input/output signature associated with the\n model. This is a key within the serialized ``SavedModel`` signature\n definition mapping. For more information, see the\n ``signature_def_map`` parameter of the\n ``tf.saved_model.builder.SavedModelBuilder`` method.\n :param artifact_path: The run-relative path to which to log model artifacts.\n :param conda_env: Either a dictionary representation of a Conda environment or the path to a\n Conda environment yaml file. If provided, this describes the environment\n this model should be run in. At minimum, it should specify the dependencies\n contained in ``mlflow.tensorflow.DEFAULT_CONDA_ENV``. If ``None``, the default\n ``mlflow.tensorflow.DEFAULT_CONDA_ENV`` environment will be added to the\n model. The following is an *example* dictionary representation of a Conda\n environment::\n\n {\n 'name': 'mlflow-env',\n 'channels': ['defaults'],\n 'dependencies': [\n 'python=3.7.0',\n 'tensorflow=1.8.0'\n ]\n }\n\n \"\"\"\n return Model.log(artifact_path=artifact_path, flavor=mlflow.tensorflow,\n tf_saved_model_dir=tf_saved_model_dir, tf_meta_graph_tags=tf_meta_graph_tags,\n tf_signature_def_key=tf_signature_def_key, conda_env=conda_env)\n\n\ndef save_model(tf_saved_model_dir, tf_meta_graph_tags, tf_signature_def_key, path,\n mlflow_model=Model(), conda_env=None):\n \"\"\"\n Save a *serialized* collection of TensorFlow graphs and variables as an MLflow model\n to a local path. This method operates on TensorFlow variables and graphs that have been\n serialized in TensorFlow's ``SavedModel`` format. For more information about ``SavedModel``\n format, see the TensorFlow documentation:\n https://www.tensorflow.org/guide/saved_model#save_and_restore_models.\n\n :param tf_saved_model_dir: Path to the directory containing serialized TensorFlow variables and\n graphs in ``SavedModel`` format.\n :param tf_meta_graph_tags: A list of tags identifying the model's metagraph within the\n serialized ``SavedModel`` object. For more information, see the\n ``tags`` parameter of the\n ``tf.saved_model.builder.SavedModelBuilder`` method.\n :param tf_signature_def_key: A string identifying the input/output signature associated with the\n model. This is a key within the serialized ``SavedModel``\n signature definition mapping. For more information, see the\n ``signature_def_map`` parameter of the\n ``tf.saved_model.builder.SavedModelBuilder`` method.\n :param path: Local path where the MLflow model is to be saved.\n :param mlflow_model: MLflow model configuration to which this flavor will be added.\n :param conda_env: Either a dictionary representation of a Conda environment or the path to a\n Conda environment yaml file. If provided, this describes the environment\n this model should be run in. At minimum, it should specify the dependencies\n contained in ``mlflow.tensorflow.DEFAULT_CONDA_ENV``. 
If ``None``, the default\n ``mlflow.tensorflow.DEFAULT_CONDA_ENV`` environment will be added to the\n model. The following is an *example* dictionary representation of a Conda\n environment::\n\n {\n 'name': 'mlflow-env',\n 'channels': ['defaults'],\n 'dependencies': [\n 'python=3.7.0',\n 'tensorflow=1.8.0'\n ]\n }\n\n \"\"\"\n _logger.info(\n \"Validating the specified TensorFlow model by attempting to load it in a new TensorFlow\"\n \" graph...\")\n _validate_saved_model(tf_saved_model_dir=tf_saved_model_dir,\n tf_meta_graph_tags=tf_meta_graph_tags,\n tf_signature_def_key=tf_signature_def_key)\n _logger.info(\"Validation succeeded!\")\n\n if os.path.exists(path):\n raise MlflowException(\"Path '{}' already exists\".format(path), DIRECTORY_NOT_EMPTY)\n os.makedirs(path)\n root_relative_path = _copy_file_or_tree(src=tf_saved_model_dir, dst=path, dst_dir=None)\n model_dir_subpath = \"tfmodel\"\n shutil.move(os.path.join(path, root_relative_path), os.path.join(path, model_dir_subpath))\n\n conda_env_subpath = \"conda.yaml\"\n if conda_env is None:\n conda_env = DEFAULT_CONDA_ENV\n elif not isinstance(conda_env, dict):\n with open(conda_env, \"r\") as f:\n conda_env = yaml.safe_load(f)\n with open(os.path.join(path, conda_env_subpath), \"w\") as f:\n yaml.safe_dump(conda_env, stream=f, default_flow_style=False)\n\n mlflow_model.add_flavor(FLAVOR_NAME, saved_model_dir=model_dir_subpath,\n meta_graph_tags=tf_meta_graph_tags,\n signature_def_key=tf_signature_def_key)\n pyfunc.add_to_model(mlflow_model, loader_module=\"mlflow.tensorflow\", env=conda_env_subpath)\n mlflow_model.save(os.path.join(path, \"MLmodel\"))\n\n\ndef _validate_saved_model(tf_saved_model_dir, tf_meta_graph_tags, tf_signature_def_key):\n \"\"\"\n Validate the TensorFlow SavedModel by attempting to load it in a new TensorFlow graph.\n If the loading process fails, any exceptions thrown by TensorFlow will be propagated.\n \"\"\"\n validation_tf_graph = tf.Graph()\n validation_tf_sess = tf.Session(graph=validation_tf_graph)\n with validation_tf_graph.as_default():\n _load_model(tf_saved_model_dir=tf_saved_model_dir,\n tf_sess=validation_tf_sess,\n tf_meta_graph_tags=tf_meta_graph_tags,\n tf_signature_def_key=tf_signature_def_key)\n\n\ndef load_model(path, tf_sess, run_id=None):\n \"\"\"\n Load an MLflow model that contains the TensorFlow flavor from the specified path.\n\n **This method must be called within a TensorFlow graph context.**\n\n :param path: The local filesystem path or run-relative artifact path to the model.\n :param tf_sess: The TensorFlow session in which to load the model.\n :return: A TensorFlow signature definition of type:\n ``tensorflow.core.protobuf.meta_graph_pb2.SignatureDef``. 
This defines the input and\n output tensors for model inference.\n\n >>> import mlflow.tensorflow\n >>> import tensorflow as tf\n >>> tf_graph = tf.Graph()\n >>> tf_sess = tf.Session(graph=tf_graph)\n >>> with tf_graph.as_default():\n >>> signature_def = mlflow.tensorflow.load_model(path=\"model_path\", tf_sess=tf_sess)\n >>> input_tensors = [tf_graph.get_tensor_by_name(input_signature.name)\n >>> for _, input_signature in signature_def.inputs.items()]\n >>> output_tensors = [tf_graph.get_tensor_by_name(output_signature.name)\n >>> for _, output_signature in signature_def.outputs.items()]\n \"\"\"\n if run_id is not None:\n path = _get_model_log_dir(model_name=path, run_id=run_id)\n path = os.path.abspath(path)\n flavor_conf = _get_flavor_configuration(model_path=path, flavor_name=FLAVOR_NAME)\n tf_saved_model_dir = os.path.join(path, flavor_conf['saved_model_dir'])\n return _load_model(tf_saved_model_dir=tf_saved_model_dir, tf_sess=tf_sess,\n tf_meta_graph_tags=flavor_conf['meta_graph_tags'],\n tf_signature_def_key=flavor_conf['signature_def_key'])\n\n\ndef _load_model(tf_saved_model_dir, tf_sess, tf_meta_graph_tags, tf_signature_def_key):\n \"\"\"\n Load a specified TensorFlow model consisting of a TensorFlow meta graph and signature definition\n from a serialized TensorFlow ``SavedModel`` collection.\n\n :param tf_saved_model_dir: The local filesystem path or run-relative artifact path to the model.\n :param tf_sess: The TensorFlow session in which to load the metagraph.\n :param tf_meta_graph_tags: A list of tags identifying the model's metagraph within the\n serialized `SavedModel` object. For more information, see the `tags`\n parameter of the `tf.saved_model.builder.SavedModelBuilder` method:\n https://www.tensorflow.org/api_docs/python/tf/saved_model/builder/\n SavedModelBuilder#add_meta_graph\n :param tf_signature_def_key: A string identifying the input/output signature associated with the\n model. This is a key within the serialized `SavedModel`'s signature\n definition mapping. For more information, see the\n `signature_def_map` parameter of the\n `tf.saved_model.builder.SavedModelBuilder` method.\n :return: A TensorFlow signature definition of type:\n ``tensorflow.core.protobuf.meta_graph_pb2.SignatureDef``. This defines input and\n output tensors within the specified metagraph for inference.\n \"\"\"\n meta_graph_def = tf.saved_model.loader.load(\n sess=tf_sess,\n tags=tf_meta_graph_tags,\n export_dir=tf_saved_model_dir)\n if tf_signature_def_key not in meta_graph_def.signature_def:\n raise MlflowException(\"Could not find signature def key %s\" % tf_signature_def_key)\n return meta_graph_def.signature_def[tf_signature_def_key]\n\n\ndef _load_pyfunc(path):\n \"\"\"\n Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``. 
This function loads an MLflow\n model with the TensorFlow flavor into a new TensorFlow graph and exposes it behind the\n `pyfunc.predict` interface.\n \"\"\"\n tf_graph = tf.Graph()\n tf_sess = tf.Session(graph=tf_graph)\n with tf_graph.as_default():\n signature_def = load_model(path=path, tf_sess=tf_sess, run_id=None)\n\n return _TFWrapper(tf_sess=tf_sess, tf_graph=tf_graph, signature_def=signature_def)\n\n\nclass _TFWrapper(object):\n \"\"\"\n Wrapper class that exposes a TensorFlow model for inference via a ``predict`` function such that\n predict(data: pandas.DataFrame) -> pandas.DataFrame.\n \"\"\"\n def __init__(self, tf_sess, tf_graph, signature_def):\n \"\"\"\n :param tf_sess: The TensorFlow session used to evaluate the model.\n :param tf_graph: The TensorFlow graph containing the model.\n :param signature_def: The TensorFlow signature definition used to transform input dataframes\n into tensors and output vectors into dataframes.\n \"\"\"\n self.tf_sess = tf_sess\n self.tf_graph = tf_graph\n # We assume that input keys in the signature definition correspond to input DataFrame column\n # names\n self.input_tensor_mapping = {\n tensor_column_name: tf_graph.get_tensor_by_name(tensor_info.name)\n for tensor_column_name, tensor_info in signature_def.inputs.items()\n }\n # We assume that output keys in the signature definition correspond to output DataFrame\n # column names\n self.output_tensors = {\n sigdef_output: tf_graph.get_tensor_by_name(tnsr_info.name)\n for sigdef_output, tnsr_info in signature_def.outputs.items()\n }\n\n def predict(self, df):\n with self.tf_graph.as_default():\n # Build the feed dict, mapping input tensors to DataFrame column values.\n feed_dict = {\n self.input_tensor_mapping[tensor_column_name]: df[tensor_column_name].values\n for tensor_column_name in self.input_tensor_mapping.keys()\n }\n raw_preds = self.tf_sess.run(self.output_tensors, feed_dict=feed_dict)\n pred_dict = {column_name: values.ravel() for column_name, values in raw_preds.items()}\n return pandas.DataFrame(data=pred_dict)\n" }, { "alpha_fraction": 0.6223288178443909, "alphanum_fraction": 0.6223288178443909, "avg_line_length": 45.79591751098633, "blob_id": "e3044f6abce807b6de95ed516131b9517646fed3", "content_id": "a8c67f7fdd6df0f8e752a262e0caf6c6299924ce", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2293, "license_type": "permissive", "max_line_length": 98, "num_lines": 49, "path": "/mlflow/store/local_artifact_repo.py", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "import distutils.dir_util as dir_util\nimport shutil\n\nfrom mlflow.store.artifact_repo import ArtifactRepository\nfrom mlflow.utils.file_utils import mkdir, list_all, get_file_info\nfrom mlflow.utils.validation import path_not_unique, bad_path_message\n\n\nclass LocalArtifactRepository(ArtifactRepository):\n \"\"\"Stores artifacts as files in a local directory.\"\"\"\n\n def get_path_module(self):\n import os\n return os.path\n\n def log_artifact(self, local_file, artifact_path=None):\n if artifact_path and path_not_unique(artifact_path):\n raise Exception(\"Invalid artifact path: '%s'. 
%s\" % (artifact_path,\n bad_path_message(artifact_path)))\n artifact_dir = self.get_path_module().join(self.artifact_uri, artifact_path) \\\n if artifact_path else self.artifact_uri\n if not self.get_path_module().exists(artifact_dir):\n mkdir(artifact_dir)\n shutil.copy(local_file, artifact_dir)\n\n def log_artifacts(self, local_dir, artifact_path=None):\n if artifact_path and path_not_unique(artifact_path):\n raise Exception(\"Invalid artifact path: '%s'. %s\" % (artifact_path,\n bad_path_message(artifact_path)))\n artifact_dir = self.get_path_module().join(self.artifact_uri, artifact_path) \\\n if artifact_path else self.artifact_uri\n if not self.get_path_module().exists(artifact_dir):\n mkdir(artifact_dir)\n dir_util.copy_tree(src=local_dir, dst=artifact_dir)\n\n def list_artifacts(self, path=None):\n artifact_dir = self.artifact_uri\n list_dir = self.get_path_module().join(artifact_dir, path) if path else artifact_dir\n if self.get_path_module().isdir(list_dir):\n artifact_files = list_all(list_dir, full_path=True)\n infos = [get_file_info(f, self.get_path_module().relpath(f, artifact_dir))\n for f in artifact_files]\n return sorted(infos, key=lambda f: f.path)\n else:\n return []\n\n def _download_file(self, remote_file_path, local_path):\n shutil.copyfile(\n self.get_path_module().join(self.artifact_uri, remote_file_path), local_path)\n" }, { "alpha_fraction": 0.7160804271697998, "alphanum_fraction": 0.7184451818466187, "avg_line_length": 41.822784423828125, "blob_id": "2b4738d1da12a99b099d0fa72c5e637313302d93", "content_id": "bbdcfd1c4cdcbaa8e1725e42a5846d480b841d5b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6766, "license_type": "permissive", "max_line_length": 100, "num_lines": 158, "path": "/tests/pyfunc/test_scoring_server.py", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "import os\nimport json\nimport pandas as pd\nimport numpy as np\nfrom collections import namedtuple\n\nimport pytest\nimport sklearn.datasets as datasets\nimport sklearn.neighbors as knn\n\nimport mlflow.pyfunc.scoring_server as pyfunc_scoring_server\nimport mlflow.sklearn\nfrom mlflow.protos.databricks_pb2 import ErrorCode, MALFORMED_REQUEST, BAD_REQUEST\n\nfrom tests.helper_functions import pyfunc_serve_and_score_model\n\n\nModelWithData = namedtuple(\"ModelWithData\", [\"model\", \"inference_data\"])\n\n\n@pytest.fixture(scope=\"session\")\ndef sklearn_model():\n iris = datasets.load_iris()\n X = iris.data[:, :2] # we only take the first two features.\n y = iris.target\n knn_model = knn.KNeighborsClassifier()\n knn_model.fit(X, y)\n return ModelWithData(model=knn_model, inference_data=X)\n\n\n@pytest.fixture\ndef model_path(tmpdir):\n return str(os.path.join(tmpdir.strpath, \"model\"))\n\n\ndef test_scoring_server_responds_to_invalid_json_input_with_stacktrace_and_error_code(\n sklearn_model, model_path):\n mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)\n\n incorrect_json_content = json.dumps({\"not\": \"a serialized dataframe\"})\n response = pyfunc_serve_and_score_model(\n model_path=os.path.abspath(model_path),\n data=incorrect_json_content,\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)\n response_json = json.loads(response.content)\n assert \"error_code\" in response_json\n assert response_json[\"error_code\"] == ErrorCode.Name(MALFORMED_REQUEST)\n assert \"message\" in response_json\n assert \"stack_trace\" in response_json\n\n\ndef 
test_scoring_server_responds_to_malformed_json_input_with_stacktrace_and_error_code(\n sklearn_model, model_path):\n mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)\n\n malformed_json_content = \"this is,,,, not valid json\"\n response = pyfunc_serve_and_score_model(\n model_path=os.path.abspath(model_path),\n data=malformed_json_content,\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)\n response_json = json.loads(response.content)\n assert \"error_code\" in response_json\n assert response_json[\"error_code\"] == ErrorCode.Name(MALFORMED_REQUEST)\n assert \"message\" in response_json\n assert \"stack_trace\" in response_json\n\n\ndef test_scoring_server_responds_to_invalid_pandas_input_format_with_stacktrace_and_error_code(\n sklearn_model, model_path):\n mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)\n\n # The pyfunc scoring server expects a serialized Pandas Dataframe in `split` or `records`\n # format; passing a serialized Dataframe in `table` format should yield a readable error\n pandas_table_content = pd.DataFrame(sklearn_model.inference_data).to_json(orient=\"table\")\n response = pyfunc_serve_and_score_model(\n model_path=os.path.abspath(model_path),\n data=pandas_table_content,\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)\n response_json = json.loads(response.content)\n assert \"error_code\" in response_json\n assert response_json[\"error_code\"] == ErrorCode.Name(MALFORMED_REQUEST)\n assert \"message\" in response_json\n assert \"stack_trace\" in response_json\n\n\ndef test_scoring_server_responds_to_incompatible_inference_dataframe_with_stacktrace_and_error_code(\n sklearn_model, model_path):\n mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)\n incompatible_df = pd.DataFrame(np.array(range(10)))\n\n response = pyfunc_serve_and_score_model(\n model_path=os.path.abspath(model_path),\n data=incompatible_df,\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)\n response_json = json.loads(response.content)\n assert \"error_code\" in response_json\n assert response_json[\"error_code\"] == ErrorCode.Name(BAD_REQUEST)\n assert \"message\" in response_json\n assert \"stack_trace\" in response_json\n\n\ndef test_scoring_server_responds_to_invalid_csv_input_with_stacktrace_and_error_code(\n sklearn_model, model_path):\n mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)\n\n # Any empty string is not valid pandas CSV\n incorrect_csv_content = \"\"\n response = pyfunc_serve_and_score_model(\n model_path=os.path.abspath(model_path),\n data=incorrect_csv_content,\n content_type=pyfunc_scoring_server.CONTENT_TYPE_CSV)\n response_json = json.loads(response.content)\n assert \"error_code\" in response_json\n assert response_json[\"error_code\"] == ErrorCode.Name(MALFORMED_REQUEST)\n assert \"message\" in response_json\n assert \"stack_trace\" in response_json\n\n\ndef test_scoring_server_successfully_evaluates_correct_dataframes_with_pandas_records_orientation(\n sklearn_model, model_path):\n mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)\n\n pandas_record_content = pd.DataFrame(sklearn_model.inference_data).to_json(orient=\"records\")\n response_default_content_type = pyfunc_serve_and_score_model(\n model_path=os.path.abspath(model_path),\n data=pandas_record_content,\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON)\n assert response_default_content_type.status_code == 200\n\n 
response_records_content_type = pyfunc_serve_and_score_model(\n model_path=os.path.abspath(model_path),\n data=pandas_record_content,\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_RECORDS_ORIENTED)\n assert response_records_content_type.status_code == 200\n\n\ndef test_scoring_server_successfully_evaluates_correct_dataframes_with_pandas_split_orientation(\n sklearn_model, model_path):\n mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)\n\n pandas_split_content = pd.DataFrame(sklearn_model.inference_data).to_json(orient=\"split\")\n response = pyfunc_serve_and_score_model(\n model_path=os.path.abspath(model_path),\n data=pandas_split_content,\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)\n assert response.status_code == 200\n\n\ndef test_scoring_server_responds_to_invalid_content_type_request_with_unsupported_content_type_code(\n sklearn_model, model_path):\n mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)\n\n pandas_split_content = pd.DataFrame(sklearn_model.inference_data).to_json(orient=\"split\")\n response = pyfunc_serve_and_score_model(\n model_path=os.path.abspath(model_path),\n data=pandas_split_content,\n content_type=\"not_a_supported_content_type\")\n assert response.status_code == 415\n" }, { "alpha_fraction": 0.752042293548584, "alphanum_fraction": 0.7530033588409424, "avg_line_length": 51, "blob_id": "8e586c0852186cd786a436ab7123a0400fc1d732", "content_id": "340602e13613440ae42d90ccb5c6edb4415b4070", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2081, "license_type": "permissive", "max_line_length": 104, "num_lines": 40, "path": "/examples/docker/README.rst", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "Dockerized Model Training with MLflow\n-------------------------------------\nThis directory contains an MLflow project that trains a linear regression model on the UC Irvine\nWine Quality Dataset. The project uses a docker image to capture the dependencies needed to run\ntraining code. Running a project in a docker environment (as opposed to conda) allows for capturing\nnon-Python dependencies, e.g. Java libraries. In the future, we also hope to add tools to MLflow\nfor running dockerized projects e.g. 
on a Kubernetes cluster for scale-out.\n\n\nRunning this Example\n^^^^^^^^^^^^^^^^^^^^\n\nInstall MLflow via `pip install mlflow` and install `docker <https://www.docker.com/get-started>`_.\nThen, build a docker image containing MLflow via `docker build examples/docker -t mlflow-docker-example`\nand run the example project via `mlflow run examples/docker -P alpha=0.5`.\n\nWhat happens when the project is run?\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nLet's start by looking at the MLproject file, which specifies the docker image in which to run the\nproject via a `docker_env` field:\n\n.. code-block:: yaml\n\n    docker_env:\n      image: mlflow-docker-example\n\nHere, `image` can be any valid argument to `docker run`, such as the tag, ID or\nURL of a docker image (see `Docker docs <https://docs.docker.com/engine/reference/run/#general-form>`_).\nThe above example references a locally-stored image (mlflow-docker-example) by tag.\n\nRunning `mlflow run examples/docker` builds a new docker image based on `mlflow-docker-example`\nbut also containing our project code, then executes the default (main) project entry point\nwithin the container via `docker run`.\nThis built image will be tagged as `mlflow-docker-example-<git-version>` where `git-version` is the git\ncommit ID.\n\nEnvironment variables such as MLFLOW_TRACKING_URI are\npropagated inside the container during project execution. When running against a local tracking URI,\ne.g. a local `mlruns` directory, MLflow will mount the host system's tracking directory inside the\ncontainer so that metrics and params logged during project execution are accessible afterwards.\n\n" }, { "alpha_fraction": 0.5941731929779053, "alphanum_fraction": 0.5970455408096313, "avg_line_length": 30.243589401245117, "blob_id": "710db980295c47c9b37f0432cc3f341890dabac0", "content_id": "92e3b1978245f7e70ad2760b954dd80a5bf43096", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2437, "license_type": "permissive", "max_line_length": 92, "num_lines": 78, "path": "/mlflow/server/js/src/components/BaggedCell.js", "repo_name": "tomasatdatabricks/mlflow", "src_encoding": "UTF-8", "text": "import React, { PureComponent } from 'react';\nimport PropTypes from 'prop-types';\nimport { Dropdown, MenuItem } from 'react-bootstrap';\nimport classNames from 'classnames';\nimport ExperimentRunsSortToggle from './ExperimentRunsSortToggle';\nimport EmptyIfClosedMenu from './EmptyIfClosedMenu';\n\nconst styles = {\n  metricParamCellContent: {\n    display: \"inline-block\",\n    maxWidth: 120,\n  },\n};\n\nexport default class BaggedCell extends PureComponent {\n  static propTypes = {\n    keyName: PropTypes.string.isRequired,\n    value: PropTypes.string.isRequired,\n    setSortByHandler: PropTypes.func.isRequired,\n    isParam: PropTypes.bool.isRequired,\n    isMetric: PropTypes.bool.isRequired,\n    onRemoveBagged: PropTypes.func.isRequired,\n    sortIcon: PropTypes.node,\n  };\n\n  render() {\n    const { keyName, value, setSortByHandler, isParam, isMetric, onRemoveBagged,\n      sortIcon } = this.props;\n    const cellClass = classNames(\"metric-param-content\", \"metric-param-cell\", \"BaggedCell\");\n    return (\n      <span\n        className={cellClass}\n      >\n        <Dropdown id=\"dropdown-custom-1\" style={{width: 250}}>\n          <ExperimentRunsSortToggle\n            bsRole=\"toggle\"\n            className={\"metric-param-sort-toggle\"}\n          >\n            <span\n              className=\"run-table-container underline-on-hover\"\n              style={styles.metricParamCellContent}\n              title={keyName}\n            >\n              {sortIcon}\n              {keyName}:\n            </span>\n 
</ExperimentRunsSortToggle>\n <span\n className=\"metric-param-value run-table-container\"\n style={styles.metricParamCellContent}\n >\n {value}\n </span>\n <EmptyIfClosedMenu className=\"mlflow-menu\" bsRole=\"menu\">\n <MenuItem\n className=\"mlflow-menu-item\"\n onClick={() => setSortByHandler(isMetric, isParam, keyName, true)}\n >\n Sort ascending\n </MenuItem>\n <MenuItem\n className=\"mlflow-menu-item\"\n onClick={() => setSortByHandler(isMetric, isParam, keyName, false)}\n >\n Sort descending\n </MenuItem>\n <MenuItem\n className=\"mlflow-menu-item\"\n onClick={() => onRemoveBagged(isParam, keyName)}\n >\n Display in own column\n </MenuItem>\n </EmptyIfClosedMenu>\n </Dropdown>\n </span>\n );\n }\n}\n" } ]
22
Kostimo/Galaxy_Attack_Game
https://github.com/Kostimo/Galaxy_Attack_Game
16a0d8d62ffead8ebd8f142bb57471debb548bdc
ac34a5a8047d8b001b91b1b21d0cacf307407daf
676d702f6cb0dc56421c7f3a900695f3c76851c7
refs/heads/main
2023-02-17T14:22:30.197120
2021-01-18T18:46:32
2021-01-18T18:46:32
320,861,655
4
1
null
null
null
null
null
[ { "alpha_fraction": 0.6991474032402039, "alphanum_fraction": 0.7125456929206848, "avg_line_length": 34.65217208862305, "blob_id": "77508643b7754dc1721df87cda76f51f9b69b395", "content_id": "ccd0eb1cb2d9d32ec2329b05d1b85bea71fd5f49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 821, "license_type": "no_license", "max_line_length": 257, "num_lines": 23, "path": "/README.MD", "repo_name": "Kostimo/Galaxy_Attack_Game", "src_encoding": "UTF-8", "text": "# *Galaxy Attack Game*\n- - - \n```\nTecnologies stack:\n Python 3\n```\n### About the game\n\n* To move the player to the right and left, use the \"A\" and \"D\" keys respectively. Press the space to shoot.\n\n* Dodge or shoot meteorites, the number of which increases with your score. Also, with a *3%* chance, you can knock out bonuses from meteorites. One of which increases health randomly in the range [10; 30], and the other - activates a laser for 1.5 seconds.\n\n* The player has 3 lives. After death, a screen with statistics appears, on which you can see the game time, the player's accuracy and the points scored.\n\n#### To start the game \n`pip install -r requirments.txt` - *install requirments*\n\n#### Here are a couple of screenshots:\n\n![one](./Screenshots/Screenshot_1.jpg)\n\n\n![two](./Screenshots/Screenshot_2.jpg)\n\n" }, { "alpha_fraction": 0.5403851866722107, "alphanum_fraction": 0.5733927488327026, "avg_line_length": 33.37288284301758, "blob_id": "fbb16fef5fdaee9e406ab9c0dcc9f46b4022b721", "content_id": "4c0d4f9ac9a5db711766a3b3e4edc6bb6a5cfb64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21677, "license_type": "no_license", "max_line_length": 133, "num_lines": 590, "path": "/GalaxyAttackGame.py", "repo_name": "Kostimo/Galaxy_Attack_Game", "src_encoding": "UTF-8", "text": "'''Frozen Jam by tgfcoder <https://twitter.com/tgfcoder> licensed under CC-BY-3'''\r\nimport pygame\r\nimport random\r\nimport os\r\n\r\nWIDTH = 480\r\nHEIGHT = 600\r\nFPS = 60\r\n\r\nscore_list = [500, 1000, 2000, 5000, 7500, 10000, 15000, 20000]\r\n\r\n\r\n# Цвета (R, G, B)\r\nBLACK = (0, 0, 0)\r\nWHITE_100 = (255, 255, 255)\r\nWHITE_90 = (230, 230, 230)\r\nWHITE_80 = (229, 233, 240)\r\nWHITE_70 = (216, 222, 233)\r\n\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nBLUE = (0, 0, 255)\r\nYELLOW = (255, 255, 0)\r\n\r\n# Игра и окно\r\npygame.init()\r\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\r\npygame.display.set_caption(\"GALAXY ATTACK!!!\")\r\nclock = pygame.time.Clock()\r\n\r\n# Загрузка всей игровой графики\r\nmain = os.path.dirname(__file__)\r\nimg_folder = os.path.join(main, \"img\")\r\nsound_folder = os.path.join(main, \"sound\")\r\n\r\nbackground = pygame.transform.scale(pygame.image.load(os.path.join(img_folder, \"starfield2.jpg\")).convert(), (WIDTH, HEIGHT))\r\nbackground_rect = background.get_rect()\r\nenemy_img = pygame.transform.scale(pygame.image.load(os.path.join(img_folder, \"enemyGreen1.png\")).convert(), (50,40))\r\nplayer_img = pygame.image.load(os.path.join(img_folder, \"playerShip1_orange.png\")).convert()\r\nplayer_mini_img = pygame.transform.scale(player_img, (25,19))\r\nplayer_mini_img.set_colorkey(BLACK)\r\naccuracy_img = pygame.image.load(os.path.join(img_folder, \"accuracy.png\")).convert()\r\naccuracy_img.set_colorkey(BLACK)\r\n\r\nweapons = {\r\n \"bullet\": pygame.transform.scale(pygame.image.load(os.path.join(img_folder, \"laserRed07.png\")).convert(), (13, 35)),\r\n \"laser\": 
pygame.transform.scale(pygame.image.load(os.path.join(img_folder, \"laserRed.jpg\")).convert(), (11,10))\r\n}\r\nenemy_bullet = pygame.transform.scale(pygame.image.load(os.path.join(img_folder, \"laserGreen13.png\")).convert(), (13, 35))\r\n\r\nmeteor_images = []\r\nmeteor_images_list =['meteorBrown_big1.png','meteorBrown_med1.png',\r\n                     'meteorBrown_med1.png','meteorBrown_med3.png',\r\n                     'meteorBrown_small1.png','meteorBrown_small2.png',\r\n                     'meteorBrown_tiny1.png']\r\nfor img in meteor_images_list:\r\n    meteor_images.append(pygame.image.load(os.path.join(img_folder, img)).convert())\r\n\r\nexplosion_animation = {\r\n    \"large\": [],\r\n    \"small\": [],\r\n    \"player\": []\r\n}\r\nfor i in range(9):\r\n    filename = f'regularExplosion0{i}.png'\r\n    img = pygame.image.load(os.path.join(img_folder, filename)).convert()\r\n    img.set_colorkey(BLACK)\r\n    img_large = pygame.transform.scale(img, (75, 75))\r\n    explosion_animation['large'].append(img_large)\r\n    img_small = pygame.transform.scale(img, (32, 32))\r\n    explosion_animation['small'].append(img_small)\r\n    filename = f\"sonicExplosion0{i}.png\"\r\n    img = pygame.image.load(os.path.join(img_folder, filename)).convert()\r\n    img.set_colorkey(BLACK)\r\n    explosion_animation[\"player\"].append(img)\r\n\r\npowerup_images = {\r\n    \"shield\": pygame.image.load(os.path.join(img_folder, \"shield_silver.png\")).convert(),\r\n    \"weapon\": pygame.image.load(os.path.join(img_folder, \"bold_silver.png\")).convert()\r\n}\r\n\r\n# Load game sounds and music\r\nshoot_sound = pygame.mixer.Sound(os.path.join(sound_folder, \"pew.wav\"))\r\nexpl_sounds = []\r\nfor expl in (\"expl1.wav\", \"expl2.wav\"):\r\n    expl_sounds.append(pygame.mixer.Sound(os.path.join(sound_folder, expl)))\r\nfor snd in expl_sounds:\r\n    snd.set_volume(0.1)\r\npygame.mixer.music.load(os.path.join(sound_folder, \"tgfcoder-FrozenJam-SeamlessLoop.ogg\"))\r\n\r\n\r\n# Player class\r\nclass Player(pygame.sprite.Sprite):\r\n    def __init__(self):\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.lives = 3\r\n        self.hidden = False\r\n        self.super = False\r\n        self.super_timer = pygame.time.get_ticks()\r\n        self.hide_timer = pygame.time.get_ticks()\r\n        self.shield = 100\r\n        self.image = pygame.transform.scale(player_img, (50, 40)) \r\n        self.image.set_colorkey(BLACK)\r\n        self.rect = self.image.get_rect()\r\n        self.radius = 20\r\n        #pygame.draw.circle(self.image, RED, self.rect.center, self.radius)\r\n        self.rect.centerx = WIDTH//2\r\n        self.rect.bottom = HEIGHT-25\r\n        self.speedx = 0\r\n    \r\n    def shoot(self):\r\n        bullet = Bullet(self.rect.centerx, self.rect.top, \"bullet\")\r\n        all_sprites.add(bullet)\r\n        bullets.add(bullet)\r\n        shoot_sound.set_volume(0.1)\r\n        shoot_sound.play()\r\n    \r\n    \r\n    def super_shoot(self):\r\n        bullet = Bullet(self.rect.centerx+1, self.rect.top-2, \"laser\")\r\n        all_sprites.add(bullet)\r\n        bullets.add(bullet)\r\n        shoot_sound.set_volume(0.06)\r\n        shoot_sound.play()\r\n\r\n    def hide(self):\r\n        self.hidden = True\r\n        self.hide_timer = pygame.time.get_ticks()\r\n        self.rect.center = (WIDTH/2, -200)\r\n\r\n\r\n    def update(self):\r\n        self.speedx = 0\r\n        keys = pygame.key.get_pressed()\r\n        if keys[pygame.K_a]:\r\n            self.speedx = -8\r\n        if keys[pygame.K_d]:\r\n            self.speedx = 8\r\n        self.rect.x += self.speedx\r\n        if self.rect.right > WIDTH:\r\n            self.rect.right = WIDTH\r\n        if self.rect.left < 0:\r\n            self.rect.left = 0\r\n\r\n        now = pygame.time.get_ticks()\r\n        if self.hidden and now - self.hide_timer >= 1000:\r\n            self.hidden = False\r\n            self.rect.centerx = WIDTH // 2\r\n            self.rect.bottom = HEIGHT-25\r\n        if 
self.super and not (now - self.super_timer >= 1500):\r\n            self.super_shoot()\r\n        else:\r\n            self.super = False\r\n        \r\n# Enemy class\r\nclass Enemy(pygame.sprite.Sprite):\r\n    def __init__(self):\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.image = enemy_img\r\n        self.image.set_colorkey(BLACK)\r\n        self.rect = self.image.get_rect()\r\n        self.rect.x = 0\r\n        self.speedx = 0\r\n        self.rect.y = -60\r\n        self.speedy = 3\r\n        self.shoot_delay = 300\r\n        self.last_shoot = pygame.time.get_ticks()\r\n\r\n    def shoot(self):\r\n        now = pygame.time.get_ticks()\r\n        if now - self.last_shoot >= self.shoot_delay:\r\n            self.last_shoot = now\r\n            ebullet = enemyBullet(self.rect.centerx, self.rect.bottom)\r\n            enemy_bullets.add(ebullet)\r\n            all_sprites.add(ebullet)\r\n            shoot_sound.set_volume(0.09)\r\n            shoot_sound.play()\r\n\r\n    def update(self):\r\n        self.rect.y += self.speedy\r\n        self.rect.x += self.speedx\r\n        if self.rect.top >= 25:\r\n            self.shoot()\r\n            self.speedy = 0\r\n        if self.rect.right >= WIDTH:\r\n            self.speedx = -3\r\n        if self.rect.left <= 0:\r\n            self.speedx = 3\r\n    \r\n# Mob class\r\nclass Mob(pygame.sprite.Sprite):\r\n    def __init__(self):\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.image_original = random.choice(meteor_images)\r\n        self.image_original.set_colorkey(BLACK)\r\n        self.image = self.image_original.copy()\r\n        self.rect = self.image.get_rect()\r\n        self.radius = int(self.rect.width*0.85//2)\r\n        #pygame.draw.circle(self.image, RED, self.rect.center, self.radius)\r\n        self.rect.x = random.randrange(WIDTH - self.rect.width)\r\n        self.rect.y = random.randrange(-200, -100)\r\n        self.speedx = random.randrange(-2, 2)\r\n        self.speedy = random.randrange(3, 9)\r\n        self.rot = 0\r\n        self.rot_speed = random.randrange(-8, 8)\r\n        self.last_update = pygame.time.get_ticks()\r\n    \r\n    def rotate(self):\r\n        now = pygame.time.get_ticks()\r\n        if now - self.last_update > 50:\r\n            self.last_update = now\r\n            self.rot += self.rot_speed % 360\r\n            image_copy = pygame.transform.rotate(self.image_original, self.rot)\r\n            mob_center = self.rect.center\r\n            self.image = image_copy\r\n            self.rect = self.image.get_rect()\r\n            self.rect.center = mob_center\r\n\r\n    def update(self):\r\n        self.rotate()\r\n        self.rect.y += self.speedy\r\n        self.rect.x += self.speedx\r\n        if self.rect.top > HEIGHT + 40 or self.rect.left < (-20-self.rect.width) or self.rect.right > (WIDTH + 20 + self.rect.width):\r\n            self.rect.x = random.randrange(WIDTH - self.rect.width)\r\n            self.rect.y = random.randrange(-200, -100)\r\n            self.speedx = random.randrange(-2, 2)\r\n            self.speedy = random.randrange(3, 9)\r\n\r\n# Player bullet class\r\nclass Bullet(pygame.sprite.Sprite):\r\n    def __init__(self, x, y, type):\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.type = type\r\n        self.image = weapons[self.type]\r\n        self.image.set_colorkey(BLACK)\r\n        self.rect = self.image.get_rect()\r\n        self.rect.centerx = x\r\n        self.rect.bottom = y\r\n        self.speedy = -10\r\n    \r\n    def update(self):\r\n        self.rect.y += self.speedy\r\n        # Kill the bullet once it passes the top of the screen\r\n        if self.rect.bottom < 0:\r\n            self.kill()\r\n\r\n# Enemy bullet class\r\nclass enemyBullet(pygame.sprite.Sprite):\r\n    def __init__(self, x, y):\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.image = enemy_bullet\r\n        self.image.set_colorkey(BLACK)\r\n        self.rect = self.image.get_rect()\r\n        self.rect.centerx = x\r\n        self.rect.top = y\r\n        self.speedy = 10\r\n    \r\n    def update(self):\r\n        self.rect.y += self.speedy\r\n        # Kill the bullet once it passes the bottom of the screen\r\n        if self.rect.bottom > HEIGHT:\r\n            
self.kill()\r\n\r\n# Explosion animation class\r\nclass Explosion(pygame.sprite.Sprite):\r\n    FRAME_RATE = 50\r\n\r\n    def __init__(self, center, type):\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.type = type\r\n        self.image = explosion_animation[self.type][0]\r\n        self.rect = self.image.get_rect()\r\n        self.rect.center = center\r\n        self.frame = 0\r\n        self.last_update = pygame.time.get_ticks()\r\n\r\n    def update(self):\r\n        now = pygame.time.get_ticks()\r\n        if now - self.last_update > Explosion.FRAME_RATE:\r\n            self.last_update = now\r\n            self.frame += 1\r\n            if self.frame == len(explosion_animation[self.type]):\r\n                self.kill()\r\n            else:\r\n                center = self.rect.center\r\n                self.image = explosion_animation[self.type][self.frame]\r\n                self.rect = self.image.get_rect()\r\n                self.rect.center = center\r\n\r\n# Power-up class\r\nclass PowerUp(pygame.sprite.Sprite):\r\n    def __init__(self, center):\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.type = random.choice([\"shield\", \"weapon\"])\r\n        self.image = powerup_images[self.type]\r\n        self.image.set_colorkey(BLACK)\r\n        self.rect = self.image.get_rect()\r\n        self.rect.center = center\r\n        self.speedy = 2\r\n    \r\n    def update(self):\r\n        self.rect.y += self.speedy\r\n        # Kill it once it passes the bottom of the screen\r\n        if self.rect.bottom > HEIGHT:\r\n            self.kill()\r\n\r\n# Background\r\nclass Background (pygame.sprite.Sprite):\r\n    def __init__(self, y):\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.image = background\r\n        self.rect = self.image.get_rect()\r\n        self.rect.center = (WIDTH / 2, -y + HEIGHT / 2)\r\n        self.y = y\r\n        self.speedy = 6\r\n    \r\n    def update(self):\r\n        self.rect.y += self.speedy\r\n        if self.rect.y >= -self.y + HEIGHT:\r\n            self.rect.y = -self.y\r\n\r\ndef new_mob():\r\n    m = Mob()\r\n    all_sprites.add(m)\r\n    mobs.add(m)\r\n\r\n# Text\r\nfont_name = pygame.font.match_font(\"ObelixPro\")\r\ndef draw_text(surf, text, color, size, x, y):\r\n    font = pygame.font.Font(font_name, size)\r\n    surface_font = font.render(text, True, color)\r\n    surface_font_rect = surface_font.get_rect()\r\n    surface_font_rect.midtop = (x,y)\r\n    surf.blit(surface_font, surface_font_rect)\r\n\r\n# Health bar\r\ndef draw_shield_bar(surf, x, y, life_points):\r\n    if life_points < 0:\r\n        life_points = 0\r\n    BAR_WIDTH = 100\r\n    BAR_HEIGHT = 10\r\n    fill_width = (life_points/100)*BAR_WIDTH \r\n    fill_rect = pygame.Rect(x, y, fill_width, BAR_HEIGHT)\r\n    border_rect = pygame.Rect(x, y, BAR_WIDTH, BAR_HEIGHT)\r\n    pygame.draw.rect(surf, WHITE_100, border_rect)\r\n    pygame.draw.rect(surf, RED, fill_rect) \r\n\r\n# Number of lives\r\ndef draw_lives(surf, x, y, lives, img): \r\n    for i in range(lives):\r\n        img_rect = img.get_rect()\r\n        img_rect.x = x + 30*i\r\n        img_rect.y = y\r\n        surf.blit(img, img_rect)\r\n\r\n# Player accuracy\r\ndef draw_accuracy(surf, x, y, img): \r\n    img_rect = img.get_rect()\r\n    img_rect.x = x\r\n    img_rect.y = y\r\n    surf.blit(img, img_rect)\r\n\r\n# Menu screen\r\ndef show_menu_screen():\r\n    screen.blit(background, background_rect)\r\n    draw_text(screen, \"GALAXY ATTACK\", WHITE_70, 40, WIDTH/2, HEIGHT/4)\r\n    draw_text(screen, \"Keys \\\"A\\\" and \\\"D\\\" for movement, space to fire\", WHITE_70, 16, WIDTH/2, HEIGHT/2)\r\n    draw_text(screen, \"Collect special bonus for super\", WHITE_70, 16, WIDTH/2, HEIGHT/2 + 40)\r\n    draw_text(screen, \"Press \\\"ENTER\\\" to start\", WHITE_70, 22, WIDTH/2, HEIGHT*0.75)\r\n    pygame.display.flip()\r\n    waiting = True\r\n    while waiting:\r\n        menu_time = pygame.time.get_ticks()\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                
pygame.quit()\r\n            if event.type == pygame.KEYUP:\r\n                if event.key == pygame.K_RETURN:\r\n                    waiting = False\r\n                    return menu_time\r\n\r\n# Show statistics after losing\r\ndef show_statistics(time):\r\n    screen.blit(background, background_rect)\r\n    draw_text(screen, \"STATISTICS\", WHITE_80, 54, WIDTH/2, 80)\r\n    draw_text(screen, f\"Score: {score}\", WHITE_80, 20, 107, HEIGHT/4.5 + 100)\r\n    draw_text(screen, f\"Accuracy: {accuracy}%\", WHITE_80, 20, 140, HEIGHT/4.5 + 150)\r\n    time = \"%.2f\" % (time / 1000)\r\n    draw_text(screen, f\"Time: {time} s\", WHITE_80, 20, 115.5, HEIGHT/4.5 + 200)\r\n    draw_text(screen, \"Press \\\"ENTER\\\" to continue\", WHITE_80, 20, WIDTH/2, HEIGHT * 0.75)\r\n    pygame.display.flip()\r\n    waiting = True\r\n    while waiting:\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                pygame.quit()\r\n            if event.type == pygame.KEYUP:\r\n                if event.key == pygame.K_RETURN:\r\n                    waiting = False\r\n\r\n\r\nenemy_alive = False\r\n# Game loop\r\nMENU = True\r\nGAME = True\r\npygame.mixer.music.play(loops=-1)\r\nwhile GAME:\r\n    clock.tick(FPS)\r\n    pygame.mixer.music.set_volume(0.1)\r\n    if MENU:\r\n        pygame.mixer.music.set_volume(0.02)\r\n        MENU = False\r\n        menu_time = show_menu_screen()\r\n        all_sprites = pygame.sprite.Group()\r\n        mobs = pygame.sprite.Group()\r\n        bullets = pygame.sprite.Group()\r\n        enemy_bullets = pygame.sprite.Group()\r\n        powerups = pygame.sprite.Group()\r\n        bg1 = Background(0)\r\n        bg2 = Background(HEIGHT)\r\n        all_sprites.add(bg1)\r\n        all_sprites.add(bg2)\r\n        player = Player()\r\n        all_sprites.add(player)\r\n        for _ in range(2):\r\n            new_mob()\r\n        score = 0\r\n        score_list = [500, 1000, 2000, 5000, 7500, 10000, 15000, 20000]\r\n        lucky_hits = 0 \r\n        number_of_shots = 0 \r\n        accuracy = 0\r\n        \r\n# Track the game duration\r\n    game_time = pygame.time.get_ticks() - menu_time\r\n\r\n\r\n    if player.lives == 0 and not death_explosion.alive():\r\n        pygame.mixer.music.set_volume(0.02)\r\n        show_statistics(game_time)\r\n        MENU = True\r\n    for event in pygame.event.get():\r\n        if event.type == pygame.QUIT:\r\n            GAME = False\r\n        elif event.type == pygame.KEYDOWN:\r\n            if event.key == pygame.K_SPACE:\r\n                player.shoot()\r\n                number_of_shots += 1\r\n\r\n# Game start - 2 mobs\r\n# 500 points - 4 mobs\r\n# 1000 points - 6 mobs\r\n# 2000 points - 8 mobs\r\n# 5000 points - 10 mobs\r\n# 7500 points - 12 mobs\r\n# 10000 points - 14 mobs\r\n# 15000 points - 16 mobs\r\n# 20000 points - 18 mobs\r\n\r\n\r\n# Increase difficulty based on score\r\n    if len(score_list) != 0:\r\n        level = score_list[0]\r\n        if score >= level:\r\n            if level == 5000 or level == 7500 or level == 10000 or level == 15000 or level == 20000:\r\n                enemy = Enemy()\r\n                all_sprites.add(enemy)\r\n                enemy_alive = True\r\n            for _ in range(2):\r\n                new_mob()\r\n            bg1.speedy += 2\r\n            bg2.speedy += 2\r\n            score_list.pop(0)\r\n\r\n# Increase difficulty based on time\r\n    # if 10000 <= pygame.time.get_ticks() - menu_time <= 10016:\r\n    #     for _ in range(2):\r\n    #         new_mob()\r\n    #     bg1.speedy = 8\r\n    #     bg2.speedy = 8\r\n    # elif 20000 <= pygame.time.get_ticks() - menu_time <= 20016:\r\n    #     for _ in range(2):\r\n    #         new_mob()\r\n    #     bg1.speedy = 10\r\n    #     bg2.speedy = 10\r\n    # elif 30000 <= pygame.time.get_ticks() - menu_time <= 30016:\r\n    #     for _ in range(2):\r\n    #         new_mob()\r\n    #     bg1.speedy = 12\r\n    #     bg2.speedy = 12\r\n    # elif 40000 <= pygame.time.get_ticks() - menu_time <= 40016:\r\n    #     for _ in range(2):\r\n    #         new_mob()\r\n    #     bg1.speedy = 14\r\n    #     bg2.speedy = 14\r\n    # elif 70000 <= pygame.time.get_ticks() - 
menu_time <= 70016:\r\n    #     for _ in range(4):\r\n    #         new_mob()\r\n    #     bg1.speedy = 16\r\n    #     bg2.speedy = 16\r\n    # elif 100000 <= pygame.time.get_ticks() - menu_time <= 100016:\r\n    #     for _ in range(2):\r\n    #         new_mob()\r\n    #     bg1.speedy = 18\r\n    #     bg2.speedy = 18\r\n    # elif 120000 <= pygame.time.get_ticks() - menu_time <= 120016:\r\n    #     for _ in range(6):\r\n    #         new_mob()\r\n    #     bg1.speedy = 20\r\n    #     bg2.speedy = 20\r\n\r\n# Check whether a mob has hit the player\r\n    hits_with_player = pygame.sprite.spritecollide(player, mobs, True, pygame.sprite.collide_circle)\r\n    for hit in hits_with_player:\r\n        player.shield -= hit.radius*3\r\n        small_expl = Explosion(hit.rect.center, \"small\")\r\n        all_sprites.add(small_expl)\r\n        new_mob()\r\n        if player.shield <= 0:\r\n            death_explosion = Explosion(player.rect.center, \"player\")\r\n            all_sprites.add(death_explosion)\r\n            player.lives -= 1\r\n            player.shield = 100\r\n            player.hide()\r\n    \r\n# Check collisions between bullets and mobs\r\n    hits_with_bullets = pygame.sprite.groupcollide(mobs, bullets, True, True)\r\n    for hit in hits_with_bullets:\r\n        score += 50 - hit.radius\r\n        if hits_with_bullets[hit][0].type == \"bullet\":\r\n            lucky_hits += 1\r\n        random.choice(expl_sounds).play()\r\n        large_expl = Explosion(hit.rect.center, \"large\")\r\n        all_sprites.add(large_expl)\r\n        new_mob()\r\n        if random.randint(0,100) > 97:\r\n            pow = PowerUp(hit.rect.center)\r\n            all_sprites.add(pow)\r\n            powerups.add(pow)\r\n    \r\n# Check collisions between the player and power-ups\r\n    hits_with_powerups = pygame.sprite.spritecollide(player, powerups, True)\r\n    for hit in hits_with_powerups:\r\n        if hit.type == \"shield\":\r\n            player.shield += random.randrange(10, 30)\r\n            if player.shield >= 100:\r\n                player.shield = 100\r\n        if hit.type == \"weapon\":\r\n            player.super = True\r\n            player.super_timer = pygame.time.get_ticks()\r\n\r\n# Check collisions between the player and enemy bullets\r\n    hits_with_enemybullets = pygame.sprite.spritecollide(player, enemy_bullets, True)\r\n    for hit in hits_with_enemybullets:\r\n        player.shield -= 45\r\n        small_expl = Explosion(hit.rect.center, \"small\")\r\n        all_sprites.add(small_expl)\r\n        if player.shield <= 0:\r\n            death_explosion = Explosion(player.rect.center, \"player\")\r\n            all_sprites.add(death_explosion)\r\n            player.lives -= 1\r\n            player.shield = 100\r\n            player.hide()\r\n\r\n# Check collisions between the enemy and the player's bullets\r\n    if enemy_alive:\r\n        enemy_and_bullets = pygame.sprite.spritecollide(enemy, bullets, True)\r\n        for hit in enemy_and_bullets:\r\n            enemy.kill()\r\n            enemy_alive = False\r\n            death_explosion = Explosion(enemy.rect.center, \"player\")\r\n            all_sprites.add(death_explosion)\r\n\r\n# Compute the player's accuracy\r\n    if number_of_shots != 0:\r\n        accuracy = \"%.2f\" % (lucky_hits / number_of_shots * 100)\r\n\r\n# Update sprites\r\n    all_sprites.update()\r\n# Rendering\r\n    screen.blit(background, background_rect)\r\n    all_sprites.draw(screen)\r\n    draw_text(screen, str(score), WHITE_90, 20, WIDTH//2, 10)\r\n    draw_text(screen, str(accuracy)+\"%\", WHITE_90, 14, 65, 23)\r\n    draw_accuracy(screen, 5, 20, accuracy_img)\r\n    draw_shield_bar(screen, 5, 5, player.shield)\r\n    draw_lives(screen, WIDTH-100, 5, player.lives, player_mini_img)\r\n# Update the display\r\n    pygame.display.flip()\r\n\r\npygame.quit()\r\n\r\n\r\n" } ]
2
pereperi/Gotoh-algorithm
https://github.com/pereperi/Gotoh-algorithm
d73a07d44fde5f01abd6026563f3c9291189b052
6b20fac3b6ef6ad7fa1883bb8cb212c0730402e2
aea76cf48305d8ba7166b4cac6de4314957b92c2
refs/heads/main
2023-04-06T18:50:54.973255
2021-04-12T16:21:19
2021-04-12T16:21:19
357,260,727
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5435593724250793, "alphanum_fraction": 0.5758174061775208, "avg_line_length": 23.88524627685547, "blob_id": "bd72cae981c4bc22d5b14946f64177191a78fe5d", "content_id": "af1ab19a146ba3f12c2fc4aaae130500e52920c9", "detected_licenses": [ "CC0-1.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4557, "license_type": "permissive", "max_line_length": 135, "num_lines": 183, "path": "/gotoh.py", "repo_name": "pereperi/Gotoh-algorithm", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport sys\nimport re\nfrom Bio import SeqIO\n\ndef read_matrix(filename):\t#function to read substitution matrix\n\tmatrix = {}\t#initialize matrix\n\tf = open(filename)\t#open file\n\tchars = f.readline().rstrip().split('\\t')\t#read first line (separated by tabs)\n\taa_alphabet = []\t#initialize a list that will contain all aa\n\tfor x in chars[1:]:\n\t\taa_alphabet.append(x)\t#add the amino acids to aa_alphabet\n\tlines = f.readlines()\t#read the rest of the lines\n\trow = 0\t#to keep track of which row we are reading\n\tfor line in lines:\t#for every line (row)\n\t\tmatrix[aa_alphabet[row]] = {}\t#initialize a dictionary inside the main dictionary for the aa we are reading (one different every row)\n\t\tvalues = line.rstrip().split('\\t')\t#get the line splitted by tabs and without the endlines\n\t\tfor col in range(len(aa_alphabet)):\t#every element in values contains a value for one aa, with the same order as aa_alphabet\n\t\t\tmatrix[aa_alphabet[row]][aa_alphabet[col]] = int(values[col+1])\t#add that value to the dictionary of the aa of the current row\n\t\trow += 1\t#when done with the whole line, increase row by one and repeat process\n\treturn matrix\t#return the matrix (as a dictionary of dictionaries), and the list of all amino acids\n\n\t\t\nrecord = list(SeqIO.parse(sys.argv[1], \"fasta\"))\n\nseqI = re.sub(r\"[\\n-]\", \"\", \"\".join(record[0].seq))\nseqJ = re.sub(r\"[\\n-]\", \"\", \"\".join(record[1].seq))\n\nmatrix = read_matrix(sys.argv[2])\nlenI = len(seqI)\nlenJ = len(seqJ)\n\ngop = -10\ngep = -0.5\n\nm = [[0 for x in range(lenJ + 1)] for y in range(lenI + 1)]\nix = [[0 for x in range(lenJ + 1)] for y in range(lenI + 1)]\niy = [[0 for x in range(lenJ + 1)] for y in range(lenI + 1)]\ntbm = [[0 for x in range(lenJ + 1)] for y in range(lenI + 1)]\ntbx = [[0 for x in range(lenJ + 1)] for y in range(lenI + 1)]\ntby = [[0 for x in range(lenJ + 1)] for y in range(lenI + 1)]\n\nfor i in range(1, lenI + 1):\n\tiy[i][0] = gop + (i-1) * gep\n\tix[i][0] = float('-inf')\n\tm[i][0] = float('-inf')\n\ttbm[i][0] = 1\n\ttbx[i][0] = 1\n\ttby[i][0] = 1\n\t\n\nfor j in range(1, lenJ + 1):\n\tix[0][j] = gop + (j-1) * gep\n\tiy[0][j] = float('-inf')\n\tm[0][j] = float('-inf')\n\ttbm[0][j] = -1\n\ttbx[0][j] = -1\n\ttby[0][j] = -1\n\nfor i in range(1, lenI + 1):\n\tfor j in range(1, lenJ + 1):\n\t\ts = matrix[seqI[i-1]][seqJ[j-1]]\n\t\t#M\n\t\tsub = m[i-1][j-1] + s\n\t\tx = ix[i-1][j-1] + s\n\t\ty = iy[i-1][j-1] + s\n\t\tif sub >= x and sub >= y:\n\t\t\tm[i][j] = sub\n\t\t\ttbm[i][j] = 0\n\t\telif x > y:\n\t\t\tm[i][j] = x\n\t\t\ttbm[i][j] = -1\n\t\telse:\n\t\t\tm[i][j] = y\n\t\t\ttbm[i][j] = 1\n\t\t#Ix\t\n\t\tsub = m[i][j-1] + gop\n\t\tx = ix[i][j-1] + gep\n\t\tif sub >= x:\n\t\t\tix[i][j] = sub\n\t\t\ttbx[i][j] = 0\n\t\telse:\n\t\t\tix[i][j] = x\n\t\t\ttbx[i][j] = -1\n\t\t#Iy\n\t\tsub = m[i-1][j] + gop\n\t\ty = iy[i-1][j] + gep\n\t\tif sub >= y:\n\t\t\tiy[i][j] = sub\n\t\t\ttby[i][j] = 0\n\t\telse:\n\t\t\tiy[i][j] = y\n\t\t\ttby[i][j] = 
1\n\n\nprint('Optimal score: ', max(m[i][j], ix[i][j], iy[i][j]))\ni = lenI\nj = lenJ\nalnI = []\nalnJ = []\n\nif m[i][j] >= ix[i][j] and m[i][j] >= iy[i][j]:\n\tstate = 0\nelif ix[i][j] > iy[i][j]:\n\tstate = -1\nelse:\n\tstate = 1\n\t\nwhile i != 0 or j != 0:\n\tif state == 0:\n\t\tstate = tbm[i][j]\n\t\ti += -1\n\t\tj += -1\n\t\talnI.append(seqI[i])\n\t\talnJ.append(seqJ[j])\n\telif state == -1:\n\t\tstate = tbx[i][j]\n\t\tj += -1\n\t\talnI.append(\"-\")\n\t\talnJ.append(seqJ[j])\n\telse:\n\t\tstate = tby[i][j]\n\t\ti += -1\n\t\talnI.append(seqI[i])\n\t\talnJ.append(\"-\")\n\nseqI_aln = \"\".join(reversed(alnI))\nseqJ_aln = \"\".join(reversed(alnJ))\n\nsymbols = ''\nidentity = 0\nsimilarity = 0\ntotal = 0\nposI = 0\nposJ = 0\npos = {}\nfor x in range(len(seqI_aln)):\n\tif seqI_aln[x] == seqJ_aln[x]:\n\t\tsymbols += '*'\n\t\tidentity += 1\n\t\tsimilarity += 1\n\t\ttotal += 1\n\t\tposI += 1\n\t\tposJ += 1\n\telif seqI_aln[x] != '-' and seqJ_aln[x] != '-':\t\n\t\ttotal += 1\n\t\tposI += 1\n\t\tposJ += 1\n\t\tif matrix[seqI_aln[x]][seqJ_aln[x]] in (0,1):\n\t\t\tsymbols += '.'\n\t\t\tsimilarity += 1\n\t\telif matrix[seqI_aln[x]][seqJ_aln[x]] >= 2:\n\t\t\tsimilarity += 1\n\t\t\tsymbols += ':'\n\t\telse:\n\t\t\tsymbols += ' '\n\telse:\n\t\tsymbols += ' '\n\t\tif seqI_aln[x] != '-':\n\t\t\tposI += 1\n\t\telif seqJ_aln[x] != '-':\n\t\t\tposJ += 1\n\tif (x+1)%100 == 0:\n\t\tpos[x+1] = [posI,posJ]\t#for each slice of 100 that will be printed, at which position of each sequence am I?\n\nprint('Identity:', str((identity/total)*100)+'%', f\"({identity}/{total})\")\t\nprint('Similarity:', str((similarity/total)*100)+'%', f\"({similarity}/{total})\")\nprint()\n\nl = len(seqI_aln)\npart = l//100\nind = 0\nfor x in range(part):\n\tprint(seqI_aln[ind:ind+100], pos[ind+100][0])\n\tprint(symbols[ind:ind+100])\n\tprint(seqJ_aln[ind:ind+100], pos[ind+100][1])\n\tprint()\n\tind += 100\n\nprint(seqI_aln[ind:], lenI)\nprint(symbols[ind:])\nprint(seqJ_aln[ind:], lenJ)\n\t\n\n" }, { "alpha_fraction": 0.8017751574516296, "alphanum_fraction": 0.8017751574516296, "avg_line_length": 83.5, "blob_id": "19b6931d78fa034944b6da45ca638a27f049f5ae", "content_id": "f12e23a8f4f0756bfce6abb4120dba25135b7f1d", "detected_licenses": [ "CC0-1.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 338, "license_type": "permissive", "max_line_length": 147, "num_lines": 4, "path": "/README.md", "repo_name": "pereperi/Gotoh-algorithm", "src_encoding": "UTF-8", "text": "# Gotoh-algorithm\nImplementation in Python of the Gotoh algorithm for making pairwise alignments of sequences using affine gap penalties.\nInput needs to be a .fasta file containing both sequences (as seen in seq.fasta as an example) and a substitution matrix that MUST have the same\nformat as the matrix.txt file (added as an example).\n" } ]
2
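The gotoh.py above fills three score matrices (m, ix, iy) with gap-open gop = -10 and gap-extend gep = -0.5, then backtracks through the matching traceback tables. A minimal standalone sketch of the same affine-gap recurrence, score only; the +2/-1 toy substitution score and the test strings are assumptions standing in for the repo's matrix.txt and FASTA input:

```python
# Affine-gap (Gotoh) score only, mirroring the m/ix/iy recurrence in
# gotoh.py; the toy score() and the example strings are assumptions.
def gotoh_score(a, b, gop=-10.0, gep=-0.5):
    NEG = float('-inf')
    score = lambda x, y: 2.0 if x == y else -1.0   # stand-in for matrix.txt
    n, m = len(a), len(b)
    M  = [[NEG] * (m + 1) for _ in range(n + 1)]   # a[i] aligned to b[j]
    Ix = [[NEG] * (m + 1) for _ in range(n + 1)]   # gap in a (consumes b)
    Iy = [[NEG] * (m + 1) for _ in range(n + 1)]   # gap in b (consumes a)
    M[0][0] = 0.0
    for i in range(1, n + 1):
        Iy[i][0] = gop + (i - 1) * gep             # same border as gotoh.py
    for j in range(1, m + 1):
        Ix[0][j] = gop + (j - 1) * gep
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            s = score(a[i - 1], b[j - 1])
            M[i][j]  = max(M[i-1][j-1], Ix[i-1][j-1], Iy[i-1][j-1]) + s
            Ix[i][j] = max(M[i][j-1] + gop, Ix[i][j-1] + gep)
            Iy[i][j] = max(M[i-1][j] + gop, Iy[i-1][j] + gep)
    return max(M[n][m], Ix[n][m], Iy[n][m])

print(gotoh_score("HEAGAWGHEE", "PAWHEAE"))        # two made-up peptides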
marcinossowski/libros-game
https://github.com/marcinossowski/libros-game
c67df6f47928c2be39d578fe57afc754778d399c
488200fde5f5a4c6fa8832b5ed52ddb06ccdd49d
4b7ab4f3c69ae6f24d195b2e678ff6c587f1e732
refs/heads/master
2020-12-25T10:59:41.239151
2015-02-04T16:00:48
2015-02-04T16:00:48
30,303,591
0
0
null
2015-02-04T14:48:57
2015-02-03T14:25:38
2015-02-03T14:25:38
null
[ { "alpha_fraction": 0.5269172787666321, "alphanum_fraction": 0.5404102802276611, "avg_line_length": 26.100746154785156, "blob_id": "e8c37cfb290665928694f2939113e510a4034883", "content_id": "f7054be1a03ca5658007396930a3aed4800f0938", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7263, "license_type": "no_license", "max_line_length": 75, "num_lines": 268, "path": "/libros/game.py", "repo_name": "marcinossowski/libros-game", "src_encoding": "UTF-8", "text": "import random\nimport string\n\nfrom copy import copy\nfrom itertools import cycle\n\n\nACTION_TAKE_CARD = 0\nACTION_PILE_CARD = 1\nACTION_SHOW_CARD = 2\nACTION_DISCARD_CARD = 3\nACTION_TAKE_PUBLIC_CARD = 4\n\nACTIONS = [\n ACTION_TAKE_CARD, ACTION_PILE_CARD,\n ACTION_SHOW_CARD, ACTION_DISCARD_CARD,\n ACTION_TAKE_PUBLIC_CARD,\n]\n\nCOLORS = ('blue', 'brown', 'red', 'orange', 'green')\n\n\ndef deal(players, cards_to_remove=None, gold_to_remove=None):\n assert players in [2, 3, 4]\n\n if cards_to_remove is None:\n cards_to_remove = {2: 21, 3: 12, 4: 7}.get(players)\n\n if gold_to_remove is None:\n gold_to_remove = 4 - players\n\n cards = (\n ('blue', 2, 4),\n ('blue', 3, 3),\n ('blue', 4, 2),\n ('brown', 2, 4),\n ('brown', 3, 3),\n ('brown', 4, 2),\n ('red', 1, 7),\n ('red', 2, 2),\n ('orange', 1, 7),\n ('orange', 2, 2),\n ('green', 1, 7),\n ('green', 2, 2),\n ('change', -2, 2),\n ('change', -1, 2),\n ('change', 2, 2),\n ('change', 1, 2),\n ('change', 0, 1), # plus or minus\n ('gold', 1, 11 - gold_to_remove),\n ('gold', 2, 11 - gold_to_remove),\n ('gold', 3, 11 - gold_to_remove))\n deck = []\n letters = {}\n for color in COLORS:\n letters[color] = (x for x in string.ascii_uppercase)\n for kind, value, count in cards:\n deck += [{\n 'type': kind,\n 'value': value,\n 'letter': letters.get(kind, cycle([None])).next(),\n } for _ in xrange(count)]\n random.shuffle(deck)\n\n return deck[cards_to_remove:]\n\n\nclass Game(object):\n def __init__(self):\n self.players = []\n self.players_cycle = []\n self.deck = None\n self.state = 'waiting'\n self.player = None\n self.player_turns_left = 0\n self.pile = []\n self.public = []\n self.discarded = []\n self.dice = {color: 3 for color in COLORS}\n\n def join(self, player):\n self.players.append(player)\n player.join(self, len(self.players))\n\n def start(self):\n assert self.player_count in [2, 3, 4]\n\n self.state = 'start'\n self.player_turns_left = self.turns_per_player\n self.deck = deal(self.player_count)\n self.players_cycle = cycle(self.players)\n\n self.state = 'next_player'\n self.next_player()\n\n @property\n def turns_per_player(self):\n # 1 into hand + 1 into pile + (player_count - 1) to the front\n return 2 + self.player_count - 1\n\n @property\n def player_count(self):\n return len(self.players)\n\n @property\n def deck_count(self):\n return len(self.deck)\n\n @property\n def pile_count(self):\n return len(self.pile)\n\n @property\n def discarded_count(self):\n return len(self.discarded)\n\n @property\n def public_count(self):\n return len(self.public)\n\n def next_player(self):\n if self.state == 'next_player':\n self.state = 'turn'\n self.reset_actions()\n self.player = next(self.players_cycle)\n self.player_turns_left = self.turns_per_player\n elif self.state == 'public':\n self.player = next(self.players_cycle)\n else:\n raise ValueError('Incorrect state.')\n\n @property\n def active_player(self):\n return self.player\n\n @property\n def turns_left(self):\n return self.player_turns_left\n\n def turn(self):\n if self.state == 
'turn':\n            assert self.deck\n            assert self.turns_left > 0\n            card = self.deck.pop()\n            self.player_turns_left -= 1\n        elif self.state == 'public':\n            card = self.active_player.choose_public_card(copy(self.public))\n        else:\n            raise ValueError('Incorrect state.')\n\n        return self.active_player, card, self.valid_actions(card)\n\n    def use_change_card(self, card, colors):\n        value = card['value']\n        assert card['type'] == 'change'  # deal() keys the card kind as 'type'\n        assert len(colors) == 0 or len(colors) == max(abs(value), 1)\n\n        if not colors:\n            return\n\n        if value == 0:\n            value = colors[0][0] == '+' and 1 or -1  # sign prefix, e.g. '+green'\n            colors = [colors[0][1:]]\n        for color in colors:\n            if value < 0:\n                self.dice[color] -= 1\n            else:\n                self.dice[color] += 1\n\n    def turn_complete(self, player):\n        if self.turns_left == 0 and self.public:\n            self.state = 'public'\n            self.next_player()\n        elif self.deck_count == 0:\n            self.state = 'auction'\n        elif self.state == 'public' and not self.public:\n            self.next_player()  # == last active player => skip take public\n            self.state = 'next_player'\n            self.next_player()\n        elif self.turns_left == 0:\n            self.state = 'next_player'\n            self.next_player()\n\n    def valid_actions(self, card):\n        if self.state == 'public':\n            return [ACTION_TAKE_PUBLIC_CARD]\n\n        actions = copy(ACTIONS)\n        actions.remove(ACTION_TAKE_PUBLIC_CARD)\n        actions.remove(ACTION_DISCARD_CARD)  # TODO: depends on card\n\n        if self.action_show == self.player_count - 1:\n            actions.remove(ACTION_SHOW_CARD)\n        if self.action_pile:\n            actions.remove(ACTION_PILE_CARD)\n        if self.action_take_card:\n            actions.remove(ACTION_TAKE_CARD)\n\n        return actions\n\n    def reset_actions(self):\n        (self.action_discard, self.action_pile, self.action_take_card,\n         self.action_show, self.action_take_public) = (0, 0, 0, 0, 0)\n\n    def pile_card(self, card):\n        self.action_pile += 1\n        self.pile.append(card)\n\n    def show_card(self, card):\n        self.action_show += 1\n        self.public.append(card)\n\n    def discard_card(self, card):\n        self.action_discard += 1\n        self.discarded.append(card)\n\n    def take_public_card(self, card):\n        self.action_take_public += 1\n        self.public.remove(card)\n\n    def take_card(self, card):\n        self.action_take_card += 1\n\n\nclass Player(object):\n    def __init__(self):\n        self.game = None\n        self.cards = []\n        self.id = None\n\n    def __repr__(self):\n        return u'ID: %d, Cards: %d' % (self.id, len(self.cards))\n\n    def join(self, game, number):\n        assert not self.game\n        self.game = game\n        self.id = number\n\n    def choose_public_card(self, cards):\n        assert cards\n        return cards[0]\n\n    def act(self, card, action=None):\n        assert self.game\n        assert card\n\n        if action is None:\n            action = random.choice(ACTIONS)\n\n        assert action in ACTIONS\n\n        if action == ACTION_TAKE_CARD:\n            self.cards.append(card)\n            self.game.take_card(card)\n        elif action == ACTION_PILE_CARD:\n            self.game.pile_card(card)\n        elif action == ACTION_DISCARD_CARD:\n            self.game.discard_card(card)\n        elif action == ACTION_TAKE_PUBLIC_CARD:\n            self.cards.append(card)\n            self.game.take_public_card(card)\n        else:\n            self.game.show_card(card)\n\n        self.game.turn_complete(self)\n\n        return action\n" } ]
1
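Two fixes were applied to game.py above: deal() stores a card's kind under the key 'type', so use_change_card's original card['kind'] lookup would raise KeyError, and its sign test compared the whole color string ('+green') to '+' instead of checking the first character. Beyond that, the module never shows a driving loop; a hypothetical one follows, assuming the package is importable as libros.game (Python 2, matching the file's xrange and .next() usage). The always-pick-first-action policy is illustrative, not from the repo:

```python
# Illustrative driver for libros/game.py (Python 2). Game, Player and the
# methods called here all exist in the module; the loop is an assumption.
from libros.game import Game, Player

game = Game()
for _ in xrange(2):                        # a 2-player game
    game.join(Player())
game.start()

# Play until the deck runs out and the game flips to the 'auction' state.
while game.state in ('turn', 'public'):
    player, card, actions = game.turn()    # active player, drawn card, legal moves
    player.act(card, action=actions[0])    # naively take the first legal action
print 'final state:', game.state
```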
nimoes/itp-u4-c2-hangman-game
https://github.com/nimoes/itp-u4-c2-hangman-game
9df34c2c7a045c52b96508026d6a0298de47a8a0
e6dd7290940d428bd66cf51b8b02aedde5ab4727
d0fe1c7ad6b35cb5356452b995e19300ea2bc45a
refs/heads/master
2020-03-21T19:33:29.681180
2018-07-02T04:46:44
2018-07-02T04:46:44
138,956,314
0
0
MIT
2018-06-28T02:30:58
2018-01-06T01:13:06
2018-05-31T17:41:47
null
[ { "alpha_fraction": 0.7612245082855225, "alphanum_fraction": 0.7612245082855225, "avg_line_length": 21.272727966308594, "blob_id": "ed9de8aa42520470b351c39c2d8c7005e53fd11e", "content_id": "51ac2ea65f730bbe466fbefe9d49aee88e5b1bcf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 490, "license_type": "permissive", "max_line_length": 51, "num_lines": 22, "path": "/hangman/exceptions.py", "repo_name": "nimoes/itp-u4-c2-hangman-game", "src_encoding": "UTF-8", "text": "class InvalidListOfWordsException(Exception):\n print(\"Invalid list of words exception raised\")\n\n\nclass InvalidWordException(Exception):\n print(\"InvalidWordException raised\")\n\n\nclass GameWonException(Exception):\n print(\"You won the game! Congrats!\")\n\n\nclass GameLostException(Exception):\n print(\"You lost the game!\")\n\n\nclass GameFinishedException(Exception):\n print(\"Game Finished!\")\n\n\nclass InvalidGuessedLetterException(Exception):\n print(\"Incorrect letter. Try again!\")\n" }, { "alpha_fraction": 0.6386936902999878, "alphanum_fraction": 0.6407606601715088, "avg_line_length": 29.237499237060547, "blob_id": "b9fb5c6a21b05fda0249f233970185e6cdf9ed76", "content_id": "59246854eeb1980a390d6d85fe46855efe7a4e67", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2419, "license_type": "permissive", "max_line_length": 86, "num_lines": 80, "path": "/hangman/game.py", "repo_name": "nimoes/itp-u4-c2-hangman-game", "src_encoding": "UTF-8", "text": "from .exceptions import *\nimport random\n# Complete with your own, just for fun :)\nLIST_OF_WORDS = ['Pineapple', 'coconut', 'chocolate', 'vAnilla', 'strawberry', 'mint']\n\n\ndef _get_random_word(list_of_words):\n if not list_of_words:\n raise InvalidListOfWordsException()\n return random.choice(list_of_words)\n\ndef _mask_word(word):\n masked = \"\"\n if not word:\n raise InvalidWordException()\n\n for eachchar in word:\n masked += \"*\"\n return masked\n \ndef _uncover_word(answer_word, masked_word, character):\n if not answer_word or len(answer_word) != len(masked_word):\n raise InvalidWordException()\n elif len(character) != 1:\n raise InvalidGuessedLetterException()\n else:\n masked_list = list(masked_word)\n\n for index, eachchar in enumerate(answer_word.lower()):\n if eachchar == character.lower():\n masked_list[index] = eachchar\n revealed = ''.join(masked_list)\n return revealed\n\ndef guess_letter(game, letter):\n chosen_word = game['answer_word'] #string will not change throughout\n masked_word = game['masked_word'] #string\n\n # check if the game has already been completed\n if chosen_word == masked_word or game['remaining_misses'] < 1:\n # word is completely revealed or user out of guesses\n raise GameFinishedException()\n\n letter = letter.lower()\n \n # if letter is reused\n if letter in game['previous_guesses']:\n raise InvalidGuessedLetterException()\n else:\n game['previous_guesses'].append(letter)\n\n word_inprogress = _uncover_word(chosen_word, masked_word, letter)\n\n # if letter has not been revealed\n if letter not in word_inprogress:\n game['remaining_misses'] -= 1\n else:\n # if new letter has been revealed\n game['masked_word'] = word_inprogress\n\n # check game status\n if \"*\" not in game['masked_word']:\n raise GameWonException()\n elif game['remaining_misses'] < 1:\n raise GameLostException()\n\ndef start_new_game(list_of_words=None, number_of_guesses=5):\n if list_of_words is None:\n list_of_words = 
LIST_OF_WORDS\n\n word_to_guess = _get_random_word(list_of_words)\n masked_word = _mask_word(word_to_guess)\n game = {\n 'answer_word': word_to_guess,\n 'masked_word': masked_word,\n 'previous_guesses': [],\n 'remaining_misses': number_of_guesses,\n }\n\n return game\n" } ]
2
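A note on hangman/exceptions.py above: its print() calls sit at class-body level, so all six messages are emitted once at import time, not when an exception is raised. The hypothetical console loop below therefore prints its own feedback; it assumes Python 3 and the package layout shown (hangman.game, hangman.exceptions), and every imported name exists in the files above:

```python
# Illustrative console loop for hangman/game.py; the loop itself is not
# part of the repo.
from hangman.game import start_new_game, guess_letter
from hangman.exceptions import (
    GameWonException, GameLostException,
    GameFinishedException, InvalidGuessedLetterException)

game = start_new_game(number_of_guesses=5)
while True:
    print(game['masked_word'], '| misses left:', game['remaining_misses'])
    letter = input('guess a letter: ').strip()
    try:
        guess_letter(game, letter)              # mutates game in place
    except InvalidGuessedLetterException:
        print('not a single fresh letter, try again')
    except GameWonException:
        print('you won! the word was', game['answer_word'])
        break
    except (GameLostException, GameFinishedException):
        print('you lost; the word was', game['answer_word'])
        break
```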
ydc1992/pytho-script
https://github.com/ydc1992/pytho-script
44aae9ba6adb0e7e2efcdc6f7a441706a0923eeb
1b7ff9392671b9a6eb7618a4f8b81a4ea22f4199
aae4c2d8fa3f896c6dab1ed39b002c1e31ad07af
refs/heads/master
2021-01-10T01:12:19.834021
2016-04-11T20:35:19
2016-04-11T20:35:19
54,308,054
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.32591304183006287, "alphanum_fraction": 0.3660869598388672, "avg_line_length": 41.917911529541016, "blob_id": "54f8c669b64ef15d9f36e3354c9f51e6b388fec7", "content_id": "bffb5ec7dfa20cde1263019c3d14308377aa9694", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6248, "license_type": "no_license", "max_line_length": 133, "num_lines": 134, "path": "/new.py", "repo_name": "ydc1992/pytho-script", "src_encoding": "GB18030", "text": "#coding=utf-8\nimport win32clipboard as w,win32con,re\nimport sys\n\nhelp = \\\n\"\"\"new.py [Options]\n - #SHA1前面去掉加上--\n sha1 #获取(剪切板内)木马库中的SHA1\n s #获取剪切板内的SHA1\n HEX #去除16进制空格\n hashsig #获取木马库HashSig\n number #统计每行在文件里出现次数\n arcinfo #匹配信息\n\"\"\"\n#所有操作内容取自剪切板\n\n\nData = 0\n \ndef GetText(): #获取剪切板\n w.OpenClipboard()\n d = w.GetClipboardData(win32con.CF_TEXT)\n w.CloseClipboard()\n return d\n\ndef SetText(aString): #存入剪切板\n w.OpenClipboard()\n w.EmptyClipboard()\n w.SetClipboardData(win32con.CF_TEXT, aString)\n w.CloseClipboard()\n \n#-------------------------------------- \ndef __():#添加或去掉剪切板内SHA1前面的--\n global Data \n if re.match('--\\s{0,3}(\\w{40})',Data):\n Data = '\\n'.join(re.findall('--\\s{0,3}(\\w{40})',Data))\n else:\n Text = re.findall('\\w{40}',Data)\n Text[0] = '--'+Text[0]\n Data = '\\n--'.join(Text).rstrip('-')\n#--------------------------------------\ndef HEX():#16进制(剪切板内)去掉空格\n global Data \n Data = ''.join(Data.split())\n#--------------------------------------\ndef SHA1(n):#木马库(剪切板内)提取SHA1\n global Data\n if re.match('0x\\w{8}\\,0x\\w{16} \\/\\/ (\\w{40})',Data): \n Data = '\\n'.join(re.findall('0x\\w{8}\\,0x\\w{16} \\/\\/ (\\w{40})',Data)) #只在木马库格式内匹配SHA1\n elif n == 1:\n Data = '\\n'.join(re.findall('\\w{40}',Data)) #任意格式匹配SHA1\n#--------------------------------------\ndef HasSig():#获取木马库hashsig\n global Data \n if re.match('0x\\w{8}\\,0x\\w{16}',Data): \n Data = '\\n'.join(re.findall('0x\\w{8}\\,0x\\w{16}',Data))\n#--------------------------------------\ndef arcinfo():\n global Data\n try:#尝试打开arcinfo.log如果没有还是用剪切板内数据\n file = open(\"arcinfo.log\")\n Data = file.read()\n except:\n pass\n # sha1: 5cfa487096c2ec4bb8258fab92de303f0e81ec3a\n Data1 = '\\n'.join(re.findall('arctype: AT_[a-z]{2,6}',Data)) # arctype: AT_xx\n Data2 = '\\n'.join(re.findall('peflags: PEF_[a-z]{4}.*',Data)) # peflags: PEF_xxxx \n # file_size: == 0x2800\n # peovly_size: == 0x0\n # peexp_size: == 0x0\n # peimp_size: == 0x3c\n # peres_size: == 0x428\n # petls_size: == 0x0\n # pesect_count: == 0x8\n # pexsect_count: == 0x3\n # peent_sectno: 0\n # dt_excp_count: == <unk>\n # dt_syscall_count: == <unk>\n # dt_syscallp_count: == <unk>\n # dt_dropfile_count: == <unk>\n # dt_subproc_count: == <unk>\n # [X] peimplib_count: == 2\n # [X] peimpsym_count: == 17 \n # hash_headtail: 0x0000000000\n # hash_peent_adj: 0x0000000000\n # hash_peent_sect: 0x00000000000\n # hash_peent_secthead: 0x0000000000\n # hash_peent_sectback: 0x0000000000\n Data3 = '\\n'.join(re.findall('hash_.+\\: 0x\\w{10,12}',Data)) # hash_peovly_headtail: 0x0000000000 \n Data4 = '\\n'.join(re.findall('hashsig: 0x\\w{8}\\,0x\\w{16}',Data))# hashsig: 0x6ca3a9e0,0x95b6607316b3c0f8\n\n Data = Data1 + \"\\n\" + Data2 + \"\\n\" + Data3 + \"\\n\" + Data4\n#--------------------------------------\ndef number():\n #统计每行在在文件里出现次数,用于统计木马库\n #运行前去掉木马库注释\n import operator\n global Data \n count_dict = {}\n for line in Data.split(\"\\n\"):\n line = line.strip()\n count = count_dict.setdefault(line, 0) #在字典中查询,...\n count += 1\n count_dict[line] = 
count\n    sorted_count_dict = sorted(count_dict.iteritems(), key=operator.itemgetter(1), reverse=True)\n    Data = \"\"\n    for item in sorted_count_dict:\n        Data = Data + item[0] + \",\" + str(item[1]) + \"\\n\"\n\ndef main(i):\n    if i==\"-\":#SHA1前面去掉加上--\n        __()\n    elif i==\"sha1\":#获取(剪切板内)木马库的SHA1\n        SHA1(0) \n    elif i==\"s\":#获取剪切板内的SHA1\n        SHA1(1) \n    elif i==\"HEX\":#去除16进制空格\n        HEX()\n    elif i==\"hashsig\":#获取木马库HashSig\n        HasSig()\n    elif i==\"number\":#统计每行在在文件里出现次数\n        number()\n    elif i==\"arcinfo\":#匹配信息\n        arcinfo()\n    else:\n        print help\n    \nif __name__ == '__main__':\n    for i in sys.argv:\n        Data = GetText()#获取剪切板内数据\n        # arcinfo number\n        main(i)   # hashsig number 切出HashSig 在统计分别出现次数\n        # ............  等等\n        SetText(Data)" }, { "alpha_fraction": 0.5589353442192078, "alphanum_fraction": 0.567307710647583, "avg_line_length": 17.821428298950195, "blob_id": "b25a43598b8ba17553dee9b707c9a57f15f68159", "content_id": "76517cb596d04dd4f5ad64a9ee613d2c2853d84e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 526, "license_type": "no_license", "max_line_length": 62, "num_lines": 28, "path": "/ziw.py", "repo_name": "ydc1992/pytho-script", "src_encoding": "UTF-8", "text": "#coding:utf-8\n\nimport zipfile,os\n\n\n\nclass zipfil():\n    def __init__(self,filepath):\n        self.mzipfile = zipfile.ZipFile(filepath,'a')\n\n\n    def unpack(self):\n        a =3\n\n    def pack(self):\n        pass\n\ndef main():\n    path = r'C:\\Users\\Ken\\Documents\\My Knowledge\\Data\\908526831@qq.com'\n    for root,dir,filenames in os.walk(path):\n        for filename in filenames:\n            path = os.path.join(root,filename)\n            if zipfile.is_zipfile(path):\n                f = zipfil(path)\n\n\nif __name__ == '__main__':\n    main()" }, { "alpha_fraction": 0.5877903699874878, "alphanum_fraction": 0.6034575700759888, "avg_line_length": 33.943397521972656, "blob_id": "9b94ad1b8074c5c4112e519d8661cb4380e95924", "content_id": "5e433dd005ba08dddb2d6e6d0b6ba336398235c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2019, "license_type": "no_license", "max_line_length": 84, "num_lines": 53, "path": "/checkapk.py", "repo_name": "ydc1992/pytho-script", "src_encoding": "GB18030", "text": "#coding:gb2312\nimport zipfile,argparse\n\n#apk查壳特征\npack_ijiami = ['爱加密',['libexec.so','libexecmain.so']],\npack_apkprotect = ['apkprotect',['apkprotect.com','libAPKProtect.so']],\npack_360 = ['360加固',['libprotectClass.so','libprotectClass_x86.so',\n             'libjiagu.so','libjiagu_art.so','libjiagu.so','libjiagu_x86.so']],\npack_bangbang = ['梆梆加固企业版',['libDexHelper.so','libDexHelper-x86.so']],\npack_tp = ['腾讯加固',['libtup.so','libshell.so']],\npack_baidu = ['百度加固',['libbaiduprotect.so','ibbaiduprotect_x86.so']],\npack_najia = ['娜迦加固',['libddog.so','libfdog.so','libchaosvmp.so']],\npack_wangqin = ['网秦加固',['libnqshieldx86.so','libnqshield.so']],\npack_ali = ['阿里加固',['libmobisec.so','libmobisecx.so']],\npack_tfd = ['通付盾加固',['libegis.so']],\n\npak_list = [pack_ijiami ,pack_apkprotect ,pack_360 ,pack_bangbang ,\n            pack_tp ,pack_baidu ,pack_najia ,pack_wangqin ,pack_ali ,pack_tfd ]\n#查apk壳\ndef checkPack( zipfilename):\n    for pakcket in pak_list:\n        for u in zipfilename:\n            if u.split('/')[-1] in pakcket[0][1]:\n                return pakcket[0][0]\n    return '未加壳或者未知壳'\n\n# 不解压文件,获取文件列表\ndef getzipfilename(path):\n    filename = []\n    try:\n        zipinfo = zipfile.ZipFile(path,'r')\n        zipinfolist = zipinfo.infolist()\n    except Exception,e:\n        return\n    for f in zipinfolist:\n        filename.append(f.filename)\n    return filename\n\ndef main():\n    parser = 
argparse.ArgumentParser(description='apk查壳工具 by Ken' )\n parser.add_argument('-f','--file', help='指定文件' , nargs=\"+\")\n args = parser.parse_args()\n path = args.file\n if path:\n filename = getzipfilename(path[0])\n if not filename:\n print '不是标准apk文件'\n exit()\n print checkPack(filename)\n else:\n print '请选择文件路径'\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.6112532019615173, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 26, "blob_id": "4f55baf31805fb2df77427766b65ba046a8daec4", "content_id": "89a62d7f337312a8336731e529ca3dd771fcf571", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 782, "license_type": "no_license", "max_line_length": 107, "num_lines": 29, "path": "/ss.py", "repo_name": "ydc1992/pytho-script", "src_encoding": "UTF-8", "text": "#coding:gb2312\n\nimport StringIO,pycurl\nfrom bs4 import BeautifulSoup\n\ndef GetData(url, ProxyIP=None,reffer=None,cookie = None):\n b = StringIO.StringIO()\n c = pycurl.Curl()\n c.setopt(pycurl.WRITEFUNCTION, b.write)\n c.setopt(pycurl.USERAGENT, \"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0\")\n c.setopt(pycurl.URL, url)\n c.setopt(pycurl.COOKIE,cookie)\n if reffer:\n c.setopt(pycurl.REFERER,reffer)\n c.perform()\n return b.getvalue()\n\ndef main():\n url = 'http://www.ishadowsocks.com/'\n data = GetData(url)\n soup = BeautifulSoup(data)\n da = soup.select('section#free div.container div.row div.col-lg-4.text-center')\n data = da[1].text\n\n for u in data:\n f = open()\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.6408601999282837, "alphanum_fraction": 0.6903225779533386, "avg_line_length": 34.846153259277344, "blob_id": "31a3e02689bea6c67e7002a6abf146eb55e38058", "content_id": "4bc85af910e1dc4dd3d6173f714b90e0a4cbf3c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 465, "license_type": "no_license", "max_line_length": 107, "num_lines": 13, "path": "/test1.py", "repo_name": "ydc1992/pytho-script", "src_encoding": "UTF-8", "text": "import StringIO,pycurl\n\ndef Curl(url):\n b = StringIO.StringIO()\n c = pycurl.Curl()\n c.setopt(pycurl.WRITEFUNCTION, b.write)\n c.setopt(pycurl.USERAGENT, \"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0\")\n c.setopt(pycurl.URL, url)\n c.setopt(pycurl.SSL_VERIFYPEER,0)\n c.setopt(pycurl.SSL_VERIFYHOST,0)\n c.setopt(pycurl.COOKIEFILE, \"pycookie.txt\")\n c.setopt(pycurl.COOKIEJAR, \"pycookie.txt\")\n return b.getvalue()" }, { "alpha_fraction": 0.5765443444252014, "alphanum_fraction": 0.600716233253479, "avg_line_length": 29.189189910888672, "blob_id": "2d9dc1f103014e614ec6ffef769c334a92713d8a", "content_id": "5f05e0dd0255bffe3c7f5419c734b080016a45a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1117, "license_type": "no_license", "max_line_length": 107, "num_lines": 37, "path": "/Test.py", "repo_name": "ydc1992/pytho-script", "src_encoding": "UTF-8", "text": "import StringIO\nimport os\nimport pycurl\nimport re\n\n\ndef GetData(url, ProxyIP=None, filePath=None):\n b = StringIO.StringIO()\n c = pycurl.Curl()\n c.setopt(pycurl.WRITEFUNCTION, b.write)\n c.setopt(pycurl.USERAGENT, \"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0\")\n c.setopt(pycurl.URL, url)\n if ProxyIP:\n c.setopt(pycurl.PROXY, ProxyIP)\n if filePath:\n c.fp = open(filePath, 'wb')\n c.setopt(pycurl.WRITEDATA, c.fp)\n c.perform()\n return 
b.getvalue()\n\n\ndef DownPDFFile():\n Categories = ['security']\n pdfSet = set()\n for i in range(1, 3):\n url = 'http://www.allitebooks.com/%s/page/%d/' % (Categories[0], i)\n list = re.findall(\"<a href=\\\"(.*?)\\\" rel=\\\"bookmark\\\">\", GetData(url))\n pdfSet.update(list)\n for url in pdfSet:\n downloadurl = re.findall(\"href=\\\"(.*?)\\\" target=\\\"_blank\\\">Download PDF\", GetData(url))\n filename = downloadurl[0].split('/')[-1]\n local = os.path.join(\"D:\\Ebook\\\\\" + filename)\n GetData(downloadurl[0], filePath=local)\n\n\nif __name__ == '__main__':\n DownPDFFile()\n" }, { "alpha_fraction": 0.5089545249938965, "alphanum_fraction": 0.5651016235351562, "avg_line_length": 33.01234436035156, "blob_id": "a83d8c62ca0493fecf742f7e812947f4865273bd", "content_id": "db2ede11ec666d3ec7c6fdf9b1fbdac15d2756c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8752, "license_type": "no_license", "max_line_length": 650, "num_lines": 243, "path": "/VR(2).py", "repo_name": "ydc1992/pytho-script", "src_encoding": "UTF-8", "text": "# coding:utf-8\n \nimport StringIO,pycurl,re,os\nfrom bs4 import BeautifulSoup\nimport sys \nreload(sys) \nsys.setdefaultencoding('gbk')\ncookie1 = '''info=xfsk_cookie_user_id_remember=jLCQLfZCocvZJpgez3Ce/WZiM2yJGCzXHChA4fkgEC7EYuxVtmt85xw3M5rOhfkJkvqgEA31KCOsrFLOkbS3fV44MDqTiNfPzmmOS+OxFknwVEuExzpnl5sB69g9lLp7KCd9ju+yoi1cA90S+xJtnDhlzoFtAYZnqJy89vodGAw=&xfsk_cookie_user_name_remember=CwpdM39jad8cW9OiPqOPw8z2C7GqUBGvRRBm1hakI/ObiFEFDvdg0WMtcBUXIdentWCXNwp1kjQzZnSWuBZfpD/5MvW7j50pbSWLsLmSMfU69Tf7Y6/StAg3zqWmARoIxTk08ELOS9RpE1snPztPLwXSa3VgCW/TtJEofa4u33I='''\n\n\ncookie = '''Hm_lvt_225694c67b2c1d267db850fac9dd0170=1453869310,1454295124,1455538387;87loginname=c6bd3f84-3590-40a1-a765-a022b6f47199;CNZZDATA1255576886=1142795298-1453864149-http%253A%252F%252F87870.com%252F%7C1455537293;CNZZDATA1256099391=905726621-1453864149-http%253A%252F%252F87870.com%252F%7C1455535446;Hm_lvt_a96d250ab37bce0300949f861327c0a0=1454295634,1454323256,1455538379,1455538440; ASP.NET_SessionId=msz4j5j03iihucrmpaaulrec;Hm_lpvt_a96d250ab37bce0300949f861327c0a0=1455540079;Hm_lpvt_225694c67b2c1d267db850fac9dd0170=1455538387;info=xfsk_cookie_user_id_remember=J2DujH62IlXB/31bKIPoRgZSJBmWUmHwmPm1z4j/Uho1dZFO7M9GZpgvoNiKi0G7VROyeOcmT4EoKmhJob5hjqjmrQN1jwErvVyZJKRmc0YFipbJfM3cXSHs46Sl5Wu+G2VGK1PLMWjSe84fC7wCzGi/ylb9OralLMqe55Yz+8M=&xfsk_cookie_user_name_remember=jgWDWEK6Nybbrpb6Xoz64UX+JazOupGPU6YHEH1JsUd8IK9wTidvculLBRL7nwTzCexMSW1DtfY8P20vmveHcxss5qQ+k9r0QGF35aKVtOnXvHcYZo5BpJZSfT6aH7OEMiTX5/wBemp9sAQJcvVvIo21XnV6UOQIy70rYD6O9VE='''\n\n\n\n\n# 设置curl对象 的proxy reffer cookies\ndef SetOpt(target_address,proxy=None,reffer=None,cookies=None):\n c = pycurl.Curl()\n c.setopt(pycurl.URL,target_address)\n\n if proxy != None:\n c.setopt(pycurl.PROXY,proxy) # 设置代理\n if reffer != None:\n c.setopt(pycurl.REFERER,reffer)\n if cookies !=None:\n c.setopt(pycurl.COOKIE,cookies)\n return c\n\n\nclass downloader:\n def __init__(self,target_address,out_filePath,reffer,cookies,proxy=None):\n print out_filePath\n self.output_file=out_filePath # 输出路径\n self.chunk=1*1024*1024 # 设置每次下载的块大小\n #创建存放文件的目录\n try:\n self.dir_name=self.output_file+\"tmp\"\n print self.dir_name\n os.mkdir(self.dir_name)\n except OSError:\n pass\n ######### 设置CURL对象 ######\n self.curl_obj= SetOpt(target_address,reffer=reffer,cookies=cookies)\n tmp_curl_obj = SetOpt(target_address,reffer=reffer,cookies=cookies)\n\n ##### 得到并设置下载文件的大小 ######\n tmp_curl_obj.setopt(tmp_curl_obj.NOBODY,True)\n 
try:\n print \"Trying to get size of the file\"\n tmp_curl_obj.perform()\n self.size = tmp_curl_obj.getinfo(tmp_curl_obj.CONTENT_LENGTH_DOWNLOAD)\n print self.size\n except Exception, e:\n print e\n self.delete_temp()\n self.size = 0\n #打印进度\n# self.curl_obj.setopt(self.curl_obj.NOPROGRESS,1)\n self.curl_obj.setopt(self.curl_obj.PROGRESSFUNCTION,self.progress)\n\n ##### 下载 ######\n def download(self):\n if (self.size>0):\n print \"Starting download. Total size: \"+str(self.size)+\" bytes or \"+str(self.size/1024/1024)+\" MB\" \n else:\n print \"Starting download\"\n\n ##### 如果文件大小小于或等于块大小 就直接下载 不用分块了 #####\n if self.size <=self.chunk or self.size<0:\n self.curl_obj.fp = open(self.output_file, \"wb\")\n self.curl_obj.setopt(pycurl.WRITEDATA, self.curl_obj.fp)\n self.curl_obj.perform()\n self.curl_obj.fp.close()\n return\n \n ##### 设置超时时间 #####\n self.curl_obj.setopt(pycurl.TIMEOUT,60*10)\n log=open(\"downloader.log\",\"a\")\n\n lim_l=0\n lim_u=self.chunk\n i=1\n ###### 下载文件 #####\n while lim_l < self.size :\n temp_output=os.path.join(self.dir_name,\"output\"+str(i))\n ###### 如果该分块已经存在且大小等于块大小1024*1024 说明该分块已经下载完成,继续下一次循环 #####\n if os.path.exists(temp_output) and os.path.getsize(temp_output)==self.chunk:\n #print \"skip chunk \", i, lim_l\n i=i+1\n r=str(lim_l)+\"-\"+str(lim_u-1) # 下载的文件分块范围 如 0-(1M-1)、 (1M-(2M-1))....\n lim_l=lim_l+self.chunk\n lim_u=lim_u+self.chunk\n continue\n \n ##### 没有下载则开始下载 #####\n self.curl_obj.fp = open(temp_output, \"wb\")\n self.curl_obj.setopt(pycurl.WRITEDATA, self.curl_obj.fp)\n r=str(lim_l)+\"-\"+str(lim_u-1)\n self.curl_obj.setopt(pycurl.RANGE,r)\n \n print \"download chunk\", i\n ##### 下载文件 #####\n while True:\n ##### 下载完成跳出这个循环 #####\n try:\n self.curl_obj.perform()\n self.curl_obj.fp.close()\n break\n ###### 异常则继续下载 #####\n except pycurl.error, e:\n logmsg = \"Pycurl error caught \"+str(e)+\" while downloading at download range \"+str(r)+\" while storing to file \"+str(temp_output)+\"\\n\"\n log.write(logmsg)\n print \"download {} exception\".format(i)\n self.curl_obj.fp.close()\n self.curl_obj.fp=open(temp_output,\"wb\")\n continue\n\n i=i+1\n lim_l=lim_l+self.chunk\n lim_u=lim_u+self.chunk\n\n\n ##### 删除下载的临时文件 #####\n def delete_temp(self):\n i=1\n while True:\n temp_output=os.path.join(self.dir_name,\"output\"+str(i))\n if os.path.exists(temp_output):\n os.remove(temp_output)\n else:\n break\n i=i+1\n try:\n os.rmdir(self.dir_name)\n except Exception, e:\n pass\n ##### 合并文件 #####\n def concatenate(self):\n ##### 合并前清空output_file的内容 #####\n fp=open(self.output_file,'wb')\n i=1\n\n while True:\n temp_output=os.path.join(self.dir_name,\"output\"+str(i))\n if not os.path.exists(temp_output):\n break\n \n ##### 读取分块内容,依次附加到output_file #####\n print \"write chunk\", i\n tp=open(temp_output,\"rb\")\n buf = tp.read(1024 * 1024)\n fp.write(buf)\n tp.close()\n i += 1\n \n fp.close()\n\n #打印进度\n def progress(self,download_total,downloaded,uploaded_total,upload):\n print \"To be downloaded\" + str(download_total)\n print \"Downloaded : \" + str(downloaded)\n\n\n# 获取网页数据\ndef GetData(url, ProxyIP=None,reffer=None):\n b = StringIO.StringIO()\n c = pycurl.Curl()\n c.setopt(pycurl.WRITEFUNCTION, b.write)\n c.setopt(pycurl.USERAGENT, \"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0\")\n c.setopt(pycurl.URL, url)\n c.setopt(pycurl.COOKIE,cookie)\n if reffer:\n c.setopt(pycurl.REFERER,reffer)\n c.perform()\n return b.getvalue()\n\n# 得到页数\ndef GetPageCount(url):\n data = GetData(url)\n soup = BeautifulSoup(data)\n da = 
soup.select('.pageNav')\n page = re.findall(u\"共(.*?)页\", da[0].text)[0]\n return page\n\n# 得到游戏的下载地址\ndef GetGameDownloadURL(pageURL):\n data = GetData(pageURL)\n soup = BeautifulSoup(data)\n try:\n da = soup.select(\".download\")\n except Exception,e:\n pass\n return da[0]['href']\n\n\n# 得到游戏的下载页面\ndef GetGameDownloadPage(url):\n DownloadPageList = []\n data = GetData(url)\n soup = BeautifulSoup(data)\n\n da = soup.select(\".download_btn\")\n for u in da:\n DownloadPageList.append('http://d.87870.com/'+u['href'])\n return DownloadPageList\n\ndef main():\n downloadPageList = []\n count = GetPageCount('http://d.87870.com/xiazai-1-0603-1_.html')\n\n #获取所有页的游戏下载页面\n for page in range(1,2):#int(count)):\n url = 'http://d.87870.com/xiazai-%d-0603-1_.html'% page\n downloadPageList.extend(GetGameDownloadPage(url))\n for u in downloadPageList:\n downurl = GetGameDownloadURL(u)\n data = GetData(downurl,reffer=u)\n if \"store.steampowered.com\" in data:\n continue\n \n GameURL = re.findall('<a href=\\\"(.*?)\\\">here',data,re.S)[0]\n \n print \"downloading \", GameURL\n import urllib\n filename1 = urllib.unquote(GameURL.rpartition('/')[2])\n filename = urllib.unquote(filename1).decode('utf-8')\n print filename\n d = downloader(\n GameURL,\"D:\\\\123\\\\{}\".format(filename),\n reffer=u,cookies=cookie)\n d.download()\n d.concatenate()\n #d.delete_temp()\n\ndef test():\n downloadurl = \"http://ptbig.putaoa.com/mancdn/up/app/10/sgqyzbltx2.0.0b_151225com.putao.PtSanguo.apk\"\n out_filePath = \"D:\\\\123\\\\123.apk\"\n d = downloader(downloadurl,out_filePath,None,None,None)\n d.download()\n d.concatenate()\nif __name__ == '__main__':\n main()\n #test()" }, { "alpha_fraction": 0.5752625465393066, "alphanum_fraction": 0.6021003723144531, "avg_line_length": 28.586206436157227, "blob_id": "0728995e31db46cc8d3f7de7c12b6d9e190d933c", "content_id": "98f1296bc97f8b9b35ace4ffd7057a71ea14a509", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 857, "license_type": "no_license", "max_line_length": 79, "num_lines": 29, "path": "/pdfurl.py", "repo_name": "ydc1992/pytho-script", "src_encoding": "UTF-8", "text": "__author__ = 'Ken'\n# -*- coding: utf-8 -*-\nimport os,re,time,urllib,urllib2\n\nPDFList = []\nurl = \"http://www.allitebooks.com/security/page/\"\n\ndef calcRate(a, b, c):\n per = 100.0 * a * b / c\n if per > 100:\n per = 100\n if per == 100:\n print \"download complete\"\n\nfor i in range(1, 3):\n request = urllib2.urlopen(url + str(i))\n data = request.read()\n PDFListTem = re.findall(\"<a href=\\\"(.*?)\\\" rel=\\\"bookmark\\\">\", data)\n PDFList.extend(PDFListTem)\nPDFSet = set(PDFList)\ncount = 0\nfor pdfurl in PDFSet:\n request = urllib2.urlopen(pdfurl)\n data = request.read()\n downurl = re.findall(\"href=\\\"(.*?)\\\" target=\\\"_blank\\\">Download PDF\", data)\n filename = downurl[0].split('/')[-1]\n print \"Start download \" + filename\n local = os.path.join(\"D:\\Ebook\\\\\" + filename)\n urllib.urlretrieve(downurl[0], local, calcRate)" }, { "alpha_fraction": 0.5467730164527893, "alphanum_fraction": 0.5699782371520996, "avg_line_length": 29.64444351196289, "blob_id": "4489c2ce6f928160045f4b17acf662875558473c", "content_id": "c6051a7fdd54a75cc377bbb6e10deb8be38a91a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1393, "license_type": "no_license", "max_line_length": 111, "num_lines": 45, "path": "/xxx.py", "repo_name": "ydc1992/pytho-script", "src_encoding": "UTF-8", "text": 
"#coding:utf-8\nimport pycurl,StringIO,urllib2\nclass blog():\n def __init__(self,pageurl,urlscope,startpage,endpage,urlreg,time,artitle):\n self.pageURL = pageurl\n self.startPage = startpage\n self.endPage = endpage\n self.time = time\n self.artitle = artitle\n self.urlscope = urlscope\n self.urlreg = urlreg\n\n def Curl(self,url, **kwargs):\n b = StringIO.StringIO()\n c = pycurl.Curl()\n c.setopt(pycurl.WRITEFUNCTION, b.write)\n c.setopt(pycurl.USERAGENT, \"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0\")\n c.setopt(pycurl.URL, url)\n c.setopt(pycurl.SSL_VERIFYPEER, 0) # 支持https\n c.setopt(pycurl.SSL_VERIFYHOST, 0)\n c.setopt(pycurl.COOKIEFILE, \"pycookie.txt\")\n c.setopt(pycurl.COOKIEJAR, \"pycookie.txt\")\n # 可扩展参数\n for k, v in kwargs.iteritems():\n c.setopt(vars(pycurl)[k], v)\n try:\n c.perform()\n except pycurl.E_SSL_CONNECT_ERROR, e:\n return\n return b.getvalue()\n\n def parse(self):\n for i in range(self.startPage,self.endPage):\n url = self.pageURL.replace('{0}','%d'%i)\n data = self.Curl(url)\n\n\n\ndef main():\n b = blog(\"http://www.freebuf.com/page/{0}\",1,100)\n a = b.parse()\n c= 3\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5264244675636292, "alphanum_fraction": 0.5445912480354309, "avg_line_length": 35.149253845214844, "blob_id": "02eba5ffd0e76198ed598c41d19ff890107e9dd1", "content_id": "40fcce4c532432c7aca4ecdbdcf8056f286e643d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2484, "license_type": "no_license", "max_line_length": 107, "num_lines": 67, "path": "/CrawMalwr.py", "repo_name": "ydc1992/pytho-script", "src_encoding": "GB18030", "text": "#coding:gb2312\nimport StringIO,pycurl,re,urllib\nfrom bs4 import BeautifulSoup\nimport os\n\ndef Curl(url,**kwargs):\n b = StringIO.StringIO()\n c = pycurl.Curl()\n c.setopt(pycurl.WRITEFUNCTION, b.write)\n c.setopt(pycurl.USERAGENT, \"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0\")\n c.setopt(pycurl.URL, url)\n c.setopt(pycurl.SSL_VERIFYPEER,0) #支持https\n c.setopt(pycurl.SSL_VERIFYHOST,0)\n c.setopt(pycurl.COOKIEFILE, \"pycookie.txt\")\n c.setopt(pycurl.COOKIEJAR, \"pycookie.txt\")\n #可扩展参数\n for k, v in kwargs.iteritems():\n c.setopt(vars(pycurl)[k], v)\n try:\n c.perform()\n except pycurl.E_SSL_CONNECT_ERROR,e:\n return\n return b.getvalue()\n# 模拟登陆\ndef login():\n loginurl = 'https://malwr.com/account/login/'\n crsftoken = re.findall('name=\\'csrfmiddlewaretoken\\' value=\\'(.*?)\\'\\s*/>',\n Curl(loginurl))[0]\n post = urllib.urlencode({\n 'csrfmiddlewaretoken': crsftoken,\n 'username':'ken_yang',\n 'password':'sw3ptk',\n 'next':''\n })\n Curl(loginurl,POSTFIELDS=post)\n\ndef Test():\n pageurl = 'https://malwr.com/analysis/?page='\n login()\n\n for i in range(1,500):\n try:\n count = 0\n soup = BeautifulSoup(Curl(pageurl+'%d'%i)).select('.mono') #先定位到子节点\n for s in soup:\n data = Curl('https://malwr.com' + s.parent['href'])\n bu = BeautifulSoup(data)\n downloadurl ='https://malwr.com'+ bu.select(\".btn-primary.btn-small\")[0]['href']\n date = bu.select(\"table.table.table-striped tbody tr td\")[1].text.encode('utf-8')\n date = re.findall('\\d{4}\\-\\d{2}',date)[0].replace('-','.') #文件上传时间\n\n path = os.path.join('d:\\Malwar',date)\n if not os.path.exists(path):\n os.mkdir(path)\n if downloadurl == 'https://malwr.com#':\n continue\n count = count +1\n print '爬取第%d页第%d样本'%(i,count)\n filename = os.path.join(path,downloadurl.split('/')[-2])\n if os.path.exists(filename):\n print 
downloadurl.split('/')[-2] + 'is exists'\n continue\n open(filename,'wb').write(Curl(downloadurl,NOPROGRESS=0) )\n except pycurl.error,e:\n pass\nif __name__ == '__main__':\n Test()\n" }, { "alpha_fraction": 0.5051605701446533, "alphanum_fraction": 0.521789014339447, "avg_line_length": 24.66176414489746, "blob_id": "ce0a3f376d61079ff32c8a9fef517c7d24e37b27", "content_id": "cac52cccf980ae313f0c45586f0503e00f1018c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1770, "license_type": "no_license", "max_line_length": 78, "num_lines": 68, "path": "/VB_DownloadSample_V1.3.py", "repo_name": "ydc1992/pytho-script", "src_encoding": "GB18030", "text": "#coding:gb2312\nimport optparse,sys,os,urllib,urllib2,time\n\nAPI_KEY = 'c2d3e2f082b144a6af9b710a685ae5723371cad56bd44a6cbb9ff66cca374293' #替换成自己的API_KEY\nBASE_URL = \"http://sample.virusbook.cn:18081/api/v1/file/\"\n\ndef Clip():\n pass\n\ndef downloadSample(hash,path,vc):\n filePath = os.path.join(path, hash)\n url = BASE_URL + \"not_detected_sample\"\n parameters = {\n \"sha256\": hash,\n \"vc\": vc,\n \"apikey\": API_KEY\n }\n data = urllib.urlencode(parameters)\n tryCount = 10\n\n while tryCount > 0:\n try:\n req = urllib2.Request(url, data)\n response = urllib2.urlopen(req)\n\n if response.code != 200:\n tryCount -= 1\n time.sleep(5)\n print \"downloadSample retry...\"\n continue\n else:\n # get the file content.\n content = response.read()\n\n fo = open(filePath, 'wb')\n fo.write(content)\n fo.close()\n\n break\n except Exception, e:\n tryCount -= 1\n if tryCount > 0:\n print 'get connection exception, retrying ...'\n time.sleep(3)\n else:\n print 'get connection exception, do not retry, exit.'\n raise e\n\ndef main():\n parser = optparse.OptionParser(usage = \"\"\"\n %prog -c -f <out_filepath>\n -c Get hash form clipboard\n -f Sample save path\n \"\"\")\n\n parser.add_option(\"-c\",action=\"store_true\", dest = \"clip\", default = None)\n parser.add_option(\"-f\",dest=\"outfile\",default=None)\n # 获取命令行参数\n (options, arguments) = parser.parse_args()\n\n if len(sys.argv) < 2:\n parser.print_usage()\n return -1\n path = options.outfile\n\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.6133720874786377, "alphanum_fraction": 0.6494185924530029, "avg_line_length": 35.61701965332031, "blob_id": "4450c068fc9a2a9793fe5d516eca4c76e9e00e0c", "content_id": "edab8a4fd242253d6d1f0120f9e3da55f65dc192", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1778, "license_type": "no_license", "max_line_length": 89, "num_lines": 47, "path": "/VirusTotal.py", "repo_name": "ydc1992/pytho-script", "src_encoding": "GB18030", "text": "#coding:gb2312\nfrom poster.encode import multipart_encode # easy_install poster\nfrom poster.streaminghttp import register_openers\nimport urllib2,urllib,simplejson,sys,hashlib,os\napikey = {'apikey': 'fa07ea9eddbc8ef8d6e6c0b433030230f7785d3aca1da3dbdb19762455a82ad7'}\nurl = 'https://www.virustotal.com/vtapi/v2/file/'\nvirustotalAPI = {'scan':url+'scan','rescan':url+'rescan','reports':url+'report'}\ndef scan(filepath):\n register_openers()\n file = open(filepath, \"rb\")\n params = {'file': file}\n params.update(apikey)\n datagen, headers = multipart_encode(params)\n\n m2 = hashlib.md5()\n m2.update(file.read())\n json = report(m2.hexdigest())\n if json['response_code'] == 1:\n reportformat(json)\n else:\n request = urllib2.Request(virustotalAPI['scan'], datagen, headers)\n result = 
simplejson.loads(urllib2.urlopen(request).read())\n reportformat(report(result['resource']))\ndef report(resource ):\n parameters = {\"resource\":resource}\n parameters.update(apikey)\n req = urllib2.Request(virustotalAPI['reports'], urllib.urlencode(parameters))\n str = urllib2.urlopen(req).read()\n if str == '':\n print '获取扫描结果失败,请稍后再试'\n exit(1)\n reportjson = simplejson.loads(str)\n return reportjson\ndef reportformat(json):\n if json['response_code'] == 1:\n print 'scan_date\\t' + json['scan_date']\n print 'scan result %d/%d'%(json['positives'],json['total'])\n for u in json['scans']:\n virus = json['scans'][u]\n print '\\t{0:<20}\\t{1:<40}\\t{2:<10}'.format(u,virus['result'],virus['update'])\n else :\n print '请求的资源扫描未完成,请稍后再试'\ndef main(argv):\n scan(argv[1])\n\nif __name__ == '__main__':\n main(sys.argv)" }, { "alpha_fraction": 0.5948660969734192, "alphanum_fraction": 0.625, "avg_line_length": 25.382352828979492, "blob_id": "e5ebae03eeb55ab62b75280b141d4d3359464a34", "content_id": "881b65587adba5c9ff302248ac2432d5bd475008", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 922, "license_type": "no_license", "max_line_length": 107, "num_lines": 34, "path": "/pdfbook.py", "repo_name": "ydc1992/pytho-script", "src_encoding": "UTF-8", "text": "#coding:utf-8\nimport StringIO,pycurl,re,time\n\n\ndef Curl(url,**kwargs):\n b = StringIO.StringIO()\n c = pycurl.Curl()\n c.setopt(pycurl.WRITEFUNCTION, b.write)\n c.setopt(pycurl.USERAGENT, \"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0\")\n c.setopt(pycurl.URL, url)\n c.setopt(pycurl.SSL_VERIFYPEER,0) #支持https\n c.setopt(pycurl.SSL_VERIFYHOST,0)\n c.setopt(pycurl.COOKIEFILE, \"pycookie.txt\")\n c.setopt(pycurl.COOKIEJAR, \"pycookie.txt\")\n c.setopt(pycurl.NOPROGRESS,0) # 显示下载进度\n #可扩展参数\n for k, v in kwargs.iteritems():\n c.setopt(vars(pycurl)[k], v)\n try:\n c.perform()\n except pycurl.E_SSL_CONNECT_ERROR,e:\n return\n return b.getvalue()\n\ndef main():\n url = 'http://malc0de.com/database/'\n da = Curl(url)\n print da\n time.sleep(5)\n da = Curl(url)\n print da\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.5671111345291138, "alphanum_fraction": 0.5786666870117188, "avg_line_length": 35.290321350097656, "blob_id": "5de4f22f2019cb7a766d18ed38310fc3b60bf86f", "content_id": "f9a5e82352c804d3a30d68a017628b1b064622ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1145, "license_type": "no_license", "max_line_length": 132, "num_lines": 31, "path": "/TrojDB.py", "repo_name": "ydc1992/pytho-script", "src_encoding": "UTF-8", "text": "#coding:utf-8\nimport re,os\n\ndef GetHashList(): #从扫描日志中读取hash列表\n file_path = r\"D:\\1.txt\"\n file_data = open(file_path).read()\n hash_list = re.findall(\"(\\w{40})\",file_data,re.S)\n return hash_list\ndef WtiteFile(Trojan_hash,file_path):\n file_data = open(file_path,'r').read()\n tmp_data = file_data\n for hash in Trojan_hash:\n rep = r'[\\w,]*\\s*\\\"[\\w\\/\\.]*\\\"\\s\\/\\/\\s%s\\s\\([\\w\\.]*\\).*?\\n'%hash\n data = re.findall(rep,file_data,re.S)\n if data:\n print data\n tmp_data=re.sub(rep,\"\",tmp_data,flags=re.S)\n file = open(file_path,\"w+\")\n file.write(tmp_data)\n file.close()\ndef DeleteHash(TrojanHashList=None):\n for root,dirs,filenames in os.walk(\"C:\\Users\\Ken\\Documents\\My Knowledge\\Data\\908526831@qq.com\"):#r\"E:\\OneDriver\\OneDrive\\troj\"):\n for filename in filenames:\n if 'name' in filename:\n file_path = root +\"\\\\\" + 
filename\n file_data = open(file_path,\"r+\").read()\n WtiteFile(TrojanHashList,file_path)\nif __name__ == '__main__':\n DeleteHash()\n Hash_list = GetHashList()\n DeleteHash(Hash_list)\n" }, { "alpha_fraction": 0.769911527633667, "alphanum_fraction": 0.7853982448577881, "avg_line_length": 29.200000762939453, "blob_id": "ffa2502805f28f8213598632a7a78257e1e637a8", "content_id": "6af0bd934fd30b6ddab0a8e9dd08ee9f328c95e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 452, "license_type": "no_license", "max_line_length": 79, "num_lines": 15, "path": "/js.py", "repo_name": "ydc1992/pytho-script", "src_encoding": "UTF-8", "text": "#coding:gb2312\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport time,pycurl,StringIO\n\nurl = 'https://www.hybrid-analysis.com/recent-submissions?filter=file'\n\ndriver = webdriver.PhantomJS(executable_path=r'D:\\phantomjs\\bin\\phantomjs.exe')\ndriver.get(url)\ntime.sleep(7)\npage_source = driver.page_source\n#data = driver.find_element_by_id('submissions')\ndriver.get(url)\npage_source = driver.page_source\nprint page_source.encode('utf-8')" } ]
15
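Several of the scripts above re-implement the same StringIO + pycurl GET helper (GetData in ss.py and Test.py, Curl in CrawMalwr.py and pdfbook.py), and ss.py's version passes its cookie=None default straight into c.setopt(pycurl.COOKIE, ...), which pycurl rejects with a TypeError; its main() also breaks off at the bare `f = open()`. A consolidated sketch with the optional options guarded follows (Python 2, to match the scripts); the merged signature is editorial, not from the repo:

```python
# Consolidated GET helper in the style of the scripts above; optional
# options are only set when a value is actually supplied.
import StringIO
import pycurl

def curl_get(url, proxy=None, referer=None, cookie=None):
    buf = StringIO.StringIO()
    c = pycurl.Curl()
    c.setopt(pycurl.URL, url)
    c.setopt(pycurl.WRITEFUNCTION, buf.write)
    c.setopt(pycurl.USERAGENT, "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:41.0) "
                               "Gecko/20100101 Firefox/41.0")
    if proxy:
        c.setopt(pycurl.PROXY, proxy)
    if referer:
        c.setopt(pycurl.REFERER, referer)
    if cookie:
        c.setopt(pycurl.COOKIE, cookie)
    try:
        c.perform()
    finally:
        c.close()
    return buf.getvalue()
```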
MinePlugins/TME3-CFA
https://github.com/MinePlugins/TME3-CFA
6476daf944dad1d46f11f93e8afae2a5783d0234
c878365810750797cb84c19e3ea9a242a65d74f0
c40d7cf6b34ebd55592385e2e7d29bef88ba9920
refs/heads/main
2023-03-21T01:09:32.956083
2021-03-10T13:02:48
2021-03-10T13:02:48
346,355,970
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6891385912895203, "alphanum_fraction": 0.6966292262077332, "avg_line_length": 28.55555534362793, "blob_id": "05c81fdbffa5d59f10fbaaa380074a8e073f2a38", "content_id": "3f707b97c465a2427804f2b2e11693126187f343", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 267, "license_type": "no_license", "max_line_length": 54, "num_lines": 9, "path": "/mySearchEngine/myStockProduct/models.py", "repo_name": "MinePlugins/TME3-CFA", "src_encoding": "UTF-8", "text": "from django.db import models\n\nclass ProductStock(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n tigID = models.IntegerField(default='-1')\n quantityInStock = models.IntegerField(default='0')\n\n class Meta:\n ordering = ('tigID',)\n\n" }, { "alpha_fraction": 0.6592565774917603, "alphanum_fraction": 0.6647230386734009, "avg_line_length": 37.125, "blob_id": "66192e291743972ac461f6435f4cb7b778450a28", "content_id": "e012f02cfc3c733aefb8d79434018c0d63e69f4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2744, "license_type": "no_license", "max_line_length": 89, "num_lines": 72, "path": "/mySearchEngine/myStockProduct/views.py", "repo_name": "MinePlugins/TME3-CFA", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nimport requests\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom myStockProduct.models import ProductStock\nfrom myStockProduct.serializers import ProductStockSerializer\nfrom django.http import Http404\nfrom django.http import JsonResponse\nfrom mytig.config import baseUrl\n\n# Create your views here.\nclass ProductsStock(APIView):\n def get(self, request, format=None):\n res=[]\n for prod in ProductStock.objects.all():\n serializer = ProductStockSerializer(prod)\n response = requests.get(baseUrl+'product/'+str(serializer.data['tigID'])+'/')\n jsondata = response.json()\n jsondata['quantityInStock'] = serializer.data['quantityInStock']\n res.append(jsondata)\n return JsonResponse(res, safe=False)\n# def post(self, request, format=None):\n# NO DEFITION of post --> server will return \"405 NOT ALLOWED\"\n\nclass ProductStockDetail(APIView):\n def get_object(self, pk):\n try:\n return ProductStock.objects.get(tigID=pk)\n except ProductStock.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n prod = self.get_object(pk)\n serializer = ProductStockSerializer(prod)\n response = requests.get(baseUrl+'product/'+str(serializer.data['tigID'])+'/')\n jsondata = response.json()\n jsondata['quantityInStock'] = serializer.data['quantityInStock']\n return Response(jsondata)\n\nclass DecrStock(APIView):\n def get_object(self, pk):\n try:\n return ProductStock.objects.get(tigID=pk)\n except ProductStock.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, qty, format=None):\n prod = self.get_object(pk)\n prod.quantityInStock -= qty\n prod.save()\n serializer = ProductStockSerializer(prod)\n response = requests.get(baseUrl+'product/'+str(serializer.data['tigID'])+'/')\n jsondata = response.json()\n jsondata['quantityInStock'] = serializer.data['quantityInStock']\n return Response(jsondata)\n\nclass IncrStock(APIView):\n def get_object(self, pk):\n try:\n return ProductStock.objects.get(tigID=pk)\n except ProductStock.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, qty, format=None):\n prod = self.get_object(pk)\n prod.quantityInStock += qty\n prod.save()\n serializer = 
ProductStockSerializer(prod)\n response = requests.get(baseUrl+'product/'+str(serializer.data['tigID'])+'/')\n jsondata = response.json()\n jsondata['quantityInStock'] = serializer.data['quantityInStock']\n return Response(jsondata)" }, { "alpha_fraction": 0.536649227142334, "alphanum_fraction": 0.5890052318572998, "avg_line_length": 20.22222137451172, "blob_id": "e25367fe88e0573328a73254e2d9f9f54a8dcd85", "content_id": "10179dad356ebc0f3a31c08e06b137119f549853", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 382, "license_type": "no_license", "max_line_length": 47, "num_lines": 18, "path": "/mySearchEngine/mytig/migrations/0002_produitenpromotion_newprice.py", "repo_name": "MinePlugins/TME3-CFA", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.7 on 2021-03-09 11:33\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('mytig', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='produitenpromotion',\n name='newprice',\n field=models.FloatField(default=0),\n ),\n ]\n" }, { "alpha_fraction": 0.7362204790115356, "alphanum_fraction": 0.7362204790115356, "avg_line_length": 34.28571319580078, "blob_id": "dd6badcd32790777f59d6fe9de35fe80415c3096", "content_id": "5e6c08b8c2d69ac912fa801a3bd9f8be8a5937aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 254, "license_type": "no_license", "max_line_length": 54, "num_lines": 7, "path": "/mySearchEngine/myStockProduct/serializers.py", "repo_name": "MinePlugins/TME3-CFA", "src_encoding": "UTF-8", "text": "from rest_framework.serializers import ModelSerializer\r\nfrom myStockProduct.models import ProductStock\r\n\r\nclass ProductStockSerializer(ModelSerializer):\r\n class Meta:\r\n model = ProductStock\r\n fields = ('id', 'tigID', 'quantityInStock')\r\n" }, { "alpha_fraction": 0.641267716884613, "alphanum_fraction": 0.643964946269989, "avg_line_length": 46.83871078491211, "blob_id": "c342ba275512ecaf11430e717079a654c7c575b6", "content_id": "b427da76fc5b2cbb9b7b991d1ef3576bc5ea6bad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1486, "license_type": "no_license", "max_line_length": 130, "num_lines": 31, "path": "/mySearchEngine/myStockProduct/management/commands/refreshStock.py", "repo_name": "MinePlugins/TME3-CFA", "src_encoding": "UTF-8", "text": "from django.core.management.base import BaseCommand, CommandError\nfrom myStockProduct.models import ProductStock\nfrom myStockProduct.serializers import ProductStockSerializer\nfrom mytig.config import baseUrl\nimport requests\nimport time\nfrom random import randrange\n\n\nclass Command(BaseCommand):\n help = 'Refresh the list of products which are on sale.'\n\n def handle(self, *args, **options):\n self.stdout.write('['+time.ctime()+'] Refreshing data...')\n response = requests.get(baseUrl+'products/')\n jsondata = response.json()\n ProductStock.objects.all().delete()\n for product in jsondata:\n if product['availability']: # Verification de la disponibilité et création d'un stock aléatoire\n serializer = ProductStockSerializer(data={'tigID':str(product['id']), 'quantityInStock':randrange(1, 30)})\n if serializer.is_valid():\n serializer.save()\n self.stdout.write(self.style.SUCCESS('['+time.ctime()+'] Successfully added product id=\"%s\"' % product['id']))\n\n else:\n serializer = ProductStockSerializer(data={'tigID':str(product['id']), 
'quantityInStock':0})\n if serializer.is_valid():\n serializer.save()\n self.stdout.write(self.style.SUCCESS('['+time.ctime()+'] Successfully added product id=\"%s\"' % product['id']))\n\n self.stdout.write('['+time.ctime()+'] Data refresh terminated.')\n" }, { "alpha_fraction": 0.6212338805198669, "alphanum_fraction": 0.6276103854179382, "avg_line_length": 38.212501525878906, "blob_id": "a21ce2f5a29bd3a9fea9b114adfdd4944dd23997", "content_id": "e22bd0e4ef10a25f9ab3a3d44c7c9e292c35e8a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6274, "license_type": "no_license", "max_line_length": 102, "num_lines": 160, "path": "/mySearchEngine/mytig/views.py", "repo_name": "MinePlugins/TME3-CFA", "src_encoding": "UTF-8", "text": "import requests\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom mytig.config import baseUrl\n\n# Create your views here.\nclass RedirectionListeDeProduits(APIView):\n def get(self, request, format=None):\n response = requests.get(baseUrl+'products/')\n jsondata = response.json()\n return Response(jsondata)\n# def post(self, request, format=None):\n# NO DEFITION of post --> server will return \"405 NOT ALLOWED\"\n\nclass RedirectionDetailProduit(APIView):\n def get_object(self, pk):\n try:\n response = requests.get(baseUrl+'product/'+str(pk)+'/')\n jsondata = response.json()\n return Response(jsondata)\n except:\n raise Http404\n def get(self, request, pk, format=None):\n response = requests.get(baseUrl+'product/'+str(pk)+'/')\n jsondata = response.json()\n return Response(jsondata)\n# def put(self, request, pk, format=None):\n# NO DEFITION of put --> server will return \"405 NOT ALLOWED\"\n# def delete(self, request, pk, format=None):\n# NO DEFITION of delete --> server will return \"405 NOT ALLOWED\"\n\nclass RedirectionListeDeShipPoints(APIView):\n def get(self, request, format=None):\n response = requests.get(baseUrl+'shipPoints/')\n jsondata = response.json()\n return Response(jsondata)\n# def post(self, request, format=None):\n# NO DEFITION of post --> server will return \"405 NOT ALLOWED\"\n\nclass RedirectionDetailShipPoint(APIView):\n def get_object(self, pk):\n try:\n response = requests.get(baseUrl+'shipPoint/'+str(pk)+'/')\n jsondata = response.json()\n return Response(jsondata)\n except:\n raise Http404\n def get(self, request, pk, format=None):\n response = requests.get(baseUrl+'shipPoint/'+str(pk)+'/')\n jsondata = response.json()\n return Response(jsondata)\n\nfrom mytig.models import ProduitEnPromotion,ProduitDisponible\nfrom mytig.serializers import ProduitEnPromotionSerializer, ProduitDisponibleSerializer\nfrom django.http import Http404\nfrom django.http import JsonResponse\n\nclass PromoList(APIView):\n def get(self, request, format=None):\n res=[]\n for prod in ProduitEnPromotion.objects.all():\n serializer = ProduitEnPromotionSerializer(prod)\n response = requests.get(baseUrl+'product/'+str(serializer.data['tigID'])+'/')\n jsondata = response.json()\n res.append(jsondata)\n return JsonResponse(res, safe=False)\n# def post(self, request, format=None):\n# NO DEFITION of post --> server will return \"405 NOT ALLOWED\"\nclass RemoveSale(APIView):\n def get_object(self, pk):\n try:\n return ProduitEnPromotion.objects.get(tigID=pk)\n except ProduitEnPromotion.DoesNotExist:\n raise Http404\n \n def get(self, request, pk, format=None):\n jsondata = {}\n prod = self.get_object(pk) \n if prod is not None and prod is not False:\n prod.delete()\n 
jsondata['message'] = \"Successfully delete {} from sale\".format(pk)\n return Response(jsondata)\n\nclass PutOnSale(APIView):\n def get_object(self, pk):\n try:\n return ProduitEnPromotion.objects.get(tigID=pk)\n except ProduitEnPromotion.DoesNotExist:\n return False\n # raise Http404\n \n def get(self, request, pk, newprice, format=None):\n jsondata = {}\n try:\n newprice = float(newprice)\n except:\n jsondata['message'] = \"New price is not a float\"\n return Response(jsondata)\n prod = self.get_object(pk)\n\n response = requests.get(baseUrl+'product/'+str(pk)+'/')\n jsondata_product = response.json()\n jsondata_product[\"discount\"] = newprice\n if response.status_code != 404: #Verfication de l'existant du produit en ligne\n if prod is not None and prod is not False: # SI il existe on va le mettre a jour\n prod.newprice = newprice\n prod.save()\n serializer = ProduitEnPromotionSerializer(prod)\n jsondata['message'] = \"Successfully update {} on sale by {}\".format(pk, newprice)\n else: #sinon on le crée\n serializer = ProduitEnPromotionSerializer(data={'tigID':str(pk), 'newprice':newprice})\n jsondata['message'] = \"Successfully put {} on sale by {}\".format(pk, newprice)\n\n if serializer.is_valid():\n serializer.save()\n jsondata['product'] = jsondata_product\n else:\n jsondata['message'] = \"Product ID {} not found\".format(pk)\n return Response(jsondata)\n\nclass PromoDetail(APIView):\n def get_object(self, pk):\n try:\n return ProduitEnPromotion.objects.get(tigID=pk)\n except ProduitEnPromotion.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n prod = self.get_object(pk)\n serializer = ProduitEnPromotionSerializer(prod)\n response = requests.get(baseUrl+'product/'+str(serializer.data['tigID'])+'/')\n jsondata = response.json()\n jsondata[\"discount\"] = serializer.data['newprice']\n jsondata[\"sale\"] = True if serializer.data['newprice'] > 0 else False\n return Response(jsondata)\n\n\nclass DispoList(APIView):\n def get(self, request, format=None):\n res=[]\n for prod in ProduitDisponible.objects.all():\n serializer = ProduitDisponibleSerializer(prod)\n response = requests.get(baseUrl+'product/'+str(serializer.data['tigID'])+'/')\n jsondata = response.json()\n res.append(jsondata)\n return Response(res)\n\nclass DispoDetail(APIView):\n def get_object(self, pk):\n try:\n return ProduitDisponible.objects.get(pk=pk)\n except ProduitDisponible.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n prod = self.get_object(pk)\n serializer = ProduitDisponibleSerializer(prod)\n response = requests.get(baseUrl+'product/'+str(serializer.data['tigID'])+'/')\n jsondata = response.json()\n return Response(jsondata)" }, { "alpha_fraction": 0.7864077687263489, "alphanum_fraction": 0.7864077687263489, "avg_line_length": 19.600000381469727, "blob_id": "55263ddc91aa517924fda69eb71e72a2fb4c30e4", "content_id": "f5d53fd204c97ee45a03cd0a25176ae443979843", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 103, "license_type": "no_license", "max_line_length": 38, "num_lines": 5, "path": "/mySearchEngine/myStockProduct/apps.py", "repo_name": "MinePlugins/TME3-CFA", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass MystockproductConfig(AppConfig):\n name = 'myStockProduct'\n" }, { "alpha_fraction": 0.5620993375778198, "alphanum_fraction": 0.567307710647583, "avg_line_length": 44.22222137451172, "blob_id": "f9d059228592d53f70a0d3f07693183708e3d8b3", "content_id": 
"1e77026429a8c60cce1f0f1c3a156d02f66ac0d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2496, "license_type": "no_license", "max_line_length": 163, "num_lines": 54, "path": "/mySearchEngine/mytig/management/commands/autoSaleOnQuantityOverflow.py", "repo_name": "MinePlugins/TME3-CFA", "src_encoding": "UTF-8", "text": "from django.core.management.base import BaseCommand, CommandError\r\nfrom myStockProduct.models import ProductStock\r\nfrom myStockProduct.serializers import ProductStockSerializer\r\nfrom mytig.models import ProduitEnPromotion\r\nfrom mytig.serializers import ProduitEnPromotionSerializer\r\nfrom mytig.config import baseUrl\r\nimport requests\r\nimport time\r\nfrom random import randrange\r\n\r\n\r\nclass Command(BaseCommand):\r\n help = 'Refresh the list of products which are on sale.'\r\n\r\n def handle(self, *args, **options):\r\n self.stdout.write('['+time.ctime()+'] Refreshing data...')\r\n response = requests.get(baseUrl+'products/')\r\n jsondata = response.json()\r\n for product in jsondata:\r\n prod = None\r\n try:\r\n prod = ProductStock.objects.get(tigID=product['id'])\r\n except ProductStock.DoesNotExist:\r\n prod = None\r\n\r\n if prod is not None:\r\n if prod.quantityInStock > 16 and prod.quantityInStock < 64:\r\n before_diff = product['price'] * 0.80\r\n after_diff = abs(before_diff-product['price'])\r\n elif prod.quantityInStock >= 64:\r\n before_diff = product['price'] * 0.50\r\n after_diff = abs(before_diff-product['price'])\r\n else:\r\n after_diff = 0\r\n prodstock = None\r\n try:\r\n prodstock = ProduitEnPromotion.objects.get(tigID=product['id'])\r\n except:\r\n prodstock = None\r\n if prodstock is not None:\r\n prodstock.newprice = after_diff\r\n prodstock.save()\r\n self.stdout.write(self.style.SUCCESS('[{}] Successfully Updated product id={} with {} price'.format(time.ctime(),product['id'], after_diff)))\r\n\r\n else:\r\n serializer = ProduitEnPromotionSerializer(data={'tigID':str(product['id']), 'newprice':after_diff})\r\n if serializer.is_valid():\r\n serializer.save()\r\n self.stdout.write(self.style.SUCCESS('[{}] Successfully added product id={} with {} price'.format(time.ctime(),product['id'], after_diff)))\r\n\r\n else:\r\n self.stdout.write(self.style.WARNING('[{}] No stock for product id={}'.format(time.ctime(),product['id'])))\r\n\r\n self.stdout.write('['+time.ctime()+'] Data refresh terminated.')\r\n" }, { "alpha_fraction": 0.7156177163124084, "alphanum_fraction": 0.7156177163124084, "avg_line_length": 45.66666793823242, "blob_id": "7b45638bea627791b855b348e3f33e90e2234c4b", "content_id": "e110d2e0eeabe280cabf191c5c1caeee1277c59b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 429, "license_type": "no_license", "max_line_length": 89, "num_lines": 9, "path": "/mySearchEngine/myStockProduct/urls.py", "repo_name": "MinePlugins/TME3-CFA", "src_encoding": "UTF-8", "text": "from django.urls import path\r\nfrom myStockProduct import views\r\n\r\nurlpatterns = [\r\n path('myStockProduct/infoproducts/', views.ProductsStock.as_view()),\r\n path('myStockProduct/infoproduct/<int:pk>/', views.ProductStockDetail.as_view()),\r\n path('myStockProduct/incrementStock/<int:pk>/<int:qty>/', views.IncrStock.as_view()),\r\n path('myStockProduct/decrementStock/<int:pk>/<int:qty>/', views.DecrStock.as_view()),\r\n]\r\n" }, { "alpha_fraction": 0.6972972750663757, "alphanum_fraction": 0.7009009122848511, "avg_line_length": 38.64285659790039, 
"blob_id": "f5a810e3a102357483547bed096ab5f58eb261bb", "content_id": "9c30aec37d65352ad6f874758b7d3d8582ebd893", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1110, "license_type": "no_license", "max_line_length": 118, "num_lines": 28, "path": "/mySearchEngine/mytig/urls.py", "repo_name": "MinePlugins/TME3-CFA", "src_encoding": "UTF-8", "text": "from django.urls import path, register_converter, re_path\n\n\nfrom mytig import views\n\nclass FloatUrlParameterConverter: #Une class python qui respecte le besoin django pour prendre en charge les flottants\n regex = '[0-9]+\\.?[0-9]+'\n\n def to_python(self, value):\n return float(value)\n\n def to_url(self, value):\n return str(value)\n\nregister_converter(FloatUrlParameterConverter, 'floatsss')\n\nurlpatterns = [\n path('products/', views.RedirectionListeDeProduits.as_view()),\n path('product/<int:pk>/', views.RedirectionDetailProduit.as_view()),\n path('availableproducts/', views.DispoList.as_view()),\n path('availableproduct/<int:pk>/', views.DispoDetail.as_view()),\n path('shipPoints/', views.RedirectionListeDeShipPoints.as_view()),\n path('shipPoint/<int:pk>/', views.RedirectionDetailShipPoint.as_view()),\n path('onsaleproducts/', views.PromoList.as_view()),\n path('onsaleproduct/<int:pk>/', views.PromoDetail.as_view()),\n path('putonsale/<int:pk>/<floatsss:newprice>/', views.PutOnSale.as_view()),\n path('removesale/<int:pk>/', views.RemoveSale.as_view()),\n]\n" } ]
10
tree0flife/capstone
https://github.com/tree0flife/capstone
23a35a007d019374ae74cfdfee09f57d85494886
d75cff611f82b9189caa26d67ad30885b8957da2
125bd80849dd2a5c48d5e029145b2b3df4cacadb
refs/heads/master
2020-03-26T00:42:16.914242
2018-08-10T19:55:20
2018-08-10T19:55:20
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6069204211235046, "alphanum_fraction": 0.6112456917762756, "avg_line_length": 41.5, "blob_id": "f09a2e70c4a0c51da999d2531cdbff017051a16d", "content_id": "6dd3ec5b3176505df636c577db6161e2de6b0086", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5780, "license_type": "no_license", "max_line_length": 174, "num_lines": 136, "path": "/aaf.py", "repo_name": "tree0flife/capstone", "src_encoding": "UTF-8", "text": "import subprocess, platform, getpass, os, sys\nimport yaml\nfrom cmd import Cmd\n\nclass cprompt(Cmd):\n\n def do_list(self, args):\n \"\"\"Prints list of available tools\"\"\"\n print('hindsight')\n print('dumpzilla')\n\n def do_dumpzilla(self, args):\n \"\"\" Options:\n --All (Shows everything but the DOM data. Doesn't extract thumbnails or HTML 5 offline)\n --Cookies [-showdom -domain <string> -name <string> -hostcookie <string> -access <date> -create <date> -secure <0/1> -httponly <0/1> -range_last -range_create <start> <end>]\n --Permissions [-host <string>]\n --Downloads [-range <start> <end>]\n --Forms\t[-value <string> -range_forms <start> <end>]\n --History [-url <string> -title <string> -date <date> -range_history <start> <end> -frequency]\n --Bookmarks [-range_bookmarks <start> <end>]\n --Cacheoffline [-range_cacheoff <start> <end> -extract <directory>]\n --Thumbnails [-extract_thumb <directory>]\n --Range <start date> <end date>\n --Addons\n --Passwords (Decode only in Unix)\n --Certoverride\n --Session\n --Watch [-text <string>] (Shows in daemon mode the URLs and text form in real time. -text' Option allow filter, support all grep Wildcards. Exit: Ctrl + C. only Unix).\n\"\"\"\n\n stream = open(\"config.yml\", 'r')\n dzconfig = yaml.load(stream)\n dhistory = (dzconfig['cfdumpzilla']['dhistory'])\n dbookmarks = (dzconfig['cfdumpzilla']['dbookmarks'])\n\n\tprint(\"Current options set for Dumpzilla:\\n\")\n\toptionlist = [('History', dhistory),\n \t('Bookmarks', dbookmarks)]\n\theader = u\"{0:<20}{1:>6}\".format('Option','Value')\n\tprint(header)\n\tprint(\"-\"*len(header))\n\tfor option, value in optionlist:\n\t\tprint(u\"{0:<20}{1:>6}\".format(option, value))\n\n cwd = os.getcwd()\n path = cwd + '/dumpzilla'\n os.chdir(path)\n dir_path = os.path.dirname(os.path.realpath(__file__))\n defloc = subprocess.check_output(\"find $HOME/.mozilla -type d -name \"'*.default'\"\", shell=True)\n\n\tcontedit = raw_input(\"\\nExecute with these options? y/n: \")\n\tif contedit in ['y', 'Y']:\n \tlindzcmd = 'python ' + dir_path + '/dumpzilla.py ' + defloc.rstrip() + \" \" + dhistory + \" \" + dbookmarks\n \tp = subprocess.Popen(lindzcmd, shell=True)\n\telse: \n\t\twhile contedit in ['n', 'N']:\n\t\t\tselectop = raw_input(\"\\nWhich option do you want to change? 
(Type done to finish editing) : \")\n\t\t\tif selectop in ['history', 'History']:\n\t\t\t\tdhistory = raw_input(\"\\nEnter url, date, title, or range: \")\n\t\t\telif selectop in ['bookmarks', 'Bookmarks']:\n\t\t\t\tdbookmarks = raw_input(\"\\nEnter a range: \")\n\t\t\telif selectop in ['done', 'Done']:\n\t\t\t\tcontedit = 'y'\n\t\t\t\tlindzcmd = 'python ' + dir_path + '/dumpzilla.py ' + defloc.rstrip() + \" \" + dhistory + \" \" + dbookmarks\n\t\t\t\tp = subprocess.Popen(lindzcmd, shell=True)\n\n\n    def do_hindsight(self, args):\n        \"\"\" Options:\n    -h, --help            show this help message and exit\n    -i INPUT, --input INPUT\n                        Path to the Chrome(ium) \"Default\" directory\n    -o OUTPUT, --output OUTPUT\n                        Name of the output file (without extension)\n    -b {Chrome,Brave}, --browser_type {Chrome,Brave}\n                        Type of input files\n    -f {sqlite,xlsx}, --format {sqlite,xlsx}\n                        Output format\n    -l LOG, --log LOG     Location Hindsight should log to (will append if exists)\n    -t TIMEZONE, --timezone TIMEZONE\n                        Display timezone for the timestamps in XLSX output\n    -d {mac,linux}, --decrypt {mac,linux}\n                        Try to decrypt Chrome data from a Linux or Mac system; support for both is currently buggy and enabling this may cause\n                        problems. Only use \"--decrypt linux\" on data from a Linux system, and only use \"--decrypt mac\" when running Hindsight on the\n                        same Mac the Chrome data is from.\n    -c CACHE, --cache CACHE\n                        Path to the cache directory; only needed if the directory is outside the given \"input\" directory. Mac systems are setup this\n                        way by default.\n\"\"\"\n\n        stream = open(\"config.yml\", 'r')\n        hsconfig = yaml.load(stream)\n        hformat = (hsconfig['cfhindsight']['hformat'])\n        houtput = (hsconfig['cfhindsight']['houtput'])\n\n\n        dir_path = os.path.dirname(os.path.realpath(__file__))\n        cmdhsLinux = '/home/' + getpass.getuser() + '/.config/google-chrome/Default'\n\n\n\tprint(\"Current options set for Hindsight:\\n\")\n\toptionlist = [('Format', hformat),\n \t('Output', houtput)]\n\theader = u\"{0:<20}{1:>6}\".format('Option','Value')\n\tprint(header)\n\tprint(\"-\"*len(header))\n\tfor option, value in optionlist:\n\t\tprint(u\"{0:<20}{1:>6}\".format(option, value))\n\n\n\tcontedit = raw_input(\"\\nExecute with these options? y/n: \")\n\tif contedit in ['y', 'Y']:\n\t\tlinhscmd = 'python ' + dir_path + '/hindsight_master/hindsight.py -i ' + cmdhsLinux + \" \" + houtput + \" \" + hformat\n\t\tp = subprocess.Popen(linhscmd, shell=True)\n\telse: \n\t\twhile contedit in ['n', 'N']:\n\t\t\tselectop = raw_input(\"\\nWhich option do you want to change? (Type done to finish editing) : \")\n\t\t\tif selectop in ['output', 'Output']:\n\t\t\t\thoutput = raw_input(\"\\nEnter new output path (include -o but do not include file extension): \")\n\t\t\telif selectop in ['format', 'Format']:\n\t\t\t\thformat = raw_input(\"\\nSpecify the format (-f sqlite or -f xlsx): \")\n\t\t\telif selectop in ['done', 'Done']:\n\t\t\t\tcontedit = 'y'\n\t\t\t\tlinhscmd = 'python ' + dir_path + '/hindsight_master/hindsight.py -i ' + cmdhsLinux + \" \" + houtput + \" \" + hformat\n\t\t\t\tp = subprocess.Popen(linhscmd, shell=True)\n\n\n    def do_exit(self, args):\n        \"\"\"Exits the program\"\"\"\n        print (\"Exiting.\")\n        raise SystemExit\n\nif __name__ == '__main__':\n    prompt = cprompt()\n    prompt.prompt = 'aaf>> '\n    prompt.cmdloop('''Starting...''')\n" } ]
1
erikbohnsack/reinforcement-achtung
https://github.com/erikbohnsack/reinforcement-achtung
15c9814ad778ef3de99d38ab040a2436c15a08f7
bf7f31d822a6cadc6ec93af56d34f8f891fd020b
00b6666621dbacddada9c93e395578722e5a8e34
refs/heads/master
2020-04-01T23:35:12.756299
2019-03-09T23:14:38
2019-03-09T23:16:10
153,764,847
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.792553186416626, "alphanum_fraction": 0.7952127456665039, "avg_line_length": 33.227272033691406, "blob_id": "f9eee3df2c9c8779fb85be57e15c1826fa598b11", "content_id": "4c254ec733bd38a0d1a560c677d5489354a2eaee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 752, "license_type": "no_license", "max_line_length": 77, "num_lines": 22, "path": "/README.md", "repo_name": "erikbohnsack/reinforcement-achtung", "src_encoding": "UTF-8", "text": "# achtung-die-PLE\n\nA try to master Achtung Die Kurve using Reinforcement learning.\n\nEnded up creating a technically proficient little devil, \nbut without any kind of tactical sense. This was due to\nthe environment the agent could see was narrowed down to \na handful of beams, signalling the distance to a wall/opponent \nin a couple of different angles. \n\n`gym_achtung/envs` holds the `AchtungDieKurve` class,\nwhich is basically the game, subclassed from OpenAI `gym.Env`.\n\n`agent_achtung/TrainAchtung` are different training alternatives.\nFull image uses the full image as input. Random Opponent competes against\nrandom bots just thrown onto the playing field. There are also `EnjoyAchtung`\nfiles which basically \n\n## Requirements\n\n1. gym\n2. baselines" }, { "alpha_fraction": 0.5959240794181824, "alphanum_fraction": 0.6148980855941772, "avg_line_length": 27.081632614135742, "blob_id": "f91b818bf0098b9d720c0eef48a3c8f7c38442d0", "content_id": "8797149d039621e52dc0bd77c6ab63b3302ffbbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1423, "license_type": "no_license", "max_line_length": 78, "num_lines": 49, "path": "/agent_achtung/TrainAchtungBaselinesRandomOpponent.py", "repo_name": "erikbohnsack/reinforcement-achtung", "src_encoding": "UTF-8", "text": "import gym_achtung\r\nimport gym\r\n#from gym.wrappers import Monitor\r\nfrom baselines import deepq\r\nimport time\r\nimport json\r\n\r\ndef main():\r\n env = gym.make(\"AchtungDieKurveRandomOpponent-v1\")\r\n\r\n #env = Monitor(env, directory='./Monitor', force=True)\r\n\r\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\r\n outputPathModel = 'achtung_RO_model_' + str(timestr) + '.pkl'\r\n outputPathInfo = 'achtung_RO_info_' + str(timestr) + '.txt'\r\n\r\n infoDict = {}\r\n infoDict['total_timesteps'] = 1000000\r\n infoDict['lr'] = 1e-4\r\n infoDict['buffer_size'] = 100000\r\n infoDict['exploration_fraction'] = 0.2\r\n infoDict['prioritized_replay'] = True\r\n\r\n print(\"Saving training information to achtung_RO_info_%Y%m%d-%H%M%S.txt\")\r\n with open(outputPathInfo, 'w') as file:\r\n file.write(json.dumps(infoDict)) # use `json.loads` to do the reverse\r\n\r\n\r\n act = deepq.learn(\r\n env,\r\n network='mlp',\r\n lr=5e-4,\r\n total_timesteps=infoDict['total_timesteps'],\r\n buffer_size=infoDict['buffer_size'],\r\n exploration_fraction=infoDict['exploration_fraction'],\r\n exploration_final_eps=0.02,\r\n prioritized_replay=infoDict['prioritized_replay'],\r\n print_freq=1000,\r\n callback=None,\r\n render=False\r\n )\r\n print(\"Saving model to achtung_RO_model_%Y%m%d-%H%M%S.pkl\")\r\n act.save(outputPathModel)\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()" }, { "alpha_fraction": 0.697265625, "alphanum_fraction": 0.7412109375, "avg_line_length": 24, "blob_id": "cd569f577f773d9e474ee4e5438266ce4b3f706c", "content_id": "7526429ccb9463bb59bfe7125dab325f35bd86d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 1024, "license_type": "no_license", "max_line_length": 74, "num_lines": 41, "path": "/gym_achtung/__init__.py", "repo_name": "erikbohnsack/reinforcement-achtung", "src_encoding": "UTF-8", "text": "from gym.envs.registration import register\n\nregister(\n id='AchtungDieKurve-v1',\n entry_point='gym_achtung.envs:AchtungDieKurve',\n timestep_limit=100000,\n reward_threshold=1.0,\n nondeterministic=True,\n)\n\nregister(\n id='AchtungDieKurveRandomOpponent-v1',\n entry_point='gym_achtung.envs:AchtungDieKurveRandomOpponent',\n timestep_limit=100000,\n reward_threshold=1.0,\n nondeterministic=True,\n)\n\nregister(\n id='AchtungDieKurveFullImage-v1',\n entry_point='gym_achtung.envs:AchtungDieKurveFullImage',\n timestep_limit=100000,\n reward_threshold=1.0,\n nondeterministic=True,\n)\n\nregister(\n id='AchtungDieKurveFullImageRandomOpponent-v1',\n entry_point='gym_achtung.envs:AchtungDieKurveFullImageRandomOpponent',\n timestep_limit=100000,\n reward_threshold=1.0,\n nondeterministic=True,\n)\n\nregister(\n id='AchtungDieKurveAgainstBot-v1',\n entry_point='gym_achtung.envs:AchtungDieKurveAgainstBot',\n timestep_limit=100000,\n reward_threshold=1.0,\n nondeterministic=True,\n)" }, { "alpha_fraction": 0.5351051688194275, "alphanum_fraction": 0.5495942831039429, "avg_line_length": 27.620853424072266, "blob_id": "62fc602e6ea0d6a3de19dac6825a937a0495083f", "content_id": "3d8c5802855738daa583cf32572e7726ac83ec03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12078, "license_type": "no_license", "max_line_length": 148, "num_lines": 422, "path": "/gym_achtung/envs/achtungdiekurve_fullimage_opponents.py", "repo_name": "erikbohnsack/reinforcement-achtung", "src_encoding": "UTF-8", "text": "import pygame\nimport sys\nimport math\nimport random\n#import .base\nimport time\nimport random\nfrom gym import spaces\nimport gym\nfrom pygame.constants import KEYDOWN, KEYUP, K_F15\nfrom pygame.constants import K_w, K_a, K_s, K_d\nimport numpy as np\n\nWINWIDTH = 480 # width of the program's window, in pixels\nWINHEIGHT = 480 # height in pixels\nTEXT_SPACING = 130\nRADIUS = 2 # radius of the circles\nPLAYERS = 1 # number of players\nSKIP_PROBABILITY = 0\nSPEED_CONSTANT = 2\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nP1COLOUR = RED\nP2COLOUR = GREEN\nP3COLOUR = BLUE\nBG_COLOR = (25, 25, 25)\n\n\n# basically just holds onto all of them\nclass AchtungPlayer:\n\n def __init__(self, color, width, randomize=0):\n self.color = color\n self.score = 0\n self.skip = 0\n # generates random position and direction\n self.width = width\n self.x = random.randrange(50, WINWIDTH - WINWIDTH/4)\n self.y = random.randrange(50, WINHEIGHT - WINHEIGHT/4)\n self.angle = random.randrange(0, 360)\n self.randomize = randomize\n\n def move(self):\n # computes current movement\n if self.randomize:\n self.angle += np.random.choice([-5, 0, 5])\n if self.angle > 360:\n self.angle -= 360\n elif self.angle < 0:\n self.angle += 360\n self.x += int(RADIUS * SPEED_CONSTANT * math.cos(math.radians(self.angle)))\n self.y += int(RADIUS * SPEED_CONSTANT * math.sin(math.radians(self.angle)))\n\n def draw(self, screen):\n if self.skip:\n self.skip = 0\n elif random.random() < SKIP_PROBABILITY:\n self.skip = 1\n else:\n pygame.draw.circle(screen, self.color, (self.x, self.y), self.width)\n\n def update(self):\n self.move()\n\n\nclass AchtungDieKurveFullImageRandomOpponent(gym.Env):\n metadata = {'render.modes': ['human', 
'rgb_array']}\n \"\"\"\n Parameters\n ----------\n width : int\n Screen width.\n\n height : int\n Screen height, recommended to be same dimension as width.\n\n init_length : int (default: 3)\n The starting number of segments the snake has. Do not set below 3 segments. Has issues with hitbox detection with the body for lower values.\n\n \"\"\"\n\n def __init__(self,\n width=WINWIDTH + TEXT_SPACING,\n height=WINHEIGHT, fps=30, frame_skip=1, num_steps=1,\n force_fps=True, add_noop_action=False, rng=24):\n\n self.actions = {\n \"left\": K_a,\n \"right\": K_d,\n \"NOOP\":K_F15\n }\n\n self.score = 0.0 # required.\n self.lives = 0 # required. Can be 0 or -1 if not required.\n self.ticks = 0\n self.previous_score = 0\n self.frame_count = 0\n self.fps = fps\n self.frame_skip = frame_skip\n self.num_steps = num_steps\n self.force_fps = force_fps\n self.viewer = None\n self.add_noop_action = add_noop_action\n self.last_action = []\n self.action = []\n self.height = height\n self.width = width\n self.screen_dim = (width, height) # width and height\n self.allowed_fps = None # fps that the game is allowed to run at.\n self.NOOP = K_F15 # the noop key\n self.rng = None\n self._action_set = self.getActions()\n self.action_space = spaces.Discrete(len(self._action_set))\n self.observation_space = spaces.Box(low=0, high=255, shape=(self.screen_dim[0], self.screen_dim[1], 3),\n dtype=np.uint8)\n self.rewards = { # TODO: take as input\n \"positive\": 1.0,\n \"negative\": -1.0,\n \"tick\": 1,\n \"loss\": 0,\n \"win\": 5.0\n }\n self.BG_COLOR = BG_COLOR\n\n self._setup()\n self.init()\n self.my_font = pygame.font.SysFont('bauhaus93', 37)\n\n def _setup(self):\n \"\"\"\n Setups up the pygame env, the display and game clock.\n \"\"\"\n pygame.init()\n self.screen = pygame.display.set_mode(self.getScreenDims(), 0, 32)\n self.clock = pygame.time.Clock()\n\n def getActions(self):\n \"\"\"\n Gets the actions the game supports. Optionally inserts the NOOP\n action if PLE has add_noop_action set to True.\n\n Returns\n --------\n\n list of pygame.constants\n The agent can simply select the index of the action\n to perform.\n\n \"\"\"\n actions = self.actions\n if isinstance(actions, dict):\n actions = actions.values()\n\n actions = list(actions)\n\n if self.add_noop_action:\n actions.append(self.NOOP)\n return actions\n\n def _handle_player_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n if event.type == pygame.KEYDOWN:\n key = event.key\n if key == pygame.K_ESCAPE:\n pygame.quit()\n sys.exit()\n\n if key == self.actions[\"left\"]:\n self.player.angle -= 10\n\n if key == self.actions[\"right\"]:\n self.player.angle += 10\n\n def setAction(self, action, last_action):\n \"\"\"\n Pushes the action to the pygame event queue.\n \"\"\"\n if action is None:\n action = self.NOOP\n\n if last_action is None:\n last_action = self.NOOP\n\n kd = pygame.event.Event(KEYDOWN, {\"key\": action})\n ku = pygame.event.Event(KEYUP, {\"key\": last_action})\n\n pygame.event.post(kd)\n pygame.event.post(ku)\n\n def _setAction(self, action):\n \"\"\"\n Instructs the game to perform an action if its not a NOOP\n \"\"\"\n\n if action is not None:\n self.setAction(action, self.last_action)\n\n self.last_action = action\n\n def act(self, action):\n \"\"\"\n Perform an action on the game. We lockstep frames with actions. If act is not called the game will not run.\n\n Parameters\n ----------\n\n action : int\n The index of the action we wish to perform. 
The index usually corresponds to the index item returned by getActionSet().\n\n Returns\n -------\n\n int\n Returns the reward that the agent has accumlated while performing the action.\n\n \"\"\"\n return sum(self._oneStepAct(action) for i in range(self.frame_skip))\n\n def get_keys_to_action(self):\n return self.actions\n\n def _oneStepAct(self, action):\n \"\"\"\n Performs an action on the game. Checks if the game is over or if the provided action is valid based on the allowed action set.\n \"\"\"\n\n if self.game_over():\n return 0.0\n\n if action not in self.getActions():\n action = self.NOOP\n\n self._setAction(action)\n for i in range(self.num_steps):\n time_elapsed = self._tick()\n self.__step()\n\n self.frame_count += self.num_steps\n\n return self._getReward()\n\n def _getReward(self):\n \"\"\"\n Returns the reward the agent has gained as the difference between the last action and the current one.\n \"\"\"\n reward = self.getScore() - self.previous_score\n self.previous_score = self.getScore()\n\n return reward\n\n def _tick(self):\n \"\"\"\n Calculates the elapsed time between frames or ticks.\n \"\"\"\n if self.force_fps:\n return 1000.0 / self.fps\n else:\n return self.tick(self.fps)\n\n def tick(self, fps):\n \"\"\"\n This sleeps the game to ensure it runs at the desired fps.\n \"\"\"\n return self.clock.tick_busy_loop(fps)\n\n def adjustRewards(self, rewards):\n \"\"\"\n\n Adjusts the rewards the game gives the agent\n\n Parameters\n ----------\n rewards : dict\n A dictonary of reward events to float rewards. Only updates if key matches those specificed in the init function.\n\n \"\"\"\n for key in rewards.keys():\n if key in self.rewards:\n self.rewards[key] = rewards[key]\n\n def getScreenRGB(self):\n \"\"\"\n Returns the current game screen in RGB format.\n\n Returns\n --------\n numpy uint8 array\n Returns a numpy array with the shape (width, height, 3).\n\n \"\"\"\n\n return pygame.surfarray.array3d(\n pygame.display.get_surface()).astype(np.uint8)\n\n def getScreenDims(self):\n \"\"\"\n Gets the screen dimensions of the game in tuple form.\n\n Returns\n -------\n tuple of int\n Returns tuple as follows (width, height).\n\n \"\"\"\n return self.screen_dim\n\n def getScore(self):\n return self.score\n\n def game_over(self):\n return self.lives == -1\n\n def setRNG(self, rng):\n \"\"\"\n Sets the rng for games.\n \"\"\"\n\n if self.rng is None:\n self.rng = rng\n\n def init(self):\n \"\"\"\n Starts/Resets the game to its inital state\n \"\"\"\n self.player = AchtungPlayer(BLUE, RADIUS)\n self.opponent = AchtungPlayer(RED, RADIUS, randomize=1)\n self.opponent2 = AchtungPlayer(RED, RADIUS, randomize=1)\n self.opponent3 = AchtungPlayer(RED, RADIUS, randomize=1)\n self.screen.fill(self.BG_COLOR)\n self.score = 0\n self.ticks = 0\n self.lives = 1\n\n def __step(self):\n \"\"\"\n Perform one step of game emulation.\n \"\"\"\n\n self.ticks += 1\n self._handle_player_events()\n self.score += self.rewards[\"tick\"]\n self.player.update()\n self.opponent.update()\n self.opponent2.update()\n self.opponent3.update()\n self.collision()\n self.player.draw(self.screen)\n self.opponent.draw(self.screen)\n self.opponent2.draw(self.screen)\n self.opponent3.draw(self.screen)\n\n def collision(self):\n collide_check = 0\n try:\n x_check = (self.player.x < 0) or \\\n (self.player.x > self.width)\n y_check = (self.player.y < 0) or \\\n (self.player.y > self.height)\n\n collide_check = self.screen.get_at((self.player.x, self.player.y)) != BG_COLOR\n except IndexError:\n x_check = (self.player.x 
< 0) or (self.player.x > self.width)\n y_check = (self.player.y < 0) or (self.player.y > self.height)\n\n if self.player.skip:\n collide_check = 0\n if x_check or y_check or collide_check:\n self.lives = -1\n\n if self.lives <= 0.0:\n self.score += self.rewards[\"loss\"]\n\n def step(self, a):\n try:\n reward = self.act(self._action_set[a])\n except IndexError:\n reward = self.act(self._action_set[a[0]])\n state = self.getScreenRGB()\n terminal = self.game_over()\n return state, reward, terminal, {}\n\n def reset(self):\n self.observation_space = spaces.Box(low=0, high=255, shape=(self.screen_dim[0], self.screen_dim[1], 3),\n dtype=np.uint8)\n self.last_action = []\n self.action = []\n self.previous_score = 0.0\n self.init()\n state = self.getScreenRGB()\n return state\n\n def render(self, mode='human', close=False):\n pygame.display.update()\n\n def seed(self, seed):\n rng = np.random.RandomState(seed)\n self.rng = rng\n self.init()\n\n\nif __name__ == \"__main__\":\n\n pygame.init()\n game = AchtungDieKurveFullImage(width=WINWIDTH, height=WINHEIGHT)\n game.clock = pygame.time.Clock()\n game.rng = np.random.RandomState(24)\n game.init()\n\n while True:\n if game.game_over():\n game.init()\n\n dt = game.clock.tick_busy_loop(30)\n game.step(dt)\n pygame.display.update()\n" }, { "alpha_fraction": 0.5890411138534546, "alphanum_fraction": 0.5965130925178528, "avg_line_length": 25.220338821411133, "blob_id": "0d924adda84564cc2624f64516e5e235e0fad2c7", "content_id": "bceb1a94d7449bbdf2fc4cdafcdc5fc1e1d81b91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1606, "license_type": "no_license", "max_line_length": 82, "num_lines": 59, "path": "/agent_achtung/EnjoyAchtung.py", "repo_name": "erikbohnsack/reinforcement-achtung", "src_encoding": "UTF-8", "text": "import gym\r\nimport gym_achtung\r\nfrom baselines import deepq\r\nfrom gym.wrappers import Monitor\r\nimport pickle\r\nimport os\r\nimport time\r\nimport matplotlib.pyplot as plt\r\n\r\nmodelToRun = 'achtung_best_bot.pkl'\r\n\r\ndef main():\r\n env = gym.make(\"AchtungDieKurve-v1\")\r\n act = deepq.learn(env, network='mlp', total_timesteps=0, load_path=modelToRun)\r\n\r\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\r\n outputPath = './Sharks/' + timestr\r\n os.makedirs(outputPath)\r\n\r\n #env = Monitor(env, directory= '/Monitor', force=True)\r\n\r\n meanRewards = []\r\n qValues = []\r\n numberOfEvaluations = 50\r\n eval = 0\r\n while eval < numberOfEvaluations:\r\n eval += 1\r\n obs, done = env.reset(), False\r\n episode_rew = 0\r\n episode_qVal = []\r\n while not done:\r\n getActionQvalue = act(obs)\r\n action = getActionQvalue[0][0]\r\n\r\n obs, rew, done, _ = env.step(action)\r\n #time.sleep(0.1)\r\n\r\n episode_rew += rew\r\n episode_qVal.append(getActionQvalue[1])\r\n\r\n print(\"Episode reward\", episode_rew)\r\n meanRewards.append(episode_rew)\r\n qValues.append(episode_qVal)\r\n\r\n\r\n outputNameReward = outputPath + '/EnjoyReward.pkl'\r\n outputNameQvalues = outputPath + '/EnjoyQvalues.pkl'\r\n\r\n with open(outputNameReward, 'wb') as f:\r\n pickle.dump(meanRewards, f)\r\n print('Rewards dumped @ ' + outputNameReward )\r\n\r\n with open(outputNameQvalues, 'wb') as f:\r\n pickle.dump(qValues, f)\r\n print('Qvalues dumped @ ' + outputNameQvalues)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" }, { "alpha_fraction": 0.5701634883880615, "alphanum_fraction": 0.5769754648208618, "avg_line_length": 22.46666717529297, "blob_id": "6e61d9bbbf31ce48af515d2fcadcdf048dfc5052", 
"content_id": "d66b24f0180dbb3a55d86a0635ec41cf15523a96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1468, "license_type": "no_license", "max_line_length": 60, "num_lines": 60, "path": "/agent_achtung/EnjoyAchtungRandom.py", "repo_name": "erikbohnsack/reinforcement-achtung", "src_encoding": "UTF-8", "text": "import gym\r\nimport gym_achtung\r\nfrom gym.wrappers import Monitor\r\nimport pickle\r\nimport os\r\nimport time\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# The world's simplest agent!\r\nclass RandomAgent(object):\r\n def __init__(self, action_space):\r\n self.action_space = action_space\r\n\r\n def act(self, observation, reward, done):\r\n return self.action_space.sample()\r\n\r\n\r\ndef main():\r\n env = gym.make(\"AchtungDieKurve-v1\")\r\n agent = RandomAgent(env.action_space)\r\n\r\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\r\n outputPath = './Sharks/' + timestr\r\n os.makedirs(outputPath)\r\n\r\n #env = Monitor(env, directory= '/Monitor', force=True)\r\n render = False\r\n\r\n meanRewards = []\r\n numberOfEvaluations = 100\r\n eval = 0\r\n while eval < numberOfEvaluations:\r\n eval += 1\r\n obs, done = env.reset(), False\r\n episode_rew = 0\r\n rew = 0\r\n while not done:\r\n if render:\r\n env.render()\r\n\r\n action = agent.act(obs, rew, done)\r\n\r\n\r\n obs, rew, done, _ = env.step([action])\r\n #time.sleep(0.1)\r\n\r\n episode_rew += rew\r\n\r\n print(\"Episode reward\", episode_rew)\r\n meanRewards.append(episode_rew)\r\n\r\n outputNameReward = outputPath + '/EnjoyRandomReward.pkl'\r\n\r\n with open(outputNameReward, 'wb') as f:\r\n pickle.dump(meanRewards, f)\r\n print('Rewards dumped @ ' + outputNameReward )\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" }, { "alpha_fraction": 0.6453201770782471, "alphanum_fraction": 0.6600984930992126, "avg_line_length": 14.615385055541992, "blob_id": "e6301708c6b42969c14e0874d3292b5f4915bc14", "content_id": "b8679649e4d5cac87ab944768c7a2a682c7bb22b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 203, "license_type": "no_license", "max_line_length": 63, "num_lines": 13, "path": "/agent_achtung/EnjoyYourself.py", "repo_name": "erikbohnsack/reinforcement-achtung", "src_encoding": "UTF-8", "text": "import gym\nimport gym_achtung\n\nfrom gym.utils import play\n\n\ndef main():\n env = gym.make(\"AchtungDieKurveFullImageRandomOpponent-v1\")\n play.play(env, fps=30)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5774435997009277, "alphanum_fraction": 0.6052631735801697, "avg_line_length": 27.600000381469727, "blob_id": "74295dc6fa0be5bf66b4914f5aa8c5da247f5ef1", "content_id": "cf61e78498f8524572804d744ea43c9dbb427902", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1330, "license_type": "no_license", "max_line_length": 85, "num_lines": 45, "path": "/agent_achtung/TrainAchtungBaselinesFullImage.py", "repo_name": "erikbohnsack/reinforcement-achtung", "src_encoding": "UTF-8", "text": "import gym_achtung\r\nimport gym\r\n#from gym.wrappers import Monitor\r\nfrom baselines import deepq\r\nimport time\r\nimport json\r\n\r\ndef main():\r\n env = gym.make(\"AchtungDieKurveFullImage-v1\")\r\n\r\n #env = Monitor(env, directory='./Monitor', force=True)\r\n\r\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\r\n outputPathModel = 'achtung_FullImage_model_' + str(timestr) + '.pkl'\r\n outputPathInfo = 'achtung_FullImage_info_' + str(timestr) + 
'.txt'\r\n\r\n #print(\"Saving training information to achtung_FullImage_info_%Y%m%d-%H%M%S.txt\")\r\n #with open(outputPathInfo, 'w') as file:\r\n # file.write(json.dumps(infoDict)) # use `json.loads` to do the reverse\r\n\r\n act = deepq.learn(\r\n env,\r\n network='conv_only',\r\n lr=1e-4,\r\n buffer_size=10000,\r\n total_timesteps=1000,\r\n exploration_fraction=0.1,\r\n exploration_final_eps=0.01,\r\n train_freq=4,\r\n learning_starts=10000,\r\n target_network_update_freq=1000,\r\n gamma=0.99,\r\n prioritized_replay=True,\r\n prioritized_replay_alpha=0.6,\r\n checkpoint_freq=10000,\r\n checkpoint_path=None,\r\n dueling=True,\r\n render=False\r\n )\r\n print(\"Saving model to achtung_FullImage_model_%Y%m%d-%H%M%S.pkl\")\r\n act.save(outputPathModel)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()" }, { "alpha_fraction": 0.5940997004508972, "alphanum_fraction": 0.6007121205329895, "avg_line_length": 24.519479751586914, "blob_id": "03b173473a02440a49adbd9bad95b2e0870394d2", "content_id": "b0b12debb967187189abbf315d588c0a9934de8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1966, "license_type": "no_license", "max_line_length": 85, "num_lines": 77, "path": "/agent_achtung/train_achtung.py", "repo_name": "erikbohnsack/reinforcement-achtung", "src_encoding": "UTF-8", "text": "import logging\nimport os, sys\nimport numpy as np\nimport gym\nfrom gym.wrappers import Monitor\nfrom agentStuff import DQNAgent\nimport matplotlib.pyplot as plt\n\nimport gym_achtung\n\n\nif __name__ == '__main__':\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n env = gym.make('AchtungDieKurve-v1' if len(sys.argv)<2 else sys.argv[1])\n\n\n # Initializations\n num_actions = env.action_space.n\n\n obs_dim = env.observation_space.shape[0]\n\n # Choose agent. 
Choose wisely.\n agent = DQNAgent(state_size=obs_dim, action_size=num_actions)\n\n\n outdir = '/Users/adamlilja/Code/achtung-die-PLE/tmp'\n env = Monitor(env, directory=outdir, force=True)\n env.seed(0)\n\n episode_count = 100\n reward = 0\n done = False\n\n fitnessValues = np.zeros(episode_count)\n\n for i in range(episode_count):\n state = env.reset()\n state = np.reshape(state, [1, obs_dim])\n\n fitness = 0\n\n while True:\n env.render()\n\n # Decide action\n action = agent.act(state)\n\n # Take action\n next_state, reward, done, _ = env.step(action)\n next_state = np.reshape(next_state, [1, obs_dim])\n fitness += reward\n\n # Remember the previous state, action, reward, and done\n agent.remember(state, action, reward, next_state, done)\n\n # make next_state the new current state for the next frame.\n state = next_state\n\n if done:\n # print the score and break out of the loop\n print(\"episode: {}/{}, reward: {}\".format(i, episode_count, fitness))\n fitnessValues[i] = fitness\n break\n # train the agent with the experience of the episode\n agent.replay(8)\n\n plt.plot(fitnessValues)\n plt.ylabel('Fitness')\n plt.xlabel('Epoch')\n plt.show()\n\n # Dump result info to disk\n env.close()\n\n logger.info(\"Successfully ran training\")\n\n" }, { "alpha_fraction": 0.5718997120857239, "alphanum_fraction": 0.5999340415000916, "avg_line_length": 27.60377311706543, "blob_id": "a798780eac8bddb110d95ec6b4ef9769a1fdc9a0", "content_id": "353e4c14b9327cb95fe3b1fccf9e5077df1ef75d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3032, "license_type": "no_license", "max_line_length": 117, "num_lines": 106, "path": "/gym_achtung/envs/achtungdiekurveAgainstBot.py", "repo_name": "erikbohnsack/reinforcement-achtung", "src_encoding": "UTF-8", "text": "import pygame\nimport numpy as np\nfrom gym_achtung.envs.achtungdiekurve import AchtungDieKurve, AchtungPlayer\nfrom gym import spaces\n\nWINWIDTH = 480 # width of the program's window, in pixels\nWINHEIGHT = 480 # height in pixels\nTEXT_SPACING = 130\nRADIUS = 2 # radius of the circles\nPLAYERS = 1 # number of players\nSKIP_PROBABILITY = 0\nSPEED_CONSTANT = 2\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nP1COLOUR = RED\nP2COLOUR = GREEN\nP3COLOUR = BLUE\nBG_COLOR = (25, 25, 25)\nBEAM_SIGHT = 240\nBEAM_MAX_ANGLE = 120\nBEAM_STEP = 30\nBEAMS = range(BEAM_MAX_ANGLE, -BEAM_MAX_ANGLE-BEAM_STEP, -BEAM_STEP)\n\n\n# basically just holds onto all of them\n\nclass AchtungDieKurveAgainstBot(AchtungDieKurve):\n\n def __init__(self):\n self.width = WINWIDTH\n self.aiscore = 0\n self.humanscore = -1\n self.aiwon = False\n self.human_init()\n\n super().__init__()\n\n def _setup(self):\n \"\"\"\n Setups up the pygame env, the display and game clock.\n \"\"\"\n pygame.init()\n self.screen = pygame.display.set_mode(self.getScreenDims())\n pygame.display.set_caption('Achtung AI AI score: {} Human score: {}'.format(self.aiscore, self.humanscore))\n self.clock = pygame.time.Clock()\n\n def _step(self):\n\n self._handle_human_player_events()\n self.humanplayer.update()\n if self.collision(self.humanplayer.x, self.humanplayer.y, self.humanplayer.skip):\n self.aiwon = True\n self.reset()\n self.humanplayer.draw(self.screen)\n super()._step()\n\n def human_init(self):\n self.humanplayer = AchtungPlayer(GREEN, RADIUS)\n\n def _handle_human_player_events(self):\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT]:\n self.humanplayer.angle -= 10\n if 
self.humanplayer.angle <= 0:\n self.humanplayer.angle += 360\n if keys[pygame.K_RIGHT]:\n self.humanplayer.angle += 10\n if self.humanplayer.angle >= 360:\n self.humanplayer.angle -= 360\n\n def reset(self):\n if self.aiwon:\n self.aiscore += 1\n else:\n self.humanscore += 1\n pygame.display.set_caption('Achtung AI - AI score: {} Human score: {}'.format(self.aiscore, self.humanscore))\n self.human_init()\n self.aiwon = False\n self.observation_space = spaces.Box(low=0, high=WINWIDTH, shape=(12,), dtype=np.uint8)\n self.last_action = []\n self.action = []\n self.previous_score = 0.0\n self.init()\n state = self.getGameState()\n return state\n #super().reset()\n\n\nif __name__ == \"__main__\":\n\n pygame.init()\n game = AchtungDieKurveAgainstBot()\n game.clock = pygame.time.Clock()\n game.rng = np.random.RandomState(24)\n game.init()\n\n while True:\n if game.game_over():\n game.init()\n\n dt = game.clock.tick_busy_loop(30)\n game.step(dt)\n pygame.display.update()\n" }, { "alpha_fraction": 0.9019138813018799, "alphanum_fraction": 0.9019138813018799, "avg_line_length": 82.19999694824219, "blob_id": "70d5744edc6c48d09feeec35767d2d2a8b4355f6", "content_id": "225c3a0130c33ceaebd6c7bf606e0cdda2a9cf73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 418, "license_type": "no_license", "max_line_length": 103, "num_lines": 5, "path": "/gym_achtung/envs/__init__.py", "repo_name": "erikbohnsack/reinforcement-achtung", "src_encoding": "UTF-8", "text": "from gym_achtung.envs.achtungdiekurve import AchtungDieKurve\nfrom gym_achtung.envs.achtungdiekurve_randomopponent import AchtungDieKurveRandomOpponent\nfrom gym_achtung.envs.achtungdiekurve_fullimage import AchtungDieKurveFullImage\nfrom gym_achtung.envs.achtungdiekurve_fullimage_opponents import AchtungDieKurveFullImageRandomOpponent\nfrom gym_achtung.envs.achtungdiekurveAgainstBot import AchtungDieKurveAgainstBot\n\n\n" }, { "alpha_fraction": 0.5775862336158752, "alphanum_fraction": 0.5933908224105835, "avg_line_length": 23.85714340209961, "blob_id": "e29fc6a58e11543446122536e56a6c9222407067", "content_id": "994fece24b71c5f52acc372309c5a85ee07f0dd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 696, "license_type": "no_license", "max_line_length": 84, "num_lines": 28, "path": "/agent_achtung/EnjoyVsBot.py", "repo_name": "erikbohnsack/reinforcement-achtung", "src_encoding": "UTF-8", "text": "import gym\nimport gym_achtung\nfrom baselines import deepq\nimport time\n\nfrom gym.utils import play\n\nmodel_to_run = \"achtung_best_bot.pkl\"\n\ndef main():\n env = gym.make(\"AchtungDieKurveAgainstBot-v1\")\n number_of_evaluations = 50\n eval = 0\n act = deepq.learn(env, network='mlp', total_timesteps=0, load_path=model_to_run)\n while eval < number_of_evaluations:\n eval += 1\n obs, done = env.reset(), False\n print(eval)\n while not done:\n time.sleep(0.01)\n env.render()\n getActionQvalue = act(obs)\n action = getActionQvalue[0][0]\n obs, rew, done, _ = env.step(action)\n\n\nif __name__ == '__main__':\n main()\n" } ]
12
josefigueredo/telegram-py3-aws-serverless-poc
https://github.com/josefigueredo/telegram-py3-aws-serverless-poc
e1df3c2631e20902f7ebb78943792ef68db7340f
ada4d474af7a95289319759834cbe635e668055a
3df99f93fa54139e7d3780034ca3aff4f93c173a
refs/heads/master
2020-03-18T22:50:20.464424
2018-05-30T00:52:58
2018-05-30T00:52:58
135,368,319
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7555110454559326, "alphanum_fraction": 0.757515013217926, "avg_line_length": 18.230770111083984, "blob_id": "4613183eae9944f4098d8044213b9d87bbc8e7b2", "content_id": "083d32a783889510ad4fa6e66a9eb65d94cde562", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 499, "license_type": "permissive", "max_line_length": 91, "num_lines": 26, "path": "/README.md", "repo_name": "josefigueredo/telegram-py3-aws-serverless-poc", "src_encoding": "UTF-8", "text": "## Serverless Telegram bot on AWS Lambda\n\n### Description\nSimple proof of concept of Telegram chatbot + Python 3 + AWS Lambda + Serverless framework.\n\n### Deploying\n\nInstall Serverless framework:\n\n`npm install -g serverless`\n\nExport credentials:\n\n```\nexport AWS_ACCESS_KEY_ID=<Access key ID>\nexport AWS_SECRET_ACCESS_KEY=<Secret access key>\nexport TELEGRAM_TOKEN=<Your Telegram Token>\n```\n\nInstall pip requirements:\n\n`pip install -r requirements.txt -t vendored`\n\nDeploy to AWS:\n\n`serverless deploy`" }, { "alpha_fraction": 0.5912322402000427, "alphanum_fraction": 0.5959715843200684, "avg_line_length": 25.375, "blob_id": "075e323c474d910770846e27fb62fd1045531eda", "content_id": "1ace4dff0f02d52f91a28b35a795a6978a801474", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 844, "license_type": "permissive", "max_line_length": 68, "num_lines": 32, "path": "/handler.py", "repo_name": "josefigueredo/telegram-py3-aws-serverless-poc", "src_encoding": "UTF-8", "text": "import json\nimport os\nimport sys\nhere = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(os.path.join(here, \"./vendored\"))\n\nimport requests\n\nTOKEN = os.environ['TELEGRAM_TOKEN']\nBASE_URL = \"https://api.telegram.org/bot{}\".format(TOKEN)\n\n\ndef hello(event, context):\n try:\n data = json.loads(event[\"body\"])\n message = str(data[\"message\"][\"text\"])\n chat_id = data[\"message\"][\"chat\"][\"id\"]\n first_name = data[\"message\"][\"chat\"][\"first_name\"]\n\n response = \"Please /start, {}\".format(first_name)\n\n if \"start\" in message:\n response = \"Hello {}\".format(first_name)\n\n data = {\"text\": response.encode(\"utf8\"), \"chat_id\": chat_id}\n url = BASE_URL + \"/sendMessage\"\n requests.post(url, data)\n\n except Exception as e:\n print(e)\n\n return {\"statusCode\": 200}\n" } ]
2
pushkardravid/spacyVisualiser
https://github.com/pushkardravid/spacyVisualiser
ef812c501f48e7d6749ee28570fa259d614d5d46
7b70e3f16b7ca9b9791082ed53cfcc562ddc69c0
19b39b71717e9ec3fdc758cb349e6c9814305332
refs/heads/master
2022-12-22T17:46:19.122276
2019-04-01T14:47:24
2019-04-01T14:47:24
177,221,786
0
0
MIT
2019-03-22T23:32:15
2019-04-01T14:47:35
2022-12-08T01:42:46
JavaScript
[ { "alpha_fraction": 0.43425604701042175, "alphanum_fraction": 0.6730104088783264, "avg_line_length": 14.621622085571289, "blob_id": "27b6f720fa9c332e9a398cc68ea991c060382139", "content_id": "85d77560c6983eac23905177e5a94db1bd504c14", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 578, "license_type": "permissive", "max_line_length": 22, "num_lines": 37, "path": "/requirements.txt", "repo_name": "pushkardravid/spacyVisualiser", "src_encoding": "UTF-8", "text": "asn1crypto==0.24.0\ncertifi==2018.11.29\ncffi==1.11.5\nchardet==3.0.4\nClick==7.0\ncryptography==2.4.2\ncymem==2.0.2\ncytoolz==0.9.0.1\ndill==0.2.8.2\nen-core-web-lg==2.0.0\nFlask==1.0.2\nidna==2.7\nitsdangerous==1.1.0\nJinja2==2.10\nMarkupSafe==1.1.1\nmkl-fft==1.0.6\nmkl-random==1.0.1\nmsgpack==0.5.6\nmsgpack-numpy==0.4.3.2\nmurmurhash==1.0.1\nnumpy==1.15.4\nplac==0.9.6\npreshed==2.0.1\npycparser==2.19\npyOpenSSL==18.0.0\nPySocks==1.6.8\nregex==2018.11.22\nrequests==2.21.0\nsix==1.12.0\nspacy==2.0.16\nthinc==6.12.1\ntoolz==0.9.0\ntqdm==4.31.1\nujson==1.35\nurllib3==1.24.1\nWerkzeug==0.15.1\nwrapt==1.10.11\n" }, { "alpha_fraction": 0.7293064594268799, "alphanum_fraction": 0.7382550239562988, "avg_line_length": 36.33333206176758, "blob_id": "32ee8695e25afb476f96e967ea3546ae665daac6", "content_id": "50b6e03a612820f1fe587588e63763fcc3b9760a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 447, "license_type": "permissive", "max_line_length": 127, "num_lines": 12, "path": "/medicalEntityTagger.py", "repo_name": "pushkardravid/spacyVisualiser", "src_encoding": "UTF-8", "text": "from medacy.model import Model\n\nmodel = Model.load_external('medacy_model_clinical_notes')\n\ndef tagMedical(text):\n annotation = model.predict(text)\n return formatResponse(annotation)\n\ndef formatResponse(annotation):\n entities_dict = annotation.get_entity_annotations(return_dictionary=True)\n entities = [{'text':entity[3], 'start':entity[1], 'end':entity[2], 'label':entity[0]} for entity in entities_dict.values()]\n return entities" }, { "alpha_fraction": 0.6487758755683899, "alphanum_fraction": 0.6572504639625549, "avg_line_length": 31.212121963500977, "blob_id": "f03aa13c0feeb16dee30a948cfade129dde1d162", "content_id": "072cce1a985c4fc9507217a8506dc4f5059305c6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1062, "license_type": "permissive", "max_line_length": 109, "num_lines": 33, "path": "/app.py", "repo_name": "pushkardravid/spacyVisualiser", "src_encoding": "UTF-8", "text": "from flask import Flask, request, jsonify\nfrom flask import render_template\nimport json\nfrom entityTagger import tag\n#from medicalEntityTagger import tagMedical\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/ner', methods=['POST'])\ndef nerTagger():\n data = json.loads(request.get_data().decode())\n text = data['text']\n tagged_entities = tag(text)\n resp = [{'text':t[0], 'start':t[1], 'end':t[2], 'label':t[3]} for t in tagged_entities]\n tags = list(set([entity['label'] for entity in resp]))\n return jsonify({'tagged_entities':resp, 'tags':tags})\n\n'''\n@app.route('/nerMedical', methods=['POST'])\ndef nerTaggerMedical(): \n data = json.loads(request.get_data().decode())\n text = data['text']\n tagged_entities = tagMedical(text)\n tags = [tagged_entity['label'] for tagged_entity in tagged_entities] if 
len(tagged_entities) != 0 else []\n    return jsonify({'tagged_entities':tagged_entities, 'tags':tags})\n'''\n\nif __name__ == '__main__':\n    app.run(port=5000, debug=True)" }, { "alpha_fraction": 0.3902912735939026, "alphanum_fraction": 0.47961166501045227, "avg_line_length": 22.454545974731445, "blob_id": "194ee160b53dc260d4136f6899a7b8dbf13db02c", "content_id": "6aafedf8227f69da186e498da921770b0409fafa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 515, "license_type": "permissive", "max_line_length": 50, "num_lines": 22, "path": "/tagger.py", "repo_name": "pushkardravid/spacyVisualiser", "src_encoding": "UTF-8", "text": "colorMap = {\n    'GPE':'#feca74',\n    'PERSON':'#aa9cfc',\n    'ORG':'#7aecec',\n    'NORP':'#c887fb',\n    'LAW':'#ff8197',\n    'ORDINAL':'#e4e7d2',\n    'CARDINAL':'#e4e7d2',\n    'FAC':'#ddd',\n    'LOC':'#ff9561',\n    'LANGUAGE':'#ff8197',\n    'DATE':'#bfe1d9',\n    'TIME':'#bfe1d9',\n    'PERCENT':'#e4e7d2',\n    'MONEY':'#e4e7d2',\n    'QUANTITY':'#e4e7d2',\n    'PRODUCT': '#17a2b8',\n    'WORK_OF_ART': '#f0d0ff',\n    'EVENT':'#ffeb80'\n}\nfor k,v in colorMap.items():\n    print('.' + k + '{' + 'background:'+ v + ';}')" }, { "alpha_fraction": 0.6842105388641357, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 11.583333015441895, "blob_id": "5035410c3f35ba133733f500b13dd8df5fe5454d", "content_id": "d914423e3ba09ffe445116a833e478c9c8a59e88", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 152, "license_type": "permissive", "max_line_length": 39, "num_lines": 12, "path": "/README.md", "repo_name": "pushkardravid/spacyVisualiser", "src_encoding": "UTF-8", "text": "## Spacy entity visualiser for browser\n\n###### Setup\n\n```\npip install -r requirements.txt\n\npython -m spacy download en_core_web_lg\n\npython app.py\n\n```\n\n" }, { "alpha_fraction": 0.6549999713897705, "alphanum_fraction": 0.6549999713897705, "avg_line_length": 24.125, "blob_id": "bc9982c4d6f52fdec91bf2d373cf2c6e150d19e2", "content_id": "e8bff615319b0cea9850713ce54d75fe78158bdb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 200, "license_type": "permissive", "max_line_length": 86, "num_lines": 8, "path": "/entityTagger.py", "repo_name": "pushkardravid/spacyVisualiser", "src_encoding": "UTF-8", "text": "import en_core_web_lg\n\n# load the large English model, matching requirements.txt and the README\nnlp = en_core_web_lg.load()\n\ndef tag(text):\n    doc = nlp(text)\n    tagged_entities = [(X.text, X.start_char, X.end_char, X.label_) for X in doc.ents]\n    return tagged_entities" } ]
6
bielinzz/Improving-NQG-with-CGAN
https://github.com/bielinzz/Improving-NQG-with-CGAN
cbc9926ebf580936d77379fa60e9ba17068d5c65
b589624cefe398d6c97ae0c428f6995a86f743d9
f285ae2af489f86b07bf18611bd46db922b7eac4
refs/heads/master
2020-04-27T08:38:37.279929
2019-11-23T04:09:28
2019-11-23T04:09:28
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5450027585029602, "alphanum_fraction": 0.5543898344039917, "avg_line_length": 35.2400016784668, "blob_id": "d0ee8cd8e50c9cb512bf531292a8e3c6be9947ee", "content_id": "32eee5c3e760ccb213c3a8c078bd781d1a6eb824", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1813, "license_type": "no_license", "max_line_length": 71, "num_lines": 50, "path": "/code/reference.py", "repo_name": "bielinzz/Improving-NQG-with-CGAN", "src_encoding": "UTF-8", "text": "from collections import defaultdict\n\ndef ref(src_rev, rev, rev_raw, train_iter, val_iter_raw):\n def delete_pad(sent):\n pad = '<pad>'\n i = len(sent) - 1\n while sent[i] == pad:\n del sent[i]\n i -=1\n return sent\n\n train_ref = defaultdict(list)\n for i, data in enumerate(train_iter):\n # src_data = data.source[0].permute(1, 0) # batch_size, length\n src_data = data.source[0]\n src_data = src_rev.reverse(src_data)\n tgt_data = data.target[0]\n tgt = rev.reverse(tgt_data) # todo: reverse 有 unk\n batch_size = len(src_data)\n for k in range(batch_size):\n # key = \" \".join([str(idx.item()) for idx in src_data[k]])\n key = \" \".join([idx for idx in src_data[k]])\n train_ref[key].append(tgt[k].split())\n\n\n val_ref = defaultdict(list)\n val_reference = []\n for i, data in enumerate(val_iter_raw):\n src_data = data.source[0].permute(1, 0) # batch_size, length\n tgt_data = data.target[0]\n tgt = rev_raw.reverse(tgt_data)\n batch_size = src_data.size(0)\n for k in range(batch_size):\n key = \" \".join([str(idx.item()) for idx in src_data[k]])\n val_ref[key].append(delete_pad(tgt[k].split()))\n\n for i, data in enumerate(val_iter_raw):\n src_data = data.source[0].permute(1, 0) # batch_size, length\n batch_size = src_data.size(0)\n for k in range(batch_size):\n key = \" \".join([str(idx.item()) for idx in src_data[k]])\n val_reference.append(val_ref[key])\n\n # f = open('ref.txt', 'w', encoding='utf-8')\n # f.write(str(train_reference))\n # f.close()\n return train_ref, val_reference\n\nif __name__ == '__main__':\n ref(src_rev, rev, rev_raw, train_ref_iter, val_iter_raw)" }, { "alpha_fraction": 0.6298415660858154, "alphanum_fraction": 0.6377640962600708, "avg_line_length": 28.86842155456543, "blob_id": "c391db0850078050e188c378feb8d2279d5dcc56", "content_id": "64bf1e5c520dbdf800706a0eb98a6448eb48ae29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2272, "license_type": "no_license", "max_line_length": 121, "num_lines": 76, "path": "/code/helpers.py", "repo_name": "bielinzz/Improving-NQG-with-CGAN", "src_encoding": "UTF-8", "text": "import torch\nfrom torch.autograd import Variable\nfrom config import device\n\ndef prepare_generator_batch(target_batch, gpu=False):\n \"\"\"\n Takes samples (a batch) and returns\n\n Inputs: samples, start_letter, cuda\n - samples: batch_size x seq_len (Tensor with a sample in each row)\n\n Returns: inp, target\n - inp: batch_size x seq_len (same as target, but with start_letter prepended)\n - target: batch_size x seq_len (Variable same as samples)\n \"\"\"\n\n target = target_batch[:,1:]\n inp= target_batch[:,:-1]\n \n inp = Variable(inp).type(torch.LongTensor)\n target = Variable(target).type(torch.LongTensor)\n\n if gpu:\n inp = inp.to(device)\n target = target.to(device)\n\n return inp, target\n\n\ndef prepare_discriminator_data(pos_samples, pos_lengths, neg_samples, neg_lengths, passages, ans, src_lens, tgt_special):\n \"\"\"\n Takes positive (target) samples, negative (generator) 
samples and prepares inp and target data for discriminator.\n\n Inputs: pos_samples, neg_samples\n - pos_samples: pos_size x seq_len\n - neg_samples: neg_size x seq_len\n\n Returns: inp, target\n - inp: (pos_size + neg_size) x seq_len\n - target: pos_size + neg_size (boolean 1/0)\n \"\"\"\n tgt_pad = tgt_special[0]\n\n pos_seq_len = pos_samples.size(1)\n neg_seq_len = neg_samples.size(1)\n max_seq_len = max(pos_seq_len, neg_seq_len)\n\n num_pos = len(pos_samples)\n num_neg = len(neg_samples)\n num_samples = num_pos + num_neg\n\n inp = torch.ones(num_samples, max_seq_len).type(torch.LongTensor) * tgt_pad\n inp[:num_pos, :pos_seq_len] = pos_samples\n inp[num_pos:, :neg_seq_len] = neg_samples\n\n passages = passages.repeat(2, 1)\n ans = ans.repeat(2, 1)\n src_lens = src_lens.repeat(2)\n\n lengths = torch.cat((pos_lengths, neg_lengths), 0)\n target = torch.ones(pos_samples.size()[0] + neg_samples.size()[0])\n target[:pos_samples.size()[0]] = 0\n\n # shuffle\n perm = torch.randperm(target.size()[0])\n target = target[perm]\n inp = inp[perm]\n lengths = lengths[perm]\n passages = passages[perm]\n ans = ans[perm]\n src_lens = src_lens[perm]\n inp = Variable(inp)\n target = Variable(target)\n\n\n return inp, target, lengths, passages, ans, src_lens\n\n\n" }, { "alpha_fraction": 0.5233033299446106, "alphanum_fraction": 0.5348135232925415, "avg_line_length": 40.19171142578125, "blob_id": "c703fd8f75dc3d28121ff57e05355e90a9196501", "content_id": "4a2031b18471b87ebb92bce808d5a1913d234ac3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15955, "license_type": "no_license", "max_line_length": 120, "num_lines": 386, "path": "/code/generator.py", "repo_name": "bielinzz/Improving-NQG-with-CGAN", "src_encoding": "UTF-8", "text": "import torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import pad_packed_sequence as unpack\nfrom torch.nn.utils.rnn import pack_padded_sequence as pack\nfrom data import tgt_vocab_size, src_vocab_size\nimport torch.nn.functional as F\nfrom config import max_sent_len, hidden_size\n\nclass BilinearSeqAttn(nn.Module):\n \"\"\"A bilinear attention layer over a sequence X w.r.t y:\n * o_i = softmax(x_i'Wy) for x_i in X.\n Optionally don't normalize output weights.\n \"\"\"\n\n def __init__(self, x_size, y_size, identity=False, normalize=True):\n super(BilinearSeqAttn, self).__init__()\n self.normalize = normalize\n\n # If identity is true, we just use a dot product without transformation.\n if not identity:\n self.linear = nn.Linear(y_size, x_size)\n else:\n self.linear = None\n\n def forward(self, x, y, x_mask=None):\n \"\"\"\n Args:\n x: batch * len * hdim1\n y: batch * hdim2\n x_mask: batch * len (1 for padding, 0 for true)\n Output:\n alpha = batch * len\n \"\"\"\n Wy = self.linear(y) if self.linear is not None else y\n xWy = x.bmm(Wy.unsqueeze(2)).squeeze(2)\n if x_mask is not None:\n xWy.data.masked_fill_(x_mask.data, -float('inf'))\n alpha = F.softmax(xWy, dim=-1)\n return alpha\n\n\nclass encoder(nn.Module):\n def __init__(self, emb_src, emb_ans, src_special, ans_special):\n super(encoder, self).__init__()\n self.layers = 1\n self.num_directions = 2 \n self.hidden_size = int(hidden_size / self.num_directions)\n self.src_vocab_size = src_vocab_size + 3 # add pad, unk, eos\n self.emb_dim=300\n self.ans_dim=16\n self.dropout=0.3\n self.src_pad = src_special[0]\n self.src_unk = src_special[1]\n self.src_eos = src_special[2]\n self.ans_pad = ans_special[0]\n self.ans_unk = 
ans_special[1]\n self.ans_eos = ans_special[2]\n self.embedding = emb_src\n self.emb_ans = emb_ans\n self.rnn = nn.GRU(self.emb_dim + self.ans_dim, self.hidden_size,\n num_layers=self.layers,\n dropout=self.dropout,\n bidirectional=True)\n\n def forward(self, input, hidden=None):\n \"\"\"\n input: (source, lengths, ans)\n \"\"\"\n lengths = input[1].data.tolist() # lengths data is wrapped inside a Variable\n input_emb = self.embedding(input[0]) # input_emb: length X batch_size X vocab_size\n ans_emb = self.emb_ans(input[2])\n input_emb = torch.cat((input_emb, ans_emb), dim=-1)\n\n emb = pack(input_emb, lengths)\n outputs, hidden_t = self.rnn(emb, hidden)\n if isinstance(input, tuple):\n outputs = unpack(outputs)[0] # outputs: length X batch_size X hidden_size\n return outputs, hidden_t\n\nclass decoder(nn.Module):\n def __init__(self, emb_tgt, tgt_special):\n super(decoder, self).__init__() \n self.n_layers = 1\n self.hidden_size = hidden_size\n self.tgt_vocab_size = tgt_vocab_size + 4 # add pad, unk, eos, sos\n self.emb_dim = 300\n self.dropout = 0.3\n self.tgt_pad = tgt_special[0]\n self.tgt_unk = tgt_special[1]\n self.tgt_eos = tgt_special[2]\n self.tgt_sos = tgt_special[3]\n\n self.embedding = emb_tgt\n self.gru = nn.GRU(self.emb_dim, self.hidden_size, dropout=self.dropout, num_layers=self.n_layers)\n # self.gru = StackedGRU(self.n_layers, self.emb_dim, self.hidden_size, self.dropout)\n self.linear = nn.Linear(2 * self.hidden_size, self.hidden_size)\n self.out = nn.Linear(self.hidden_size, self.tgt_vocab_size)\n self.softmax = nn.LogSoftmax(dim=1)\n\n self.attn = BilinearSeqAttn(self.hidden_size, self.hidden_size)\n\n def _forward(self, input, hidden):\n \"\"\"\n Embeds input and applies GRU one token at a time (seq_len = 1)\n \"\"\"\n # input dim # batch_size\n input = self.embedding(input) # batch, emb_dim\n input = input.view(1, -1, self.emb_dim) # 1, batch, hidden\n output, hidden = self.gru(input, hidden) # 1, batch, hidden(out)\n output = self.out(output.view(-1, self.hidden_size)) # batch_size x vocab_size\n output = self.softmax(output)\n return output, hidden\n\n def forward(self, input, hidden, source_hiddens, mask_src):\n \"\"\"\n Embeds input and applies GRU one token at a time (seq_len = 1)\n \"\"\"\n # input dim # batch_size\n input = self.embedding(input) # batch, emb_dim\n input = input.view(1, -1, self.emb_dim) # 1 / 2, batch, hidden\n output, hidden = self.gru(input, hidden) # 1 / 2, batch, hidden(out)\n\n source_mask = mask_src\n source_hiddens = source_hiddens.transpose(0, 1).contiguous()\n scores = self.attn(source_hiddens, output.squeeze(0), source_mask) # batch * len\n context = scores.unsqueeze(1).bmm(source_hiddens).squeeze(1)\n\n output = self.out(F.tanh(self.linear(torch.cat((context, output.squeeze(0)), 1)))) # batch_size x vocab_size\n output = self.softmax(output)\n return output, hidden, scores\n\n\nclass NQGgenerator(nn.Module):\n def __init__(self, encoder, decoder, gpu=False):\n super(NQGgenerator, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n\n self.tgt_sos = decoder.tgt_sos\n self.tgt_pad = decoder.tgt_pad\n self.tgt_eos = decoder.tgt_eos\n self.src_pad = encoder.src_pad\n self.src_eos = encoder.src_eos\n\n self.max_seq_len = max_sent_len\n self.tgt_vocab_size = tgt_vocab_size + 4 # decoder side\n self.gpu = gpu\n \n def forward(self, letter, dec_hidden, enc_hiddens, mask_src): # dec_hidden is from encoder\n out, hidden, _ = self.decoder(letter, dec_hidden, enc_hiddens, mask_src)\n return out, hidden\n # out, hidden, att = 
self.decoder(letter, dec_hidden, enc_hiddens, mask_src)\n # return out, hidden, att\n\n\n def _sample(self, batch_size, seq_len, x=None):\n res = []\n flag = False # whether sample from zero\n if x is None:\n flag = True\n if flag:\n x = Variable(torch.zeros((batch_size, 1)).long())\n if self.use_cuda:\n x = x.cuda()\n h, c = self.init_hidden(batch_size)\n samples = []\n if flag:\n for i in range(seq_len):\n output, h, c = self.step(x, h, c)\n x = output.multinomial(1)\n samples.append(x)\n else:\n given_len = x.size(1)\n lis = x.chunk(x.size(1), dim=1)\n for i in range(given_len):\n output, h, c = self.step(lis[i], h, c)\n samples.append(lis[i])\n x = output.multinomial(1)\n for i in range(given_len, seq_len):\n samples.append(x)\n output, h, c = self.step(x, h, c)\n x = output.multinomial(1)\n output = torch.cat(samples, dim=1)\n return output\n\n def sample(self, src_input, way, x=None): # todo: running the encoder on every call is too wasteful.\n \"\"\"\n x : (batch_size, seq_len) input data\n Samples the network and returns num_samples samples of length max_seq_len.\n\n Outputs: samples, hidden\n - samples: num_samples x max_seq_length (a sampled sequence in each row)\n\n \"\"\"\n num_samples = len(src_input[1])\n samples = torch.ones(num_samples, self.max_seq_len).type(torch.LongTensor) * self.tgt_pad\n # add sos\n if x is None:\n samples[:, 0] = torch.ones(num_samples) * self.tgt_sos\n lengths = torch.ones(num_samples).type(torch.LongTensor)\n else:\n given_len = x.size(1)\n samples[:, :given_len] = x\n lengths = torch.ones(num_samples).type(torch.LongTensor)\n for m in range(num_samples):\n for p in range(given_len):\n if samples[m][p] == self.tgt_eos:\n lengths[m] = p+1\n break\n\n\n mask_src = src_input[0].eq(self.src_pad).permute(1, 0)\n context, enc_hidden = self.encoder(src_input)\n # hi=[-> hi; <- hi]\n h = torch.cat((enc_hidden[0], enc_hidden[1]), dim=-1).unsqueeze(0) # h: 1 X batch_size X hidden_size\n # h1 = torch.cat((enc_hidden[2], enc_hidden[3]), dim=-1).unsqueeze(0)\n # h = torch.cat((h0, h1), dim=0)\n # inp = autograd.Variable(torch.LongTensor([start_letter]*num_samples))\n\n if self.gpu:\n samples = samples.cuda()\n # inp = inp.cuda()\n h = h.cuda()\n lengths = lengths.cuda()\n\n # for j in range(num_samples):\n # h_j = h[:, j, :].unsqueeze(1)\n # inp_j = inp[j]\n # context_j = context[:, j, :].unsqueeze(1)\n # mask_src_j = mask_src[j].unsqueeze(0)\n # lengths[j] = self.max_seq_len\n #\n # for i in range(1, self.max_seq_len):\n # out_j, h_j = self.forward(inp_j, h_j, context_j, mask_src_j) # out: num_samples x vocab_size\n # out_j = torch.multinomial(torch.exp(out_j), 1) # sampling from j row\n # samples[j, i] = out_j.squeeze()\n # if samples[j, i] == tgt_eos:\n # lengths[j] = i + 1\n # break\n # else:\n # inp_j = out_j.view(-1)\n\n if x is None:\n for i in range(1, self.max_seq_len):\n out, h = self.forward(samples[:,i-1], h, context, mask_src)\n if way == 'random':\n # random sampling\n out_indexes = torch.multinomial(torch.exp(out), 1)\n if way == 'greedy':\n # greedy sampling\n out_indexes = torch.exp(out).max(1)[1].unsqueeze(1)\n\n if samples[:,i-1].eq(self.tgt_pad).sum() == num_samples:\n break\n\n # eos_mask = out_indexes.ne(tgt_pad)\n # pre_eos_mask = samples[:,i].ne(tgt_eos).ne(tgt_pad)\n _lens = 1 - samples[:,i-1].eq(self.tgt_eos) - samples[:,i-1].eq(self.tgt_pad)\n lengths = lengths + _lens.type_as(lengths)\n\n pad_mask = samples[:, i-1].eq(self.tgt_pad) + samples[:,i-1].eq(self.tgt_eos)\n # samples[:,i] = out_indexes.masked_fill_(pad_mask, tgt_pad)\n out_indexes = 
out_indexes.squeeze(1)\n out_indexes.masked_fill_(pad_mask, self.tgt_pad)\n samples[:, i] = out_indexes\n\n else:\n for i in range(given_len):\n out, h = self.forward(samples[:, i], h, context, mask_src)\n # out, h, att_i = self.forward(samples[:, i], h, context, mask_src)\n if way == 'random':\n # random sampling\n out_indexes = torch.multinomial(torch.exp(out), 1)\n if way == 'greedy':\n # greedy sampling\n out_indexes = torch.exp(out).max(1)[1].unsqueeze(1)\n\n\n pad_mask = samples[:, i].eq(self.tgt_pad) + samples[:, i].eq(self.tgt_eos)\n # samples[:,i] = out_indexes.masked_fill_(pad_mask, tgt_pad)\n out_indexes = out_indexes.squeeze(1)\n out_indexes.masked_fill_(pad_mask, self.tgt_pad)\n samples[:, i+1] = out_indexes\n\n _lens = 1 - samples[:, i].eq(self.tgt_eos) - samples[:, i].eq(self.tgt_pad)\n lengths = lengths + _lens.type_as(lengths)\n\n for i in range(given_len + 1, self.max_seq_len):\n out, h = self.forward(samples[:, i - 1], h, context, mask_src)\n\n if way == 'random':\n # random sampling\n out_indexes = torch.multinomial(torch.exp(out), 1)\n if way == 'greedy':\n # greedy sampling\n out_indexes = torch.exp(out).max(1)[1].unsqueeze(1)\n\n if samples[:, i - 1].eq(self.tgt_pad).sum() == num_samples:\n break\n # eos_mask = out_indexes.ne(tgt_pad)\n # pre_eos_mask = samples[:,i].ne(tgt_eos).ne(tgt_pad)\n _lens = 1 - samples[:, i - 1].eq(self.tgt_eos) - samples[:, i - 1].eq(self.tgt_pad)\n lengths = lengths + _lens.type_as(lengths) # todo: length is not correct\n\n pad_mask = samples[:, i - 1].eq(self.tgt_pad) + samples[:, i - 1].eq(self.tgt_eos)\n # samples[:,i] = out_indexes.masked_fill_(pad_mask, tgt_pad)\n out_indexes = out_indexes.squeeze(1)\n out_indexes.masked_fill_(pad_mask, self.tgt_pad)\n samples[:, i] = out_indexes\n\n return samples, lengths\n\n def batchNLLLoss(self, src_input, inp, target):\n \"\"\"\n Returns the NLL Loss for predicting target sequence.\n\n Inputs: inp, target\n - inp: batch_size x seq_len\n - target: batch_size x seq_len\n\n inp should be target with <s> (start letter) prepended\n \"\"\"\n weight = torch.ones(self.tgt_vocab_size).cuda()\n weight[self.tgt_pad] = 0\n loss_fn = nn.NLLLoss(weight, reduction='sum') # loss terms are weighted and summed, not averaged\n\n batch_size, seq_len = inp.size()\n inp = inp.permute(1, 0) # seq_len x batch_size\n target = target.permute(1, 0) # seq_len x batch_size\n mask_src = src_input[0].eq(self.src_pad).permute(1, 0)\n\n context, enc_hidden = self.encoder(src_input)\n # hi=[-> hi; <- hi]\n h = torch.cat((enc_hidden[0], enc_hidden[1]), dim=-1).unsqueeze(0)\n # h1 = torch.cat((enc_hidden[2], enc_hidden[3]), dim=-1).unsqueeze(0)\n # h = torch.cat((h0, h1), dim=0)\n \"\"\"\n if self.gpu:\n inp = inp.cuda()\n h = h.cuda()\n target = target.cuda()\n \"\"\"\n loss = 0\n for i in range(seq_len):\n out, h = self.forward(inp[i], h, context, mask_src)\n loss += loss_fn(out, target[i])\n\n return loss # per batch\n\n def batchPGLoss(self, src_input, inp, target, rewards):\n \"\"\"\n Returns a pseudo-loss that gives corresponding policy gradients (on calling .backward()).\n Inspired by the example in http://karpathy.github.io/2016/05/31/rl/\n\n Inputs: inp, target\n - inp: batch_size x seq_len\n - target: batch_size x seq_len\n - reward: batch_size (discriminator reward for each sentence, applied to each token of the corresponding\n sentence)\n\n inp should be target with <s> (start letter) prepended\n \"\"\"\n batch_size, seq_len = inp.size()\n inp = inp.permute(1, 0) # seq_len x batch_size\n target = target.permute(1, 0) # seq_len x batch_size\n mask_src = 
src_input[0].eq(self.src_pad).permute(1, 0)\n context, enc_hidden = self.encoder(src_input)\n # hi=[-> hi; <- hi]\n h = torch.cat((enc_hidden[0],enc_hidden[1]), dim=-1).unsqueeze(0)\n # h1 = torch.cat((enc_hidden[2], enc_hidden[3]), dim=-1).unsqueeze(0)\n # h = torch.cat((h0, h1), dim=0)\n loss = 0\n\n for i in range(seq_len):\n out, h = self.forward(inp[i], h, context, mask_src)\n mask = inp[i].eq(self.tgt_eos).float() + inp[i].eq(self.tgt_pad).float()\n mask = 1 - mask\n if any(mask):\n rewards_i = rewards[i+1] * mask\n # TODO: should h be detached from graph (.detach())?\n for j in range(batch_size):\n loss += -out[j][target.data[i][j]] * rewards_i[j] # log(P(y_t|Y_1:Y_{t-1})) * Q\n else:\n break\n return loss / batch_size" }, { "alpha_fraction": 0.75844806432724, "alphanum_fraction": 0.7834793329238892, "avg_line_length": 29.69230842590332, "blob_id": "d096f124186e69446eeb14f338169a0181770771", "content_id": "5e483d534fb7fe63fbca320f4674c4ea32601563", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 799, "license_type": "no_license", "max_line_length": 180, "num_lines": 26, "path": "/README.md", "repo_name": "bielinzz/Improving-NQG-with-CGAN", "src_encoding": "UTF-8", "text": "## Improving Neural Question Generation with CGAN\n\nThis repository contains code about Improving NQG with CGAN, which adds a discriminator to do adversarial training to improve the performance of neural question generation network.\n\n![Diagram](img/nqg+cgan.jpg)\n\n(image from the reference below)\n\n## About the Code\n\nEnvironment: Pytorch 4.0, Python 3.6 \n\nPackage Requirement: torchtext, nltk, numpy\n\nRun on GPU: Python train.py\n\n## Performance\n\nPre-trained MLE Model: BLEU-4: 8.3\n\nModel after adversarial training: BLEU-4: 8.5\n\nAlthough the BLEU-4 metric is not very high in the experiment, this code provides a basic framework which might be useful.\n\n## Reference\n- [Improving Neural Machine Translation with Conditional Sequence Generative Adversarial Nets](https://arxiv.org/pdf/1703.04887.pdf)\n\n" }, { "alpha_fraction": 0.5540462136268616, "alphanum_fraction": 0.5744219422340393, "avg_line_length": 37.010990142822266, "blob_id": "a18939bc3ca29c6cceb9d1a67813a52c1c24dc17", "content_id": "a9cf6bfe440a394775b383a367c5836dbf5e0d21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6976, "license_type": "no_license", "max_line_length": 129, "num_lines": 182, "path": "/code/evaluation.py", "repo_name": "bielinzz/Improving-NQG-with-CGAN", "src_encoding": "UTF-8", "text": "import torch\nfrom nltk.translate import bleu_score\nfrom beam import Beam\nfrom config import CUDA, beamsize, n_Best, max_sent_length, hidden_size\n\ndef translateBatch(srcBatch, model, src_special, tgt_special):\n srcdata= srcBatch[0]\n src_pad = src_special[0]\n src_eos = src_special[2]\n\n batchSize = srcBatch[0].size(1)\n beamSize = beamsize\n tt = torch.cuda if CUDA else torch\n\n mask_src = srcBatch[0].eq(src_pad).permute(1, 0).unsqueeze(0).repeat(beamSize, 1, 1) # 5, batch_size, length\n # (1) run the encoder on the src\n context, enc_hidden = model.encoder(srcBatch)\n # hi=[-> hi; <- hi]\n h0 = torch.cat((enc_hidden[0], enc_hidden[1]), dim=-1).unsqueeze(0)\n # h1 = torch.cat((enc_hidden[2], enc_hidden[3]), dim=-1).unsqueeze(0)\n # decStates = torch.cat((h0, h1), dim=0)\n decStates = h0\n\n decStates = decStates.repeat(1, beamSize, 1) # 1 / 2 * 320 * 512\n context = context.repeat(1, beamSize, 1)\n\n beam = 
[Beam(beamSize, tgt_special, CUDA) for k in range(batchSize)]\n batchIdx = list(range(batchSize)) # 1 ~ 64\n remainingSents = batchSize # 64\n\n for i in range(max_sent_length):\n # Prepare decoder input.\n input = torch.stack([b.getCurrentState() for b in beam\n if not b.done]).transpose(0, 1).contiguous().view(1, -1) # 1 * 320 all is '2'\n input = input.squeeze(0)\n out_prob, decStates, attn = model.decoder(input, decStates, context, mask_src.view(-1, mask_src.size(2))) # 320 * 20000\n\n # batch x beam x numWords\n wordLk = out_prob.view(beamSize, remainingSents, -1).transpose(0, 1).contiguous() # 64 * 5 *20000\n attn = attn.view(beamSize, remainingSents, -1).transpose(0, 1).contiguous()\n\n active = []\n father_idx = []\n for b in range(batchSize):\n if beam[b].done:\n continue\n\n idx = batchIdx[b]\n if not beam[b].advance(wordLk.data[idx], attn.data[idx]):\n active += [b]\n father_idx.append(beam[b].prevKs[-1]) # this is very annoying\n\n if not active:\n break\n\n # to get the real father index\n real_father_idx = []\n for kk, idx in enumerate(father_idx):\n real_father_idx.append(idx * len(father_idx) + kk)\n\n # in this section, the sentences that are still active are\n # compacted so that the decoder is not run on completed sentences\n activeIdx = tt.LongTensor([batchIdx[k] for k in active]) # select active batch\n batchIdx = {beam: idx for idx, beam in enumerate(active)} # beam: actual batch id, idx: index within active\n\n def updateActive(t, rnnSize):\n # select only the remaining active sentences\n view = t.data.view(-1, remainingSents, rnnSize)\n newSize = list(t.size())\n newSize[-2] = newSize[-2] * len(activeIdx) // remainingSents # reduce batchsize\n return view.index_select(1, activeIdx).view(*newSize)\n\n # decStates = torch.cat((decStates[0],decStates[1]), dim=-1).unsqueeze(0) # todo:\n decStates = updateActive(decStates, hidden_size) # 1 * 5*remainingsents * 512*2\n context = updateActive(context, hidden_size)\n mask_src = mask_src.index_select(1, activeIdx) # 5 * remainingsents * 98\n\n # set correct state for beam search\n previous_index = torch.stack(real_father_idx).transpose(0, 1).contiguous() #\n decStates = decStates.view(-1, decStates.size(2)).index_select(0, previous_index.view(-1)).view(\n *decStates.size())\n # decStates = torch.cat((decStates[:,:,:hidden_size],decStates[:,:,hidden_size:]), dim=0) # todo:\n remainingSents = len(active)\n\n # (4) package everything up\n allHyp, allScores, allattn = [], [], []\n n_best = n_Best\n\n for b in range(batchSize):\n scores, ks = beam[b].sortBest()\n allScores += [scores[:n_best]]\n valid_attn = srcdata[:, b].ne(src_pad).ne(src_eos).nonzero().squeeze(1)\n hyps, attn = zip(*[beam[b].getHyp(k) for k in ks[:n_best]])\n attn = [a.index_select(1, valid_attn) for a in attn] # batch, n_best, len\n allHyp += [hyps]\n allattn += [attn]\n\n return allHyp, allScores, allattn\n\n\ndef delete_eos(idx, stop):\n idx0 = []\n for i in idx:\n idx0 += [i]\n if i == stop:\n idx0 = idx0[:-1]\n break\n return idx0\n\ndef delete_pad(sent):\n pad = '<pad>'\n i = len(sent) - 1\n while sent[i] == pad:\n del sent[i]\n i -= 1\n return sent\n\n\ndef evalModel(model, iterator, epoch, rev, src_special, tgt_special, tgt_ref, src_rev):\n tgt_eos = tgt_special[2]\n\n predict, gold = [], []\n for i, data in enumerate(iterator):\n tgt_data = data.target[0].permute(1, 0) # batch_size X length\n src_data_wrap = data.source\n ans = data.answer[0]\n src_data = src_data_wrap[0].permute(1, 0)\n\n if CUDA:\n scr_data = data.source[0].cuda()\n scr_lengths = 
data.source[1].cuda()\n ans = ans.cuda()\n src_data_wrap = (scr_data, scr_lengths, ans)\n\n pred, predScore, attn = translateBatch(src_data_wrap, model, src_special, tgt_special)\n # attn: batch, n_best, len_tgt, len_src\n predBatch = []\n for b in range(src_data_wrap[0].size(1)):\n n = 0\n predb = torch.stack(pred[b][n]).cpu()\n predb = delete_eos(predb, tgt_eos)\n att = attn[b][n]\n predb = torch.stack(predb) # todo: predb may be empty in rare cases; data issue or logic issue? (almost never happens)\n raw = rev.reverse(predb.unsqueeze(1), src_data[b], att, src_rev) # todo: should add post-process?\n predBatch.append(raw[0].split())\n # nltk BLEU evaluator needs tokenized sentences\n\n # tgt_raw = []\n # for k in range(tgt_data.size(0)):\n # tgt = rev.reverse(tgt_data[k][1:].unsqueeze(1))\n # tgt_raw.append(tgt[0].split())\n #\n # gold += [[r] for r in tgt_raw]\n predict += predBatch\n\n # for i, data in enumerate(val_iter_raw):\n # tgt_data = data.target[0].permute(1, 0) # batch_size X length\n # tgt_raw = []\n # for k in range(tgt_data.size(0)):\n # tgt = rev_raw.reverse(tgt_data[k].unsqueeze(1))\n # tgt_raw.append(tgt[0].split())\n #\n # gold += [[delete_pad(r)] for r in tgt_raw]\n\n tgt_writer = open(\"tgt_output.txt\", \"w\")\n hyp_writer = open(\"hyp_output.txt\", \"w\")\n for i in tgt_ref:\n tgt_writer.write(\"__eos__\".join([\" \".join(i[_id]) for _id in range(len(i))]) + \"\\n\")\n for j in predict:\n hyp_writer.write(\" \".join(j) + \"\\n\")\n\n\n bleu = bleu_score.corpus_bleu(tgt_ref, predict)\n f = open('bleu' + str(epoch) + '.txt', 'w', encoding='utf-8')\n f.write(str(tgt_ref))\n f.write(' ')\n f.write(str(predict))\n f.close()\n\n report_metric = bleu\n\n return report_metric\n\n\n" }, { "alpha_fraction": 0.6041055917739868, "alphanum_fraction": 0.6979472041130066, "avg_line_length": 14.782608985900879, "blob_id": "5c58bf593888de99f65f819cd10b19cf5b15b658", "content_id": "21b46912a9481ab812c83cb9db6de3b30f274981", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 341, "license_type": "no_license", "max_line_length": 40, "num_lines": 23, "path": "/code/config.py", "repo_name": "bielinzz/Improving-NQG-with-CGAN", "src_encoding": "UTF-8", "text": "import torch\n\nCUDA=True\ndevice = torch.device(\"cuda:0\")\n# generator\nmax_sent_len = 100 # sample\nhidden_size = 512\n\n# beam_search\nbeamsize = 5\nn_Best = 1\nmax_sent_length = 35 # todo: maybe small\n\n\n# rollout_reward_setting\nlamda = 0.8\nbase = 0.5\n\n# data_setting\ntgt_vocab_size = 28000\nsrc_vocab_size = 45000\nbatch_size = 64\ndata_name = 86636\n\n" }, { "alpha_fraction": 0.58046954870224, "alphanum_fraction": 0.5845525860786438, "avg_line_length": 36.67948532104492, "blob_id": "3c0052ff2c06a2d7f4492b0235a959ae15f62ec0", "content_id": "3b4531b076e2ca2cea7cd4c4672ff250c694543c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2939, "license_type": "no_license", "max_line_length": 114, "num_lines": 78, "path": "/code/Optim.py", "repo_name": "bielinzz/Improving-NQG-with-CGAN", "src_encoding": "UTF-8", "text": "import torch.optim as optim\nfrom torch.nn.utils import clip_grad_norm_\nimport myAdam\n\n\nclass Optim(object):\n def set_parameters(self, params):\n self.params = list(params) # careful: params may be a generator\n if self.method == 'sgd':\n self.optimizer = optim.SGD(self.params, lr=self.lr)\n elif self.method == 'adagrad':\n self.optimizer = optim.Adagrad(self.params, lr=self.lr)\n elif self.method == 'adadelta':\n self.optimizer = 
optim.Adadelta(self.params, lr=self.lr)\n elif self.method == 'myadam':\n self.optimizer = myAdam.MyAdam(self.params, lr=self.lr)\n elif self.method == 'adam':\n self.optimizer = optim.Adam(self.params, lr=self.lr)\n\n else:\n raise RuntimeError(\"Invalid optim method: \" + self.method)\n\n def __init__(self, method, lr, lr_decay=None, start_decay_at=None, max_weight_value=None, max_grad_norm=None):\n self.lr = lr\n self.max_grad_norm = max_grad_norm\n self.max_weight_value = max_weight_value\n self.method = method\n self.lr_decay = lr_decay\n self.start_decay_at = start_decay_at\n self.best_metric = 0\n self.bad_count = 0\n self.decay_bad_count = 3\n\n def step(self):\n # Compute gradients norm.\n if self.max_grad_norm:\n clip_grad_norm_(self.params, self.max_grad_norm)\n self.optimizer.step()\n if self.max_weight_value:\n for p in self.params:\n p.data.clamp_(0 - self.max_weight_value, self.max_weight_value)\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n def _updateLearningRate(self, epoch):\n if self.start_decay_at is not None and self.start_decay_at == epoch:\n self.lr = self.lr * self.lr_decay\n print(\"Decaying learning rate to %g\" % self.lr)\n\n def updateLearningRate(self, bleu):\n # if self.start_decay_at is not None and epoch >= self.start_decay_at:\n # self.start_decay = True\n # if self.last_ppl is not None and ppl > self.last_ppl:\n # self.start_decay = True\n #\n # if self.start_decay:\n # self.lr = self.lr * self.lr_decay\n # print(\"Decaying learning rate to %g\" % self.lr)\n\n # self.last_ppl = ppl\n if bleu >= self.best_metric:\n self.best_metric = bleu\n self.bad_count = 0\n else:\n self.bad_count += 1\n\n if self.bad_count >= self.decay_bad_count and self.lr >= 1e-4:\n self.lr = self.lr * self.lr_decay\n print(\"Decaying learning rate to %g\" % self.lr)\n self.bad_count = 0\n self.optimizer.param_groups[0]['lr'] = self.lr\n\n def reset_learningrate(self, learningrate):\n self.lr = learningrate\n self.best_metric = 0\n print(\"Resetting learning rate to %g\" % self.lr)\n self.optimizer.param_groups[0]['lr'] = self.lr\n" }, { "alpha_fraction": 0.5680801272392273, "alphanum_fraction": 0.5805795192718506, "avg_line_length": 42.2060546875, "blob_id": "62dfdec6bf0165a6abf91dffee65fc924416a0ed", "content_id": "22f1504d28da162cd90597989bcd14760acf4ae0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 44244, "license_type": "no_license", "max_line_length": 120, "num_lines": 1024, "path": "/code/discriminator.py", "repo_name": "bielinzz/Improving-NQG-with-CGAN", "src_encoding": "UTF-8", "text": "import torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import pad_packed_sequence as unpack\nfrom torch.nn.utils.rnn import pack_padded_sequence as pack\nimport torch.nn.functional as F\nfrom torch.nn import Conv1d, Linear\nfrom torch.nn.parameter import Parameter\n\nfrom typing import Sequence, Dict, List, Callable, Tuple, Optional\nimport math\nimport numpy as np\n\nfrom data import tgt_vocab_size, src_vocab_size\nimport util\n\nclass Discriminator(nn.Module):\n def __init__(self, emb_src, emb_tgt, emb_ans, gpu=False, dropout=0.5):\n super(Discriminator, self).__init__()\n # self.hidden_dim = 256\n self.embedding_dim = 300\n self.ans_dim = 16\n\n # self.gpu = gpu\n self.tgt_vocab_size = tgt_vocab_size + 4\n self.src_vocab_size = src_vocab_size + 3\n self.ans_vocab_size = 6\n # self.embeddings = nn.Embedding(self.vocab_size, self.embedding_dim)\n # self.gru = 
nn.GRU(self.embedding_dim, self.hidden_dim, num_layers=2, dropout=dropout)\n # self.gru2hidden = nn.Linear(2*self.hidden_dim, self.hidden_dim)\n # self.dropout_linear = nn.Dropout(p=dropout)\n # self.hidden2out = nn.Linear(self.hidden_dim, 1)\n self.emb_ans=emb_ans\n\n self.kernel_num_pa = 40\n self.kernel_num_qu = 40\n self.kernel_widths = [2, 3, 4, 5] # todo: need to change?\n self.embeddings_src = emb_src\n self.convs1 = nn.ModuleList([nn.Conv2d(1, self.kernel_num_qu, (kernel_width, self.embedding_dim))\n for kernel_width in self.kernel_widths])\n\n self.embeddings_tgt = emb_tgt\n self.convs2 = nn.ModuleList([nn.Conv2d(1, self.kernel_num_pa, (kernel_width, self.embedding_dim + self.ans_dim))\n for kernel_width in self.kernel_widths])\n self.dropout = nn.Dropout(0.3)\n self.bn_qu = nn.BatchNorm2d(self.kernel_num_qu)\n self.bn_pa = nn.BatchNorm2d(self.kernel_num_pa)\n self.fc1 = nn.Linear(len(self.kernel_widths)*self.kernel_num_pa + len(self.kernel_widths)*self.kernel_num_qu, 1)\n\n def init_hidden(self, batch_size):\n h = autograd.Variable(torch.zeros(1, batch_size, self.hidden_dim))\n\n if self.gpu:\n return h.cuda()\n else:\n return h\n\n def __forward(self, query, passage):\n query = query[0]\n x = self.embeddings_tgt(query) # (N, W, D)\n x = x.unsqueeze(1) # (N, Ci, W, D)\n x = [F.relu(self.bn_qu(conv(x))).squeeze(3) for conv in self.convs1] # [(N, Co, W), ...]*len(Ks)\n x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] # [(N, Co), ...]*len(Ks)\n x = torch.cat(x, 1)\n\n ans = passage[1]\n ans = self.emb_ans(ans)\n pa = passage[0]\n y = self.embeddings_src(pa) # (N, W, D)\n y = torch.cat((y, ans), dim=-1)\n y = y.unsqueeze(1) # (N, Ci, W, D)\n y = [F.relu(self.bn_qu(conv(y))).squeeze(3) for conv in self.convs1] # [(N, Co, W), ...]*len(Ks)\n y = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in y] # [(N, Co), ...]*len(Ks)\n y = torch.cat(y, 1)\n\n p = torch.cat((x, y), 1)\n logit = torch.sigmoid(self.fc1(p))\n return logit\n\n def _forward(self, input, hidden):\n \"\"\"\n input: (data, lengths)\n \"\"\"\n lengths = input[1].tolist()\n src_data = input[0]\n\n emb = self.embeddings(src_data) # batch_size x seq_len x embedding_dim\n emb = emb.permute(1, 0, 2) # seq_len x batch_size x embedding_dim\n emb = pack(emb, lengths)\n _, hidden = self.gru(emb, hidden) # 4 x batch_size x hidden_dim\n hidden = hidden.permute(1, 0, 2).contiguous() # batch_size x 4 x hidden_dim\n out = self.gru2hidden(hidden.view(-1, 2*self.hidden_dim)) # batch_size x 4*hidden_dim\n out = torch.tanh(out)\n out = self.dropout_linear(out)\n out = self.hidden2out(out) # batch_size x 1\n out = torch.sigmoid(out)\n return out\n\n def forward(self, query, passage):\n query = query[0]\n x = self.embeddings_tgt(query) # (N, W, D)\n x = x.unsqueeze(1) # (N, Ci, W, D)\n x = [F.relu(self.bn_qu(conv(x))).squeeze(3) for conv in self.convs1] # [(N, Co, W), ...]*len(Ks)\n x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] # [(N, Co), ...]*len(Ks)\n x = torch.cat(x, 1)\n\n ans = passage[1]\n ans = self.emb_ans(ans)\n pa = passage[0]\n y = self.embeddings_src(pa) # (N, W, D)\n y = torch.cat((y, ans), dim=-1)\n y = y.unsqueeze(1) # (N, Ci, W, D)\n y = [F.relu(self.bn_pa(conv(y))).squeeze(3) for conv in self.convs2] # [(N, Co, W), ...]*len(Ks)\n y = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in y] # [(N, Co), ...]*len(Ks)\n y = torch.cat(y, 1)\n\n p = torch.cat((x, y), 1)\n logit = torch.sigmoid(self.fc1(p))\n return logit\n\n def batchClassify(self, inp, passage):\n \"\"\"\n Classifies a batch of sequences.\n\n Inputs: inp\n - 
inp: batch_size x seq_len\n\n Returns: out\n - out: batch_size ([0,1] score)\n \"\"\"\n # h = self.init_hidden(inp[0].size()[0])\n out = self.forward(inp, passage)\n return out.view(-1)\n\n\nfrom bert import BidirectionalTransformerEncoder\n\n\nclass LinearSimilarity(nn.Module):\n \"\"\"\n This similarity function performs a dot product between a vector of weights and some\n combination of the two input vectors, followed by an (optional) activation function. The\n combination used is configurable.\n If the two vectors are ``x`` and ``y``, we allow the following kinds of combinations: ``x``,\n ``y``, ``x*y``, ``x+y``, ``x-y``, ``x/y``, where each of those binary operations is performed\n elementwise. You can list as many combinations as you want, comma separated. For example, you\n might give ``x,y,x*y`` as the ``combination`` parameter to this class. The computed similarity\n function would then be ``w^T [x; y; x*y] + b``, where ``w`` is a vector of weights, ``b`` is a\n bias parameter, and ``[;]`` is vector concatenation.\n Note that if you want a bilinear similarity function with a diagonal weight matrix W, where the\n similarity function is computed as `x * w * y + b` (with `w` the diagonal of `W`), you can\n accomplish that with this class by using \"x*y\" for `combination`.\n Parameters\n ----------\n tensor_1_dim : ``int``\n The dimension of the first tensor, ``x``, described above. This is ``x.size()[-1]`` - the\n length of the vector that will go into the similarity computation. We need this so we can\n build weight vectors correctly.\n tensor_2_dim : ``int``\n The dimension of the second tensor, ``y``, described above. This is ``y.size()[-1]`` - the\n length of the vector that will go into the similarity computation. We need this so we can\n build weight vectors correctly.\n combination : ``str``, optional (default=\"x,y\")\n Described above.\n activation : ``Activation``, optional (default=linear (i.e. no activation))\n An activation function applied after the ``w^T * [x;y] + b`` calculation. 
Default is no\n activation.\n \"\"\"\n def __init__(self,\n tensor_1_dim: int,\n tensor_2_dim: int,\n combination: str = 'x,y',\n activation = None) -> None:\n super(LinearSimilarity, self).__init__()\n self._combination = combination\n combined_dim = util.get_combined_dim(combination, [tensor_1_dim, tensor_2_dim])\n self._weight_vector = Parameter(torch.Tensor(combined_dim))\n self._bias = Parameter(torch.Tensor(1))\n # self._activation = lambda: lambda x: x()\n self.reset_parameters()\n\n def reset_parameters(self):\n std = math.sqrt(6 / (self._weight_vector.size(0) + 1))\n self._weight_vector.data.uniform_(-std, std)\n self._bias.data.fill_(0)\n\n def forward(self, tensor_1: torch.Tensor, tensor_2: torch.Tensor) -> torch.Tensor:\n combined_tensors = util.combine_tensors(self._combination, [tensor_1, tensor_2])\n dot_product = torch.matmul(combined_tensors, self._weight_vector)\n # return self._activation(dot_product + self._bias)\n return dot_product + self._bias\n\nclass DotProductSimilarity(nn.Module):\n \"\"\"\n This similarity function simply computes the dot product between each pair of vectors, with an\n optional scaling to reduce the variance of the output elements.\n\n Parameters\n ----------\n scale_output : ``bool``, optional\n If ``True``, we will scale the output by ``math.sqrt(tensor.size(-1))``, to reduce the\n variance in the result.\n \"\"\"\n def __init__(self, scale_output: bool = False) -> None:\n super(DotProductSimilarity, self).__init__()\n self._scale_output = scale_output\n\n\n def forward(self, tensor_1: torch.Tensor, tensor_2: torch.Tensor) -> torch.Tensor:\n result = (tensor_1 * tensor_2).sum(dim=-1)\n if self._scale_output:\n result *= math.sqrt(tensor_1.size(-1))\n return result\n\nclass LegacyMatrixAttention(nn.Module):\n \"\"\"\n The legacy implementation of ``MatrixAttention``.\n It should be considered deprecated as it uses much more memory than the newer specialized\n ``MatrixAttention`` modules.\n Parameters\n ----------\n similarity_function: ``SimilarityFunction``, optional (default=``DotProductSimilarity``)\n The similarity function to use when computing the attention.\n \"\"\"\n def __init__(self) -> None:\n super().__init__()\n # self._similarity_function = LinearSimilarity(tensor_1_dim=600,\n # tensor_2_dim=600,\n # combination=\"x,y,x*y\")\n self._similarity_function = DotProductSimilarity()\n\n def forward(self, matrix_1: torch.Tensor, matrix_2: torch.Tensor) -> torch.Tensor:\n tiled_matrix_1 = matrix_1.unsqueeze(2).expand(matrix_1.size()[0],\n matrix_1.size()[1],\n matrix_2.size()[1],\n matrix_1.size()[2])\n tiled_matrix_2 = matrix_2.unsqueeze(1).expand(matrix_2.size()[0],\n matrix_1.size()[1],\n matrix_2.size()[1],\n matrix_2.size()[2])\n return self._similarity_function(tiled_matrix_1, tiled_matrix_2)\n\nclass Highway(torch.nn.Module):\n \"\"\"\n A `Highway layer <https://arxiv.org/abs/1505.00387>`_ does a gated combination of a linear\n transformation and a non-linear transformation of its input. :math:`y = g * x + (1 - g) *\n f(A(x))`, where :math:`A` is a linear transformation, :math:`f` is an element-wise\n non-linearity, and :math:`g` is an element-wise gate, computed as :math:`sigmoid(B(x))`.\n This module will apply a fixed number of highway layers to its input, returning the final\n result.\n Parameters\n ----------\n input_dim : ``int``\n The dimensionality of :math:`x`. 
We assume the input has shape ``(batch_size, ...,\n input_dim)``.\n num_layers : ``int``, optional (default=``1``)\n The number of highway layers to apply to the input.\n activation : ``Callable[[torch.Tensor], torch.Tensor]``, optional (default=``torch.nn.functional.relu``)\n The non-linearity to use in the highway layers.\n \"\"\"\n def __init__(self,\n input_dim: int,\n num_layers: int = 1,\n activation: Callable[[torch.Tensor], torch.Tensor] = torch.nn.functional.relu) -> None:\n super(Highway, self).__init__()\n self._input_dim = input_dim\n self._layers = torch.nn.ModuleList([torch.nn.Linear(input_dim, input_dim * 2)\n for _ in range(num_layers)])\n self._activation = activation\n for layer in self._layers:\n # We should bias the highway layer to just carry its input forward. We do that by\n # setting the bias on `B(x)` to be positive, because that means `g` will be biased to\n # be high, so we will carry the input forward. The bias on `B(x)` is the second half\n # of the bias vector in each Linear layer.\n layer.bias[input_dim:].data.fill_(1)\n\n def forward(self, inputs: torch.Tensor) -> torch.Tensor: # pylint: disable=arguments-differ\n current_input = inputs\n for layer in self._layers:\n projected_input = layer(current_input)\n linear_part = current_input\n # NOTE: if you modify this, think about whether you should modify the initialization\n # above, too.\n nonlinear_part, gate = projected_input.chunk(2, dim=-1)\n nonlinear_part = self._activation(nonlinear_part)\n gate = torch.sigmoid(gate)\n current_input = gate * linear_part + (1 - gate) * nonlinear_part\n return current_input\n\nclass MaskedLayerNorm(torch.nn.Module):\n def __init__(self, size: int, gamma0: float = 0.1, eps: float = 1e-6) -> None:\n super().__init__()\n self.gamma = torch.nn.Parameter(torch.ones(1, 1, size) * gamma0)\n self.beta = torch.nn.Parameter(torch.zeros(1, 1, size))\n self.size = size\n self.eps = eps\n\n def forward(self, tensor: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:\n # pylint: disable=arguments-differ\n broadcast_mask = mask.unsqueeze(-1).float()\n num_elements = broadcast_mask.sum() * self.size\n mean = (tensor * broadcast_mask).sum() / num_elements\n masked_centered = (tensor - mean) * broadcast_mask\n std = torch.sqrt(\n (masked_centered * masked_centered).sum() / num_elements + self.eps\n )\n return self.gamma * (tensor - mean) / (std + self.eps) + self.beta\n\n_VALID_PROJECTION_LOCATIONS = {'after_cnn', 'after_highway', None}\nclass CnnHighwayEncoder(nn.Module):\n \"\"\"\n The character CNN + highway encoder from Kim et al \"Character aware neural language models\"\n https://arxiv.org/abs/1508.06615\n with an optional projection.\n Parameters\n ----------\n embedding_dim: int\n The dimension of the initial character embedding.\n filters: ``Sequence[Sequence[int]]``\n A sequence of pairs (filter_width, num_filters).\n num_highway: int\n The number of highway layers.\n projection_dim: int\n The output dimension of the projection layer.\n activation: str, optional (default = 'relu')\n The activation function for the convolutional layers.\n projection_location: str, optional (default = 'after_highway')\n Where to apply the projection layer. 
Valid values are\n 'after_highway', 'after_cnn', and None.\n do_layer_norm: bool, optional (default = False)\n If True, we apply ``MaskedLayerNorm`` to the final encoded result.\n \"\"\"\n def __init__(self,\n embedding_dim: int,\n filters: Sequence[Sequence[int]],\n num_highway: int,\n projection_dim: int,\n activation: str = 'relu',\n projection_location: str = 'after_highway',\n do_layer_norm: bool = False) -> None:\n super().__init__()\n\n if projection_location not in _VALID_PROJECTION_LOCATIONS:\n raise ConfigurationError(f\"unknown projection location: {projection_location}\")\n\n self.input_dim = embedding_dim\n self.output_dim = projection_dim\n self._projection_location = projection_location\n\n if activation == 'tanh':\n self._activation = torch.nn.functional.tanh\n elif activation == 'relu':\n self._activation = torch.nn.functional.relu\n else:\n raise ConfigurationError(f\"unknown activation {activation}\")\n\n # Create the convolutions\n self._convolutions: List[torch.nn.Module] = []\n for i, (width, num) in enumerate(filters):\n conv = torch.nn.Conv1d(in_channels=embedding_dim,\n out_channels=num,\n kernel_size=width,\n bias=True)\n conv.weight.data.uniform_(-0.05, 0.05)\n conv.bias.data.fill_(0.0)\n self.add_module(f\"char_conv_{i}\", conv) # needs to match the old ELMo name\n self._convolutions.append(conv)\n\n # Create the highway layers\n num_filters = sum(num for _, num in filters)\n if projection_location == 'after_cnn':\n highway_dim = projection_dim\n else:\n # highway_dim is the number of cnn filters\n highway_dim = num_filters\n self._highways = Highway(highway_dim, num_highway, activation=torch.nn.functional.relu)\n for highway_layer in self._highways._layers: # pylint: disable=protected-access\n # highway is a linear layer for each highway layer\n # with fused W and b weights\n highway_layer.weight.data.normal_(mean=0.0, std=np.sqrt(1.0 / highway_dim))\n highway_layer.bias[:highway_dim].data.fill_(0.0)\n highway_layer.bias[highway_dim:].data.fill_(2.0)\n\n # Projection layer: always num_filters -> projection_dim\n self._projection = torch.nn.Linear(num_filters, projection_dim, bias=True)\n self._projection.weight.data.normal_(mean=0.0, std=np.sqrt(1.0 / num_filters))\n self._projection.bias.data.fill_(0.0)\n\n # And add a layer norm\n if do_layer_norm:\n self._layer_norm: Callable = MaskedLayerNorm(self.output_dim, gamma0=0.1)\n else:\n self._layer_norm = lambda tensor, mask: tensor\n\n def forward(self,\n inputs: torch.Tensor,\n mask: torch.Tensor) -> Dict[str, torch.Tensor]:\n \"\"\"\n Compute context insensitive token embeddings for ELMo representations.\n Parameters\n ----------\n inputs:\n Shape ``(batch_size, num_tokens, embedding_dim)``\n of character embeddings representing the current batch.\n mask:\n Shape ``(batch_size, num_tokens)``\n mask for the current batch.\n Returns\n -------\n ``encoding``:\n Shape ``(batch_size, sequence_length, embedding_dim2)`` tensor\n with context-insensitive token representations. 
If bos_characters and eos_characters\n are being added, the second dimension will be ``sequence_length + 2``.\n \"\"\"\n # pylint: disable=arguments-differ\n\n # convolutions want (batch_size, embedding_dim, num_tokens)\n inputs = inputs.transpose(1, 2)\n\n convolutions = []\n for i in range(len(self._convolutions)):\n char_conv_i = getattr(self, f\"char_conv_{i}\")\n convolved = char_conv_i(inputs)\n\n # (batch_size, n_filters for this width)\n convolved, _ = torch.max(convolved, dim=-1)\n convolved = self._activation(convolved)\n convolutions.append(convolved)\n\n # (batch_size, n_filters)\n token_embedding = torch.cat(convolutions, dim=-1)\n\n if self._projection_location == 'after_cnn':\n token_embedding = self._projection(token_embedding)\n\n # apply the highway layers (batch_size, highway_dim)\n token_embedding = self._highways(token_embedding)\n\n if self._projection_location == 'after_highway':\n # final projection (batch_size, embedding_dim)\n token_embedding = self._projection(token_embedding)\n\n # Apply layer norm if appropriate\n token_embedding = self._layer_norm(token_embedding, mask)\n\n return token_embedding\n\n def get_input_dim(self) -> int:\n return self.input_dim\n\n def get_output_dim(self) -> int:\n return self.output_dim\n\nclass CnnEncoder(nn.Module):\n \"\"\"\n A ``CnnEncoder`` is a combination of multiple convolution layers and max pooling layers. As a\n :class:`Seq2VecEncoder`, the input to this module is of shape ``(batch_size, num_tokens,\n input_dim)``, and the output is of shape ``(batch_size, output_dim)``.\n The CNN has one convolution layer for each ngram filter size. Each convolution operation gives\n out a vector of size num_filters. The number of times a convolution layer will be used\n is ``num_tokens - ngram_size + 1``. The corresponding maxpooling layer aggregates all these\n outputs from the convolution layer and outputs the max.\n This operation is repeated for every ngram size passed, and consequently the dimensionality of\n the output after maxpooling is ``len(ngram_filter_sizes) * num_filters``. This then gets\n (optionally) projected down to a lower dimensional output, specified by ``output_dim``.\n We then use a fully connected layer to project it back to the desired output_dim. For more\n details, refer to \"A Sensitivity Analysis of (and Practitioners’ Guide to) Convolutional Neural\n Networks for Sentence Classification\", Zhang and Wallace 2016, particularly Figure 1.\n Parameters\n ----------\n embedding_dim : ``int``\n This is the input dimension to the encoder. We need this because we can't do shape\n inference in pytorch, and we need to know what size filters to construct in the CNN.\n num_filters: ``int``\n This is the output dim for each convolutional layer, which is the number of \"filters\"\n learned by that layer.\n ngram_filter_sizes: ``Tuple[int]``, optional (default=``(2, 3, 4, 5)``)\n This specifies both the number of convolutional layers we will create and their sizes. The\n default of ``(2, 3, 4, 5)`` will have four convolutional layers, corresponding to encoding\n ngrams of size 2 to 5 with some number of filters.\n conv_layer_activation: ``Activation``, optional (default=``torch.nn.ReLU``)\n Activation to use after the convolution layers.\n output_dim : ``Optional[int]``, optional (default=``None``)\n After doing convolutions and pooling, we'll project the collected features into a vector of\n this size. 
If this value is ``None``, we will just return the result of the max pooling,\n giving an output of shape ``len(ngram_filter_sizes) * num_filters``.\n \"\"\"\n def __init__(self,\n embedding_dim: int,\n num_filters: int,\n ngram_filter_sizes: Tuple[int, ...] = (3,5), # pylint: disable=bad-whitespace\n conv_layer_activation = None,\n output_dim: Optional[int] = None) -> None:\n super(CnnEncoder, self).__init__()\n self._embedding_dim = embedding_dim\n self._num_filters = num_filters\n self._ngram_filter_sizes = ngram_filter_sizes\n self._activation = nn.ReLU()\n self._output_dim = output_dim\n\n self._convolution_layers = [Conv1d(in_channels=self._embedding_dim,\n out_channels=self._num_filters,\n kernel_size=ngram_size)\n for ngram_size in self._ngram_filter_sizes]\n for i, conv_layer in enumerate(self._convolution_layers):\n self.add_module('conv_layer_%d' % i, conv_layer)\n\n maxpool_output_dim = self._num_filters * len(self._ngram_filter_sizes)\n if self._output_dim:\n self.projection_layer = Linear(maxpool_output_dim, self._output_dim)\n else:\n self.projection_layer = None\n self._output_dim = maxpool_output_dim\n\n def get_input_dim(self) -> int:\n return self._embedding_dim\n\n def get_output_dim(self) -> int:\n return self._output_dim\n\n def forward(self, tokens: torch.Tensor, mask: torch.Tensor): # pylint: disable=arguments-differ\n if mask is not None:\n tokens = tokens * mask.unsqueeze(-1).float()\n\n # Our input is expected to have shape `(batch_size, num_tokens, embedding_dim)`. The\n # convolution layers expect input of shape `(batch_size, in_channels, sequence_length)`,\n # where the conv layer `in_channels` is our `embedding_dim`. We thus need to transpose the\n # tensor first.\n tokens = torch.transpose(tokens, 1, 2)\n # Each convolution layer returns output of size `(batch_size, num_filters, pool_length)`,\n # where `pool_length = num_tokens - ngram_size + 1`. We then do an activation function,\n # then do max pooling over each filter for the whole input sequence. Because our max\n # pooling is simple, we just use `torch.max`. 
The resultant tensor has shape\n # `(batch_size, num_conv_layers * num_filters)`, which then gets projected using the\n # projection layer, if requested.\n\n filter_outputs = []\n for i in range(len(self._convolution_layers)):\n convolution_layer = getattr(self, 'conv_layer_{}'.format(i))\n filter_outputs.append(\n self._activation(convolution_layer(tokens)).max(dim=2)[0]\n )\n\n # Now we have a list of `num_conv_layers` tensors of shape `(batch_size, num_filters)`.\n # Concatenating them gives us a tensor of shape `(batch_size, num_filters * num_conv_layers)`.\n maxpool_output = torch.cat(filter_outputs, dim=1) if len(filter_outputs) > 1 else filter_outputs[0]\n\n if self.projection_layer:\n result = self.projection_layer(maxpool_output)\n else:\n result = maxpool_output\n return result\n\nclass PQANet(nn.Module):\n def __init__(self, emb_src, emb_tgt):\n super(PQANet, self).__init__()\n self.embeddings_src = emb_src\n self.embeddings_tgt = emb_tgt\n\n self.passage_encoder = BidirectionalTransformerEncoder(input_dim=300,\n hidden_dim=2048,\n num_layers=1)\n self.query_encoder = BidirectionalTransformerEncoder(input_dim=300,\n hidden_dim=2048,\n num_layers=1)\n # self.query_encoder = self.passage_encoder\n\n self._matrix_attention = LegacyMatrixAttention()\n\n # self.combine = CnnEncoder(embedding_dim=600, num_filters=100)\n\n # self.max_\n self.linear = nn.Linear(600, 1)\n self.sigmoid = nn.Sigmoid()\n\n # def forward(self, passage, query, passage_mask, query_mask):\n def forward(self, query, passage):\n\n # 0.\n passage, passage_length = passage\n batch_size = passage.size(0)\n passage_length = passage.size(1)\n passage_mask = passage.eq(0)\n query_mask = query.eq(0)\n\n # 0.1 Encoding\n embedded_query = self.embeddings_tgt(query) # (N, W, D)\n embedded_passage = self.embeddings_src(passage)\n\n # 1. Separately encoding.\n encoded_passage = self.passage_encoder(embedded_passage, passage_mask)\n encoded_query = self.query_encoder(embedded_query, query_mask)\n encoding_dim = encoded_query.size(-1)\n\n # maxpooled_passage = F.max_pool1d(encoded_passage.transpose(1,2), encoded_passage.size(1)).squeeze(2)\n # maxpooled_query = F.max_pool1d(encoded_query.transpose(1,2), encoded_query.size(1)).squeeze(2)\n #\n # output = torch.cat((maxpooled_passage, maxpooled_query), 1)\n output = torch.mean(encoded_query, 1)\n prob = self.sigmoid(self.linear(output))\n return prob\n\n def __forward(self, query, passage):\n\n # 0.\n passage, passage_length = passage\n batch_size = passage.size(0)\n passage_length = passage.size(1)\n passage_mask = passage.eq(0)\n query_mask = query.eq(0)\n\n # 0.1 Encoding\n embedded_query = self.embeddings_tgt(query) # (N, W, D)\n embedded_passage = self.embeddings_src(passage)\n\n # 1. Separately encoding.\n encoded_passage = self.passage_encoder(embedded_passage, passage_mask)\n encoded_query = self.query_encoder(embedded_query, query_mask)\n encoding_dim = encoded_query.size(-1)\n\n # 2. 
Interaction.\n\n # Shape: (batch_size, passage_length, query_length)\n passage_query_similarity = self._matrix_attention(encoded_passage, encoded_query)\n # Shape: (batch_size, passage_length, query_length)\n passage_query_attention = util.masked_softmax(passage_query_similarity, query_mask)\n # Shape: (batch_size, passage_length, encoding_dim)\n passage_query_vectors = util.weighted_sum(encoded_query, passage_query_attention)\n\n # We replace masked values with something really negative here, so they don't affect the\n # max below.\n masked_similarity = util.replace_masked_values(passage_query_similarity,\n query_mask.unsqueeze(1),\n -1e7)\n # Shape: (batch_size, passage_length)\n query_passage_similarity = masked_similarity.max(dim=-1)[0].squeeze(-1)\n # Shape: (batch_size, passage_length)\n query_passage_attention = util.masked_softmax(query_passage_similarity, passage_mask)\n # Shape: (batch_size, encoding_dim)\n query_passage_vector = util.weighted_sum(encoded_passage, query_passage_attention)\n # Shape: (batch_size, passage_length, encoding_dim)\n tiled_query_passage_vector = query_passage_vector.unsqueeze(1).expand(batch_size,\n passage_length,\n encoding_dim)\n\n # Shape: (batch_size, passage_length, encoding_dim * 4)\n final_merged_passage = torch.cat([encoded_passage,\n passage_query_vectors,\n encoded_passage * passage_query_vectors,\n encoded_passage * tiled_query_passage_vector],\n dim=-1)\n\n # 3. Compress Composition Mix ... ? or just max_pooling or mean\n # output = self.combine(final_merged_passage, passage_mask)\n output = torch.mean(final_merged_passage, 1)\n\n prob = self.sigmoid(self.linear(output))\n return prob\n\n def _forward(self, query, passage):\n\n # 0.\n passage, passage_length = passage\n batch_size = passage.size(0)\n passage_length = passage.size(1)\n passage_mask = passage.eq(0)\n query_mask = query.eq(0)\n\n # 0.1 Encoding\n embedded_query = self.embeddings_tgt(query) # (N, W, D)\n\n # 1. Separately encoding.\n encoded_query = self.query_encoder(embedded_query, query_mask)\n encoding_dim = encoded_query.size(-1)\n\n\n # 3. Compress Composition Mix ... ? 
or just max_pooling or mean\n output = self.combine(encoded_query, query_mask)\n # output = torch.mean(encoded_query, 1)\n\n prob = self.sigmoid(self.linear(output))\n return prob\n\n def batchClassify(self, inp, passage):\n \"\"\"\n Classifies a batch of sequences.\n\n Inputs: inp\n - inp: batch_size x seq_len\n\n Returns: out\n - out: batch_size ([0,1] score)\n \"\"\"\n # h = self.init_hidden(inp[0].size()[0])\n out = self.forward(inp, passage)\n return out.view(-1)\n\n\nfrom transformer import TransformerEncoder\nclass TransormerNet(nn.Module):\n def __init__(self, emb_src, emb_tgt):\n super(TransormerNet, self).__init__()\n self.embeddings_src = emb_src\n self.embeddings_tgt = emb_tgt\n\n self.passage_encoder = TransformerEncoder(num_layers=2, d_model=300, heads=10, d_ff=2048,\n dropout=0.1, embeddings=emb_src)\n self.query_encoder = TransformerEncoder(num_layers=2, d_model=300, heads=10, d_ff=2048,\n dropout=0.1, embeddings=emb_src)\n # self.query_encoder = self.passage_encoder\n\n self._matrix_attention = LegacyMatrixAttention()\n\n self.combine = CnnEncoder(embedding_dim=1200, num_filters=100)\n\n # self.max_\n self.linear = nn.Linear(200, 1)\n self.sigmoid = nn.Sigmoid()\n\n # def forward(self, passage, query, passage_mask, query_mask):\n def ___forward(self, query, passage):\n\n # 0.\n passage, passage_length = passage\n batch_size = passage.size(0)\n passage_length = passage.size(1)\n passage_mask = passage.eq(0)\n query_mask = query.eq(0)\n\n # 0.1 Encoding\n # embedded_query = self.embeddings_tgt(query) # (N, W, D)\n # embedded_passage = self.embeddings_src(passage)\n\n # 1. Separately encoding.\n encoded_passage = self.passage_encoder(passage, passage_mask)\n encoded_query = self.query_encoder(query, query_mask)\n encoding_dim = encoded_query.size(-1)\n\n # maxpooled_passage = F.max_pool1d(encoded_passage.transpose(1,2), encoded_passage.size(1)).squeeze(2)\n # maxpooled_query = F.max_pool1d(encoded_query.transpose(1,2), encoded_query.size(1)).squeeze(2)\n # output = torch.cat((maxpooled_passage, maxpooled_query), 1)\n\n mean_passage = torch.mean(encoded_passage, 1)\n mean_query = torch.mean(encoded_query, 1)\n output = torch.cat((mean_passage, mean_query), 1)\n\n prob = self.sigmoid(self.linear(output))\n return prob\n\n def forward(self, query, passage):\n\n # 0.\n passage, passage_length = passage\n batch_size = passage.size(0)\n passage_length = passage.size(1)\n passage_mask = passage.eq(0)\n query_mask = query.eq(0)\n\n\n # 0.1 Encoding\n # embedded_query = self.embeddings_tgt(query) # (N, W, D)\n # embedded_passage = self.embeddings_src(passage)\n\n # 1. Separately encoding.\n\n passage_mask = passage.eq(0)\n query_mask = query.eq(0)\n encoded_passage = self.passage_encoder(passage, passage_mask)\n encoded_query = self.query_encoder(query, query_mask)\n encoding_dim = encoded_query.size(-1)\n\n # 2. 
Interaction.\n\n # Shape: (batch_size, passage_length, query_length)\n passage_query_similarity = self._matrix_attention(encoded_passage, encoded_query)\n # Shape: (batch_size, passage_length, query_length)\n passage_query_attention = util.masked_softmax(passage_query_similarity, query_mask)\n # Shape: (batch_size, passage_length, encoding_dim)\n passage_query_vectors = util.weighted_sum(encoded_query, passage_query_attention)\n\n # We replace masked values with something really negative here, so they don't affect the\n # max below.\n masked_similarity = util.replace_masked_values(passage_query_similarity,\n query_mask.unsqueeze(1),\n -1e7)\n # Shape: (batch_size, passage_length)\n query_passage_similarity = masked_similarity.max(dim=-1)[0].squeeze(-1)\n # Shape: (batch_size, passage_length)\n query_passage_attention = util.masked_softmax(query_passage_similarity, passage_mask)\n # Shape: (batch_size, encoding_dim)\n query_passage_vector = util.weighted_sum(encoded_passage, query_passage_attention)\n # Shape: (batch_size, passage_length, encoding_dim)\n tiled_query_passage_vector = query_passage_vector.unsqueeze(1).expand(batch_size,\n passage_length,\n encoding_dim)\n\n # Shape: (batch_size, passage_length, encoding_dim * 4)\n final_merged_passage = torch.cat([encoded_passage,\n passage_query_vectors,\n encoded_passage * passage_query_vectors,\n encoded_passage * tiled_query_passage_vector],\n dim=-1)\n\n # 3. Compress Composition Mix ... ? or just max_pooling or mean\n # output = self.combine(final_merged_passage, passage_mask)\n output = torch.mean(final_merged_passage, 1)\n\n prob = self.sigmoid(self.linear(output))\n return prob\n\n def _forward(self, query, passage):\n\n # 0.\n passage, passage_length = passage\n batch_size = passage.size(0)\n passage_length = passage.size(1)\n passage_mask = passage.eq(0)\n query_mask = query.eq(0)\n\n # 0.1 Encoding\n embedded_query = self.embeddings_tgt(query) # (N, W, D)\n\n # 1. Separately encoding.\n encoded_query = self.query_encoder(embedded_query, query_mask)\n encoding_dim = encoded_query.size(-1)\n\n\n # 3. Compress Composition Mix ... ? 
or just max_pooling or mean\n output = self.combine(encoded_query, query_mask)\n # output = torch.mean(encoded_query, 1)\n\n prob = self.sigmoid(self.linear(output))\n return prob\n\n def batchClassify(self, inp, passage):\n \"\"\"\n Classifies a batch of sequences.\n\n Inputs: inp\n - inp: batch_size x seq_len\n\n Returns: out\n - out: batch_size ([0,1] score)\n \"\"\"\n # h = self.init_hidden(inp[0].size()[0])\n out = self.forward(inp, passage)\n return out.view(-1)\n\nclass LinearSeqAttn(nn.Module):\n \"\"\"Self attention over a sequence:\n * o_i = softmax(Wx_i) for x_i in X.\n \"\"\"\n\n def __init__(self, input_size):\n super(LinearSeqAttn, self).__init__()\n self.linear = nn.Linear(input_size, 1)\n\n def forward(self, x, x_mask):\n \"\"\"\n Args:\n x: batch * len * hdim\n x_mask: batch * len (1 for padding, 0 for true)\n Output:\n alpha: batch * len\n \"\"\"\n x_flat = x.view(-1, x.size(-1))\n scores = self.linear(x_flat).view(x.size(0), x.size(1))\n scores.data.masked_fill_(x_mask.data, -float('inf'))\n alpha = F.softmax(scores, dim=-1)\n return alpha\n\nclass BiLSTM(nn.Module):\n def __init__(self, emb_src, emb_tgt, emb_ans):\n super(BiLSTM, self).__init__()\n self.embeddings_src = emb_src\n self.embeddings_tgt = emb_tgt\n self.embeddings_ans = emb_ans\n\n self.passage_encoder = nn.LSTM(input_size=300 + 16,\n hidden_size=256,\n num_layers=2,\n bidirectional=True,\n dropout=0.3)\n self.query_encoder = nn.LSTM(input_size=300,\n hidden_size=256,\n num_layers=2,\n bidirectional=True,\n dropout=0.3)\n\n self._matrix_attention = LegacyMatrixAttention()\n\n self.LinearAttn = LinearSeqAttn(input_size=512)\n\n self.linear = nn.Linear(2048, 1)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, query, passage):\n\n # 0.\n passage, answer, src_lens = passage\n query, tgt_lens = query\n batch_size = passage.size(0)\n\n # sorted by lengths\n src_index = [index for index, value in sorted(list(enumerate(src_lens)), key=lambda x: x[1], reverse=True)]\n src_index_r = [index for index, value in sorted(list(enumerate(src_index)), key=lambda x: x[1])]\n src_index = torch.LongTensor(src_index)\n src_index_r = torch.LongTensor(src_index_r)\n tgt_index = [index for index, value in sorted(list(enumerate(tgt_lens)), key=lambda x: x[1], reverse=True)]\n tgt_index_r = [index for index, value in sorted(list(enumerate(tgt_index)), key=lambda x: x[1])]\n tgt_index = torch.LongTensor(tgt_index)\n tgt_index_r = torch.LongTensor(tgt_index_r)\n\n passage = passage[src_index].permute(1,0)\n answer = answer[src_index].permute(1,0)\n src_lens = src_lens[src_index].tolist()\n query = query[tgt_index].permute(1,0)\n tgt_lens = tgt_lens[tgt_index].tolist()\n\n # 0.1 Encoding\n embedded_passage = self.embeddings_src(passage)\n embedded_query = self.embeddings_tgt(query) # (W, N, D)\n embedded_answer = self.embeddings_ans(answer)\n embedded_pa = torch.cat((embedded_passage, embedded_answer), dim=-1)\n\n # pack\n pa = pack(embedded_pa, src_lens)\n qu = pack(embedded_query, tgt_lens)\n\n # 1. 
Separately encoding.\n passage_hiddens, encoded_passage = self.passage_encoder(pa)\n query_hiddens, encoded_query = self.query_encoder(qu)\n\n query_rep = encoded_query[0].transpose(0,1).contiguous().view(batch_size, -1)\n passage_rep = encoded_passage[0].transpose(0,1).contiguous().view(batch_size, -1)\n\n # recover\n query_rep = query_rep[tgt_index_r]\n passage_rep = passage_rep[src_index_r]\n\n output = torch.cat((query_rep, passage_rep), 1)\n # output = query_rep\n prob = self.sigmoid(self.linear(output))\n return prob\n\n def batchClassify(self, inp, passage):\n \"\"\"\n Classifies a batch of sequences.\n\n Inputs: inp\n - inp: batch_size x seq_len\n\n Returns: out\n - out: batch_size ([0,1] score)\n \"\"\"\n # h = self.init_hidden(inp[0].size()[0])\n out = self.forward(inp, passage)\n return out.view(-1)\n\n\nclass StackedCNN(nn.Module):\n def __init__(self, emb_src, emb_tgt):\n super(StackedCNN , self).__init__()\n self.embeddings_src = emb_src\n self.embeddings_tgt = emb_tgt\n\n self.passage_encoder = nn.LSTM(input_size=300,\n hidden_size=256,\n num_layers=2,\n bidirectional=True,\n batch_first=True,\n dropout=0.3)\n self.query_encoder = nn.LSTM(input_size=300,\n hidden_size=256,\n num_layers=2,\n bidirectional=True,\n batch_first=True,\n dropout=0.3)\n\n self._matrix_attention = LegacyMatrixAttention()\n\n self.LinearAttn = LinearSeqAttn(input_size=512)\n\n self.linear = nn.Linear(2048, 1)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, query, passage):\n\n # 0.\n passage, answer = passage\n batch_size = passage.size(0)\n passage_length = passage.size(1)\n passage_mask = passage.eq(0)\n query_mask = query.eq(0)\n\n # 0.1 Encoding\n embedded_passage = self.embeddings_src(passage)\n embedded_query = self.embeddings_tgt(query) # (N, W, D)\n\n # 1. 
Separately encoding.\n passage_hiddens, encoded_passage = self.passage_encoder(embedded_passage)\n query_hiddens, encoded_query = self.query_encoder(embedded_query)\n\n query_rep = encoded_query[0].transpose(0,1).contiguous().view(batch_size, -1)\n passage_rep = encoded_passage[0].transpose(0,1).contiguous().view(batch_size, -1)\n\n output = torch.cat((query_rep, passage_rep), 1)\n # output = query_rep\n prob = self.sigmoid(self.linear(output))\n return prob\n\n def batchClassify(self, inp, passage):\n \"\"\"\n Classifies a batch of sequences.\n\n Inputs: inp\n - inp: batch_size x seq_len\n\n Returns: out\n - out: batch_size ([0,1] score)\n \"\"\"\n # h = self.init_hidden(inp[0].size()[0])\n out = self.forward(inp, passage)\n return out.view(-1)" }, { "alpha_fraction": 0.48629406094551086, "alphanum_fraction": 0.4934140145778656, "avg_line_length": 35.96052551269531, "blob_id": "8a6c7086cc038537785ece28b63049a4e6c8b191", "content_id": "18037c87529b70aeddfa7353d8ae098a2618d8ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2811, "license_type": "no_license", "max_line_length": 106, "num_lines": 76, "path": "/code/rollout.py", "repo_name": "bielinzz/Improving-NQG-with-CGAN", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\nimport copy\nimport torch\nfrom nltk.translate import bleu_score\nfrom config import CUDA, lamda, base\n\nclass Rollout(object):\n \"\"\"Roll-out policy\"\"\"\n def __init__(self, model, update_rate):\n self.ori_model = model\n self.own_model = copy.deepcopy(model)\n self.update_rate = update_rate\n\n def get_reward(self, x, passage, src_data_wrap, num, discriminator, src_rev, rev, train_ref, tgt_pad):\n \"\"\"\n Args:\n x : (batch_size, seq_len) input data\n num : roll-out number\n discriminator : discriminator model\n \"\"\"\n x = x[0]\n x_len = x[1]\n rewards = []\n batch_size = x.size(0)\n seq_len = x.size(1)\n src_data_k = src_data_wrap[0]\n src_data_k = src_rev.reverse(src_data_k)\n for i in range(num):\n for l in range(1, seq_len+1):\n data = x[:, 0:l]\n if data[:, l - 1].eq(tgt_pad).sum() == batch_size:\n break\n\n if l < seq_len:\n samples, lengths = self.own_model.sample(src_data_wrap, 'random', data)\n samples_wrap = (samples, lengths)\n pred = discriminator.batchClassify(samples_wrap, passage)\n if l == seq_len:\n samples = data\n lengths = x_len\n samples_wrap = (samples, lengths)\n pred = discriminator.batchClassify(samples_wrap, passage)\n\n # bleu-4 reward\n reward_b = []\n s_r = samples.permute(1, 0)\n s_r = rev.reverse(s_r) # todo: reverse has <unk> tokens\n for k in range(samples.size(0)):\n key = \" \".join([idx for idx in src_data_k[k]])\n score = bleu_score.sentence_bleu(train_ref[key], s_r[k].split())\n reward_b.append(score)\n reward_b = torch.tensor(reward_b)\n\n reward = lamda * (pred-base) + (1-lamda) * reward_b.type_as(pred)\n # reward = lamda * (pred-base)\n # reward = reward_b.type_as(pred)\n\n if i == 0:\n rewards.append(reward)\n else:\n rewards[l-1] += reward\n\n rewards = torch.stack(rewards)\n rewards = rewards / (1.0 * num) # seq_len * batch_size\n return rewards\n\n\n def update_params(self):\n dic = {}\n for name, param in self.ori_model.named_parameters():\n dic[name] = param.data\n for name, param in self.own_model.named_parameters():\n if name.startswith('emb'):\n param.data = dic[name]\n else:\n param.data = self.update_rate * param.data + (1 - self.update_rate) * dic[name]\n" }, { "alpha_fraction": 0.600612461566925, "alphanum_fraction": 0.6039300560951233, 
"avg_line_length": 41.129032135009766, "blob_id": "a8377836f069d91a9f65b1d939c3cd590bb55787", "content_id": "50c7f2269faee5f450f3820021d69848ee517529", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7837, "license_type": "no_license", "max_line_length": 155, "num_lines": 186, "path": "/code/data.py", "repo_name": "bielinzz/Improving-NQG-with-CGAN", "src_encoding": "UTF-8", "text": "from torchtext.data import Field\nfrom torchtext.data import TabularDataset\nfrom torchtext.data import BucketIterator, Iterator\nimport torch\nfrom config import tgt_vocab_size, src_vocab_size, batch_size, data_name\n\n\nclass ReversibleField(Field):\n def __init__(self, **kwargs):\n if kwargs.get('tokenize') is list:\n self.use_revtok = False\n else:\n self.use_revtok = True\n if kwargs.get('tokenize') is None:\n kwargs['tokenize'] = 'revtok'\n if 'unk_token' not in kwargs:\n kwargs['unk_token'] = ' UNK '\n super(ReversibleField, self).__init__(**kwargs)\n\n def reverse(self, batch, src_data=None, att=None, dic_src=None):\n if self.use_revtok:\n try:\n import revtok\n except ImportError:\n print(\"Please install revtok.\")\n raise\n if not self.batch_first:\n batch = batch.t()\n with torch.cuda.device_of(batch):\n batch = batch.tolist()\n batch = [[self.vocab.itos[ind] for ind in ex] for ex in batch] # denumericalize\n\n if att is not None:\n for i in range(len(batch)):\n for j in range(len(batch[i])):\n if batch[i][j] == '<unk>':\n _, maxIndex = att[j].max(0)\n batch[i][j] = dic_src.vocab.itos[src_data[maxIndex[0]]]\n\n def trim(s, t):\n sentence = []\n for w in s:\n if w == t:\n break\n sentence.append(w+' ')\n return sentence\n\n batch = [trim(ex, self.eos_token) for ex in batch] # trim past frst eos\n\n def filter_special(tok):\n return tok not in (self.init_token, self.pad_token)\n\n batch = [filter(filter_special, ex) for ex in batch]\n if self.use_revtok:\n return [revtok.detokenize(ex) for ex in batch]\n return [''.join(ex) for ex in batch]\n\n\ndef train_data():\n tokenize = lambda x: x.split()\n\n Text_src = Field(sequential=True, tokenize=tokenize, eos_token='<EOS>', include_lengths=True, lower=True)\n Answer = Field(sequential=True, tokenize=tokenize, eos_token='<EOS>', include_lengths=True, lower=True)\n Text_tgt = Field(sequential=True, tokenize=tokenize, eos_token='<EOS>',\n include_lengths=True, init_token='<SOS>', lower=True)\n\n\n trn_datafields = [(\"source\",Text_src),\n (\"target\", Text_tgt),\n (\"answer\", Answer)]\n trn, val = TabularDataset.splits(\n path=\"../data/\"+str(data_name), # the root directory where the data lies\n train='train.json', validation = 'validation.json',\n format='json',\n # skip_header=True, # if your csv header has a header, make sure to pass this to ensure it doesn't get proceesed as data!\n fields={'source': trn_datafields[0], 'target': trn_datafields[1], 'answer': trn_datafields[2]})\n\n # Text_src.build_vocab(trn, max_size=vocab_size)\n Text_src.build_vocab(trn, max_size=src_vocab_size)\n Text_tgt.build_vocab(trn, max_size=tgt_vocab_size)\n Answer.build_vocab(trn)\n Text_src.vocab.load_vectors(\"glove.840B.300d\")\n Text_tgt.vocab.load_vectors(\"glove.840B.300d\")\n\n train_iter, val_iter = BucketIterator.splits(\n (trn, val), # we pass in the datasets we want the iterator to draw data from\n batch_sizes= (batch_size, batch_size),\n device=-1, # if you want to use the GPU, specify the GPU number here\n sort_key=lambda x: len(x.source), # the BucketIterator needs to be told what function it should 
\n Text_tgt_r = ReversibleField(sequential=True, include_lengths=True,\n eos_token='<EOS>', init_token='<SOS>', lower=True)\n Text_tgt_r.vocab = Text_tgt.vocab\n\n Text_src_r = ReversibleField(sequential=True, include_lengths=True,\n eos_token='<EOS>', lower=True)\n Text_src_r.vocab = Text_src.vocab\n\n Text_ans_r = ReversibleField(sequential=True, tokenize=tokenize,\n eos_token='<EOS>', include_lengths=True, lower=True)\n Text_ans_r.vocab = Answer.vocab\n\n src_pad = Text_src.vocab.stoi['<pad>']\n src_unk = Text_src.vocab.stoi['<unk>']\n src_eos = Text_src.vocab.stoi['<EOS>']\n src_special = [src_pad, src_unk, src_eos]\n\n ans_pad = Answer.vocab.stoi['<pad>']\n ans_unk = Answer.vocab.stoi['<unk>']\n ans_eos = Answer.vocab.stoi['<EOS>']\n ans_special = [ans_pad, ans_unk, ans_eos]\n\n tgt_pad = Text_tgt.vocab.stoi['<pad>']\n tgt_unk = Text_tgt.vocab.stoi['<unk>']\n tgt_eos = Text_tgt.vocab.stoi['<EOS>']\n tgt_sos = Text_tgt.vocab.stoi['<SOS>']\n tgt_special = [tgt_pad, tgt_unk, tgt_eos, tgt_sos]\n\n\n # discriminator data iterator\n passage = Field(sequential=True, tokenize=tokenize, eos_token='<EOS>', include_lengths=True, lower=True)\n ans = Field(sequential=True, tokenize=tokenize, eos_token='<EOS>', include_lengths=True, lower=True)\n ques = Field(sequential=True, tokenize=tokenize, eos_token='<EOS>',include_lengths=True, lower=True)\n target = Field(sequential=False, use_vocab=False)\n\n disc_trn_datafields = [(\"question\", ques),\n (\"answer\", ans),\n (\"passage\", passage),\n (\"target\", target)]\n\n disc_trn = TabularDataset(\n path=\"../data/\" + str(data_name) + \"/disc.json\", # the root directory where the data lies\n # train='disc.json',\n format='json',\n # skip_header=True, # if your csv file has a header, make sure to pass this to ensure it doesn't get processed as data!\n fields={'question': disc_trn_datafields[0], 'answer': disc_trn_datafields[1], 'passage': disc_trn_datafields[2], 'target': disc_trn_datafields[3]})\n\n passage.vocab = Text_src.vocab\n ans.vocab = Answer.vocab\n ques.vocab = Text_tgt.vocab\n\n disc_train_iter = BucketIterator(\n dataset=disc_trn, # we pass in the datasets we want the iterator to draw data from\n batch_size = batch_size,\n device=-1, # if you want to use the GPU, specify the GPU number here\n sort_key=lambda x: len(x.question),\n # the BucketIterator needs to be told what function it should use to group the data.\n sort_within_batch=True,\n shuffle=True,\n repeat=False)\n\n\n\n # raw data iterator\n Text_tgt_raw = ReversibleField(sequential=True, tokenize=tokenize, include_lengths=True, lower=True)\n\n trn_datafields = [(\"source\", Text_tgt_raw),\n (\"target\", Text_tgt_raw)]\n trn_raw, val_raw = TabularDataset.splits(\n path=\"../data/\"+str(data_name), # the root directory where the data lies\n train='train.json', validation='validation.json',\n format='json',\n # skip_header=True,\n # if your csv file has a header, make sure to pass this to ensure it doesn't get processed as data!\n fields={'source': trn_datafields[0], 'target': trn_datafields[1]})\n\n\n Text_tgt_raw.build_vocab(val_raw)\n\n train_iter_raw, val_iter_raw = BucketIterator.splits(\n (trn_raw, val_raw), # we pass in the datasets we want the iterator to draw data from\n batch_sizes=(batch_size, batch_size),\n device=-1, # if you want to use the GPU, specify the GPU number here\n sort_key=lambda x: len(x.source),\n # the BucketIterator needs to be told what function it should use to 
group the data.\n sort_within_batch=True,\n shuffle=True,\n repeat=False)\n\n\n return train_iter, val_iter, src_special, tgt_special, Text_tgt_r, val_iter_raw, Text_tgt_raw, Text_src_r,\\\n Text_src, Text_tgt, ans_special, Text_ans_r, disc_train_iter\n\n" }, { "alpha_fraction": 0.5732733607292175, "alphanum_fraction": 0.5888389348983765, "avg_line_length": 34.54917907714844, "blob_id": "67c57c11d44fc489b44d9baa0d3306715b22c4f9", "content_id": "0cc94ce174ab925e17dcd28ee5838e4fc2645a45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8673, "license_type": "no_license", "max_line_length": 139, "num_lines": 244, "path": "/code/train_discriminator.py", "repo_name": "bielinzz/Improving-NQG-with-CGAN", "src_encoding": "UTF-8", "text": "from __future__ import print_function, division\nimport sys\nimport math\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport generator\nimport discriminator\nimport helpers\nimport evaluation\nfrom data import batch_size\nfrom data import tgt_vocab_size, src_vocab_size\nfrom Optim import Optim\nfrom rollout import Rollout\nimport reference\nimport data\n\nCUDA = True\ndevice = torch.device(\"cuda:0\")\n# pretrain\nMLE_TRAIN_EPOCHS = 10\nPRETRAIN_DISC_EPOCHS = 10\n# PRETRAN_D_STEPS = 10\npretrain_acc = 0.8\n# Show_num = 4\nmax_sent_len = 100 # for padding\n\n# adv_train\nupdate_learning_rate = 0.8 # policy gradient training\nADV_TRAIN_EPOCHS = 50\nADV_DISC_EPOCHS = 5\nADV_acc = 0.8\nADV_d_step = 1\nADV_pg_batches = int(1000/64) # todo: this setting\nrollout_size = 20\nADV_dis_batches = int(10000/64) # todo: this setting\n\ntrain_iter, val_iter, src_special, tgt_special, rev, \\\nval_iter_raw, rev_raw, src_rev, src, tgt, ans_special, ans_rev, disc_train_iter = data.train_data()\n\ntgt_pad = tgt_special[0]\ntgt_unk = tgt_special[1]\ntgt_eos = tgt_special[2]\ntgt_sos = tgt_special[3]\nsrc_pad = src_special[0]\nsrc_unk = src_special[1]\nsrc_eos = src_special[2]\nans_pad = ans_special[0]\nans_unk = ans_special[1]\nans_eos = ans_special[2]\n\ntrain_ref, tgt_ref = reference.ref(src_rev, rev, rev_raw, train_iter, val_iter_raw)\n\ndef train_discriminator(discriminator, dis_opt, train_iter, generator, out_acc, epochs, ADV_batches = None):\n \"\"\"\n Training the discriminator on real_data_samples (positive) and generated samples from generator (negative).\n Samples are drawn d_steps times, and the discriminator is trained for epochs epochs.\n \"\"\"\n def eval(val_iter, discriminator, generator):\n # validation\n discriminator.eval()\n print('validation :', end=' ')\n total_acc = 0\n num_samples = 0\n total_loss = 0\n for i, data in enumerate(val_iter):\n tgt_data = data.target[0].permute(1, 0) # batch_size X length\n src_data_wrap = data.source\n ans = data.answer[0]\n\n if CUDA:\n scr_data = data.source[0].to(device)\n scr_lengths = data.source[1].to(device)\n ans = ans.to(device)\n src_data_wrap = (scr_data, scr_lengths, ans)\n\n real_samples = tgt_data\n real_lengths = data.target[1]\n passage = src_data_wrap[0].permute(1, 0)\n\n with torch.no_grad():\n fake_samples, fake_lengths = generator.sample(src_data_wrap)\n # prepare prepare_discriminator_data input\n fake_samples = fake_samples.cpu()\n fake_lengths = fake_lengths.cpu()\n ans = ans.permute(1, 0).cpu()\n\n # shuffle data\n dis_inp, dis_target, dis_len, dis_pa, dis_an = helpers.prepare_discriminator_data(real_samples, real_lengths,\n fake_samples, fake_lengths, passage, ans, tgt_special)\n inp, target = dis_inp, dis_target\n lengths, 
pa = dis_len, dis_pa\n an = dis_an\n\n if CUDA:\n inp = inp.to(device)\n target = target.to(device).type(torch.float)\n lengths = lengths.to(device)\n pa = pa.to(device)\n an = an.to(device)\n pa = (pa, an)\n\n # inp = (inp, lengths)\n out = discriminator.batchClassify(inp, pa)\n loss_fn = nn.BCELoss() # todo: should .cuda??\n loss = loss_fn(out, target)\n total_loss += loss.item()\n num_samples += tgt_data.size(0) * 2\n total_acc += torch.sum((out > 0.5) == (target > 0.5)).item()\n\n total_acc = total_acc * 1.0 / float(num_samples)\n print('loss = %.4f' % (total_loss / (num_samples)), end=' ')\n print('val_acc = %.4f\\n' % (total_acc))\n discriminator.train()\n return total_acc\n\n\n for epoch in range(epochs):\n discriminator.train()\n print('\\n epoch %d : ' % (epoch + 1), end='')\n total_loss = 0\n total_acc = 0\n true_acc = 0\n num_samples = 0\n\n for i, dis_data in enumerate(disc_train_iter):\n inp, inp_length = dis_data.question\n target = dis_data.target\n pa, pa_length = dis_data.passage\n ans, ans_length = dis_data.answer\n num_samples += inp.size(1)\n\n if CUDA:\n pa = pa.transpose(0,1)\n inp = inp.transpose(0,1)\n ans = ans.transpose(0,1)\n\n inp = inp.to(device)\n target = target.to(device).type(torch.float)\n # lengths = lengths.to(device)\n ans = ans.to(device)\n pa = pa.to(device)\n pa = (pa, ans)\n\n\n # inp = (inp, lengths)\n dis_opt.zero_grad()\n out = discriminator.batchClassify(inp, pa) # hidden = none over here\n loss_fn = nn.BCELoss()\n loss = loss_fn(out, target)\n loss.backward()\n dis_opt.step()\n\n total_loss += loss.item()\n total_acc += torch.sum((out>0.5)==(target>0.5)).item()\n true = (target > 0.5).type(torch.FloatTensor)\n out = out.cpu()\n out_true = out * true\n true_acc += torch.sum(out_true > 0.5).item()\n\n total_acc = total_acc * 1.0 / float(num_samples)\n true_acc = true_acc * 1.0 / float(num_samples/2)\n print('loss = %.4f, train_acc = %.4f' % (total_loss/(num_samples), total_acc), end=' ')\n print('true_acc = %.4f' % true_acc)\n val_acc = eval(val_iter, discriminator, generator)\n # dis_opt.updateLearningRate(val_acc)\n\n\n # todo: when to stop the discriminator MLE training(below is my randomly settings)\n flag = 0\n if ADV_batches is None:\n if val_acc > out_acc:\n flag = 1\n break\n\n\n# MAIN\nif __name__ == '__main__':\n\n emb_src = nn.Embedding(src_vocab_size + 3, 300, padding_idx=src_pad)\n emb_tgt = nn.Embedding(tgt_vocab_size + 4, 300, padding_idx=tgt_pad)\n emb_ans = nn.Embedding(6, 16, padding_idx=ans_pad)\n\n emb_src.weight.data.copy_(src.vocab.vectors.to(device))\n emb_tgt.weight.data.copy_(tgt.vocab.vectors.to(device))\n\n emb_tgt.weight.requires_grad = False\n emb_src.weight.requires_grad = False\n\n enc = generator.encoder(emb_src, emb_ans, src_special, ans_special)\n dec = generator.decoder(emb_tgt, tgt_special)\n gen = generator.NQGgenerator(enc, dec, gpu=CUDA)\n # dis = discriminator.Discriminator(emb_src, emb_tgt, emb_ans, gpu=CUDA)\n # dis = discriminator.PQANet(emb_src, emb_tgt)\n # dis = discriminator.TransormerNet(emb_src, emb_tgt)\n dis = discriminator.BiLSTM(emb_src, emb_tgt)\n\n print(dis)\n\n if CUDA:\n enc = enc.to(device)\n dec = dec.to(device)\n gen = gen.to(device)\n dis = dis.to(device)\n\n emb_ans.weight.requires_grad = False\n # torch.save(gen.state_dict(), pretrained_gen_path)\n # gen.load_state_dict(torch.load(pretrained_gen_path))\n\n # PRETRAIN DISCRIMINATOR\n print('\\nStarting Discriminator Training...')\n # dis_optimizer = optim.Adam(dis.parameters(), lr=1e-3)\n dis_optimizer = Optim('adam', 1e-3, 
lr_decay=0.5, max_weight_value=1.0)\n dis_optimizer.set_parameters(dis.parameters())\n train_discriminator(dis, dis_optimizer, train_iter, gen, pretrain_acc, PRETRAIN_DISC_EPOCHS)\n\n # torch.save(dis.state_dict(), pretrained_dis_path)\n # dis.load_state_dict(torch.load(pretrained_dis_path))\n # ADVERSARIAL TRAINING\n # note: gen_optimizer, train_generator_PG and train_generator_MLE are defined in train.py,\n # so this adversarial block is not runnable from train_discriminator.py on its own\n pg_count=10000\n best_advbleu = 0\n\n pg_optimizer = Optim('myadam', 1e-3, max_grad_norm=5)\n pg_optimizer.set_parameters(gen.parameters())\n gen_optimizer.reset_learningrate(1e-3)\n dis_optimizer.reset_learningrate(1e-3)\n\n for epoch in range(ADV_TRAIN_EPOCHS):\n print('\\n--------\\nEPOCH %d\\n--------' % (epoch+1))\n # TRAIN GENERATOR\n print('\\nAdversarial Training Generator : ', end='')\n sys.stdout.flush()\n emb_ans.weight.requires_grad = True\n gen.train()\n train_generator_PG(gen, pg_optimizer, dis, train_iter, ADV_pg_batches)\n # todo: should add teacher forcing training after PG training?\n print(\"teacher forcing training after PG training\")\n train_generator_MLE(gen, gen_optimizer, train_iter, 1)\n\n emb_ans.weight.requires_grad = False\n gen.eval()\n # TRAIN DISCRIMINATOR\n print('\\nAdversarial Training Discriminator : ')\n train_discriminator(dis, dis_optimizer, train_iter, gen, ADV_acc, ADV_DISC_EPOCHS, ADV_dis_batches)" }, { "alpha_fraction": 0.5423772931098938, "alphanum_fraction": 0.5561044216156006, "avg_line_length": 37.465999603271484, "blob_id": "7b27a13a8236adc5a9358863bd68885e47e56f9b", "content_id": "b2cc745d4044f7b0c0365bd0f2d429f62224b3b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19232, "license_type": "no_license", "max_line_length": 153, "num_lines": 500, "path": "/code/train.py", "repo_name": "bielinzz/Improving-NQG-with-CGAN", "src_encoding": "UTF-8", "text": "from __future__ import print_function, division\nimport sys\nimport math\nimport random\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport generator\nimport discriminator\nimport helpers\nimport evaluation\nfrom data import batch_size\nfrom data import tgt_vocab_size, src_vocab_size\nfrom Optim import Optim\nfrom rollout import Rollout\nimport reference\nimport data\n\nCUDA = True\ndevice = torch.device(\"cuda:0\")\n# pretrain\nMLE_TRAIN_EPOCHS = 20\nPRETRAIN_DISC_EPOCHS = 5\n# PRETRAN_D_STEPS = 10\npretrain_acc = 0.8\n# Show_num = 4\nmax_sent_len = 100 # for padding\n\n# adv_train\nupdate_learning_rate = 0.8 # policy gradient training\nADV_TRAIN_EPOCHS = 50\nADV_DISC_EPOCHS = 5\nADV_acc = 0.8\nADV_d_step = 1\nADV_pg_batches = int(64/64) # todo: this setting\nrollout_size = 20\nADV_dis_batches = int(5000/64) # todo: this setting\n\ntrain_iter, val_iter, src_special, tgt_special, rev, val_iter_raw, rev_raw, src_rev, src, tgt, ans_special, ans_rev, disc_train_iter= data.train_data()\ntgt_pad = tgt_special[0]\ntgt_unk = tgt_special[1]\ntgt_eos = tgt_special[2]\ntgt_sos = tgt_special[3]\nsrc_pad = src_special[0]\nsrc_unk = src_special[1]\nsrc_eos = src_special[2]\nans_pad = ans_special[0]\nans_unk = ans_special[1]\nans_eos = ans_special[2]\n\ntrain_ref, tgt_ref = reference.ref(src_rev, rev, rev_raw, train_iter, val_iter_raw)\n\ndef train_generator_MLE(gen, gen_opt, train_iter, epochs, adv_flag=None):\n \"\"\"\n Max Likelihood Pretraining for the generator\n \"\"\"\n best_bleu = 0\n if adv_flag == True:\n global best_advbleu\n\n for epoch in range(epochs):\n print('epoch %d : ' % (epoch + 1))\n total_loss = 0\n num_words = 0\n report_loss = 0\n report_num = 0\n for i, data in enumerate(train_iter):\n
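 # when called from adversarial training (adv_flag=True), only the first batch is\n # consumed (the i == 1 break below), making this a quick one-batch teacher-forcing\n # refresh between policy-gradient rounds\n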
 if adv_flag == True:\n if i == 1:\n break\n\n tgt_data = data.target[0]\n src_data_wrap = data.source\n ans = data.answer[0]\n\n if CUDA:\n scr_data = data.source[0].to(device)\n scr_lengths = data.source[1].to(device)\n ans = ans.to(device)\n src_data_wrap = (scr_data, scr_lengths, ans)\n\n tgt_lengths = data.target[1]\n tgt_lengths = torch.LongTensor(tgt_lengths)\n num_words += tgt_lengths.sum().item()\n\n tgt_data=tgt_data.permute(1,0) # --> batch x length\n inp, target = helpers.prepare_generator_batch(tgt_data, gpu=CUDA)\n gen_opt.zero_grad()\n loss = gen.batchNLLLoss(src_data_wrap, inp, target) # inp means decoder inp, target means decoder target.\n loss.div(tgt_data.size(1)).backward()\n # loss.backward()\n gen_opt.step()\n\n report_loss += loss.item()\n report_num += tgt_data.size(1)\n total_loss += loss.item()\n\n # if i % 20 == -1 % 20:\n # print((\"inter loss = %.4f\") % (report_loss / report_num))\n # report_loss = 0\n # report_num = 0\n\n loss_perword = total_loss / num_words\n train_ppl = math.exp(min(loss_perword, 100))\n print('loss = %.4f' % (total_loss / len(train_iter.dataset)))\n print('ppl = %.4f' % train_ppl)\n\n # evaluate bleu scores\n # valid data\n # if epoch%5 == -1%5:\n gen.eval()\n # print(\"Set gen to {0} mode\".format('train' if model.decoder.dropout.training else 'eval'))\n valid_bleu = evaluation.evalModel(gen, val_iter, epoch, rev, src_special, tgt_special, tgt_ref, src_rev)\n print('Validation bleu-4 = %g' % (valid_bleu * 100))\n\n if adv_flag != True:\n if valid_bleu > best_bleu:\n best_bleu = valid_bleu\n torch.save(gen.state_dict(), 'params.pkl')\n print('save '+str(epoch + 1)+' epoch model')\n else:\n if valid_bleu > best_advbleu:\n best_advbleu = valid_bleu\n torch.save(gen.state_dict(), 'params.pkl')\n print('save ' + str(epoch + 1) + ' epoch model')\n\n gen_opt.updateLearningRate(valid_bleu)\n #train_bleu = evaluation.evalModel(gen, train_iter)\n #print('training bleu = %g' % (train_bleu * 100))\n gen.train()\n # print(\"Set gen to {0} mode\".format('train' if model.decoder.dropout.training else 'eval'))\n\n\n\ndef train_generator_PG(gen, gen_opt, dis, train_iter, num_batches):\n \"\"\"\n The generator is trained using policy gradients, using the reward from the discriminator.\n Training is done for num_batches batches.\n \"\"\"\n global pg_count\n global best_advbleu\n pg_count += 1\n num_sentences = 0\n total_loss = 0\n rollout = Rollout(gen, update_learning_rate)\n for i, data in enumerate(train_iter):\n if i == num_batches:\n break\n src_data_wrap = data.source\n ans = data.answer[0]\n # tgt_data = data.target[0].permute(1, 0)\n passage = src_data_wrap[0].permute(1, 0)\n\n if CUDA:\n src_data = data.source[0].to(device) # lengths x batch_size\n src_lengths = data.source[1].to(device)\n ans = ans.to(device)\n ans_p = ans.permute(1, 0)\n src_data_wrap = (src_data, src_lengths, ans)\n passage = passage.to(device)\n passage = (passage, ans_p, src_lengths)\n\n num_sentences += src_data.size(1)\n with torch.no_grad():\n samples, sam_lens = gen.sample(src_data_wrap, way='greedy') # 64 batch_size works best\n sam_wrap = (samples, sam_lens)\n
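 # Monte-Carlo rollouts: every prefix of the sampled question is completed rollout_size\n # times and scored by the discriminator (rollout.py mixes in a BLEU-4 reward), giving\n # a per-timestep (seq_len x batch_size) reward matrix for the policy-gradient loss\n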
 rewards = rollout.get_reward(sam_wrap, passage, src_data_wrap, rollout_size, dis, src_rev, rev, train_ref, tgt_pad)\n\n inp, target = helpers.prepare_generator_batch(samples, gpu=CUDA)\n\n gen_opt.zero_grad()\n pg_loss = gen.batchPGLoss(src_data_wrap, inp, target, rewards)\n pg_loss.backward()\n gen_opt.step()\n total_loss += pg_loss\n rollout.update_params() # TODO: DON'T KNOW WHY\n\n gen.eval()\n # print(\"Set gen to {0} mode\".format('train' if model.decoder.dropout.training else 'eval'))\n valid_bleu = evaluation.evalModel(gen, val_iter, pg_count, rev, src_special, tgt_special, tgt_ref, src_rev)\n print('Validation bleu-4 = %g' % (valid_bleu * 100))\n if valid_bleu > best_advbleu:\n best_advbleu = valid_bleu\n torch.save(gen.state_dict(), 'advparams.pkl')\n print('save model')\n # train_bleu = evaluation.evalModel(gen, train_iter)\n # print('training bleu = %g' % (train_bleu * 100))\n gen.train()\n\n print(\"\\npg_loss on %d batches : %.4f\" %(i, total_loss/num_batches))\n\n\ndef train_discriminator(discriminator, dis_opt, train_iter, generator, out_acc, epochs, ADV_batches = None):\n \"\"\"\n Training the discriminator on real_data_samples (positive) and generated samples from generator (negative).\n Samples are drawn d_steps times, and the discriminator is trained for epochs epochs.\n \"\"\"\n def eval(val_iter, discriminator, generator):\n # validation\n discriminator.eval()\n print('validation :', end=' ')\n total_acc = 0\n num_samples = 0\n total_loss = 0\n for i, data in enumerate(val_iter):\n tgt_data = data.target[0].permute(1, 0) # batch_size X length\n src_data_wrap = data.source\n ans = data.answer[0]\n\n if CUDA:\n src_data = data.source[0].to(device)\n src_lengths = data.source[1].to(device)\n ans = ans.to(device)\n src_data_wrap = (src_data, src_lengths, ans)\n\n real_samples = tgt_data\n real_lengths = data.target[1]\n passage = src_data_wrap[0].permute(1, 0)\n\n with torch.no_grad():\n fake_samples, fake_lengths = generator.sample(src_data_wrap, way='random')\n # prepare prepare_discriminator_data input\n fake_samples = fake_samples.cpu()\n fake_lengths = fake_lengths.cpu()\n ans = ans.permute(1, 0).cpu()\n src_lengths = src_lengths.cpu()\n\n # shuffle data\n dis_inp, dis_target, dis_len, dis_pa, dis_an, src_lens = helpers.prepare_discriminator_data(real_samples, real_lengths,\n fake_samples, fake_lengths, passage, ans, src_lengths, tgt_special)\n inp, target = dis_inp, dis_target\n lengths, pa = dis_len, dis_pa\n an = dis_an\n\n if CUDA:\n inp = inp.to(device)\n target = target.to(device)\n lengths = lengths.to(device)\n src_lens = src_lens.to(device)\n pa = pa.to(device)\n an = an.to(device)\n pa = (pa, an, src_lens)\n inp = (inp, lengths)\n\n # inp = (inp, lengths)\n out = discriminator.batchClassify(inp, pa)\n loss_fn = nn.BCELoss() # todo: should .cuda??\n loss = loss_fn(out, target)\n total_loss += loss.item()\n num_samples += tgt_data.size(0) * 2\n total_acc += torch.sum((out > 0.5) == (target > 0.5)).item()\n\n total_acc = total_acc * 1.0 / float(num_samples)\n print('loss = %.4f' % (total_loss / (num_samples)), end=' ')\n print('val_acc = %.4f\\n' % (total_acc))\n discriminator.train()\n return total_acc\n\n d_step = 0\n while(1):\n d_step += 1\n passages = []\n anses = []\n real_samples = []\n fake_samples = []\n real_lengths = []\n fake_lengths = []\n src_lengths = []\n\n for i, data in enumerate(train_iter):\n if ADV_batches is not None:\n if i+1 == ADV_batches:\n break\n\n tgt_data = data.target[0].permute(1, 0) # batch_size X length\n src_data_wrap = data.source\n ans = data.answer[0]\n\n if CUDA:\n src_data = data.source[0].to(device)\n src_length = data.source[1].to(device)\n ans = ans.to(device)\n src_data_wrap = (src_data, src_length, ans)\n\n real_sample = tgt_data\n real_length = data.target[1]\n with torch.no_grad():\n fake_sample, fake_length = generator.sample(src_data_wrap, way='random')\n fake_sample = fake_sample.cpu()\n fake_length = fake_length.cpu()\n
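 # keep the sampled tensors on the CPU; they are accumulated over all batches and\n # padded to max_sent_len below so torch.cat can stack them into single tensors\n 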
ans = ans.permute(1, 0).cpu()\n src_length = src_length.cpu()\n\n # keep lengths as the same in order to pack\n passage = src_data_wrap[0].permute(1, 0)\n pad_len = max_sent_len - passage.size(1)\n m = nn.ConstantPad1d((0, pad_len), src_pad)\n passage = m(passage)\n ans = m(ans)\n\n # keep lengths as the same in order to pack\n pad_len = max_sent_len - real_sample.size(1)\n m = nn.ConstantPad1d((0, pad_len), tgt_pad)\n real_sample = m(real_sample)\n\n real_samples.append(real_sample)\n real_lengths.append(real_length)\n fake_samples.append(fake_sample)\n fake_lengths.append(fake_length)\n passages.append(passage)\n anses.append(ans)\n src_lengths.append(src_length)\n\n real_samples = torch.cat(real_samples, 0).type(torch.LongTensor)\n real_lengths = torch.cat(real_lengths, 0).type(torch.LongTensor)\n fake_samples = torch.cat(fake_samples, 0).type(torch.LongTensor)\n fake_lengths = torch.cat(fake_lengths, 0).type(torch.LongTensor)\n passages = torch.cat(passages, 0).type(torch.LongTensor)\n anses = torch.cat(anses, 0).type(torch.LongTensor)\n src_lengths = torch.cat(src_lengths, 0).type(torch.LongTensor)\n\n dis_inp, dis_target, dis_len, dis_pa, dis_an, src_lens = helpers.prepare_discriminator_data(real_samples, real_lengths,\n fake_samples, fake_lengths, passages, anses, src_lengths, tgt_special)\n\n # iterator\n # for i, dis_data in enumerate(dis_iter):\n # dis_inp = dis_data.question[0]\n # dis_target = dis_data.target\n # dis_pa = dis_data.passage[0]\n # dis_an = dis_data.answer[0]\n\n # collect discriminator data\n # disc_writer = open(\"disc.json\", \"w\")\n # question0 = rev.reverse(dis_inp.permute(1,0))\n # answer0 = ans_rev.reverse(dis_an.permute(1, 0))\n # passage0 = src_rev.reverse(dis_pa.permute(1, 0))\n # for i in range(len(dis_inp)):\n # disc_writer.write(\"{\\\"question\\\": \\\"\" + question0[i][6:] + \"\\\", \")\n # disc_writer.write(\"\\\"answer\\\": \\\"\" + answer0[i] + \"\\\", \")\n # disc_writer.write(\"\\\"passage\\\": \\\"\" + passage0[i] + \"\\\", \")\n # disc_writer.write(\"\\\"target\\\": \\\"\" + str(int(dis_target[i].item())) + \"\\\"}\" + \"\\n\")\n\n # # showcases\n # print(' sample showcase:')\n # show = rev.reverse(dis_inp[:Show_num].permute(1, 0))\n # for i in range(Show_num):\n # print(show[i])\n\n for epoch in range(epochs):\n discriminator.train()\n print('\\n d-step %d epoch %d : ' % (d_step, epoch + 1), end='')\n total_loss = 0\n total_acc = 0\n true_acc = 0\n num_samples = dis_inp.size(0)\n p = list(range(0, num_samples, batch_size))\n random.shuffle(p)\n for i in p:\n inp, target = dis_inp[i: i + batch_size], dis_target[i: i + batch_size]\n lengths = dis_len[i: i + batch_size]\n pa = dis_pa[i: i + batch_size]\n an = dis_an[i: i + batch_size]\n src_le = src_lens[i: i + batch_size]\n if CUDA:\n inp = inp.to(device)\n target = target.to(device)\n lengths = lengths.to(device)\n src_le = src_le.to(device)\n an = an.to(device)\n pa = pa.to(device)\n pa = (pa, an, src_le)\n inp = (inp, lengths)\n\n dis_opt.zero_grad()\n out = discriminator.batchClassify(inp, pa) # hidden = none over here\n loss_fn = nn.BCELoss().to(device)\n loss = loss_fn(out, target)\n loss.backward()\n dis_opt.step()\n\n total_loss += loss.item()\n total_acc += torch.sum((out>0.5)==(target>0.5)).item()\n true = (target > 0.5).type(torch.FloatTensor)\n out = out.cpu()\n out_true = out * true\n true_acc += torch.sum(out_true > 0.5).item()\n\n total_acc = total_acc * 1.0 / float(num_samples)\n true_acc = true_acc * 1.0 / float(num_samples/2)\n print('loss = %.4f, train_acc = %.4f' % 
(total_loss/(num_samples), total_acc), end=' ')\n print('true_acc = %.4f' % true_acc)\n val_acc = eval(val_iter, discriminator, generator)\n # dis_opt.updateLearningRate(val_acc)\n\n\n # todo: when to stop the discriminator MLE training (below are rough settings)\n flag = 0\n if ADV_batches is None:\n if val_acc > out_acc:\n flag = 1\n break\n\n elif d_step == 5 and epoch+1 == PRETRAIN_DISC_EPOCHS:\n flag = 1\n break\n\n else:\n if d_step == 1 and epoch+1 == 5:\n flag = 1\n break\n\n if flag == 1:\n break\n\n# MAIN\nif __name__ == '__main__':\n\n emb_src = nn.Embedding(src_vocab_size + 3, 300, padding_idx=src_pad)\n emb_tgt = nn.Embedding(tgt_vocab_size + 4, 300, padding_idx=tgt_pad)\n emb_ans = nn.Embedding(6, 16, padding_idx=ans_pad)\n\n emb_src.weight.data.copy_(src.vocab.vectors.to(device))\n emb_tgt.weight.data.copy_(tgt.vocab.vectors.to(device))\n\n enc = generator.encoder(emb_src, emb_ans, src_special, ans_special)\n dec = generator.decoder(emb_tgt, tgt_special)\n gen = generator.NQGgenerator(enc, dec, gpu=CUDA)\n # dis = discriminator.Discriminator(emb_src, emb_tgt, emb_ans, gpu=CUDA)\n # dis = discriminator.PQANet(emb_src, emb_tgt)\n # dis = discriminator.TransormerNet(emb_src, emb_tgt)\n dis = discriminator.BiLSTM(emb_src, emb_tgt, emb_ans)\n\n print(dis)\n\n if CUDA:\n enc = enc.to(device)\n dec = dec.to(device)\n gen = gen.to(device)\n dis = dis.to(device)\n\n # GENERATOR MLE TRAINING\n # print('Starting Generator MLE Training...')\n gen_optimizer = Optim('myadam', 1e-3, lr_decay=0.5, start_decay_at=8, max_grad_norm=5)\n gen_optimizer.set_parameters(gen.parameters())\n train_generator_MLE(gen, gen_optimizer, train_iter, MLE_TRAIN_EPOCHS)\n\n print('load the best metric model')\n gen.load_state_dict(torch.load('params.pkl')) # train_generator_MLE saves the best model as 'params.pkl' in the working directory\n print('evaluating the best model')\n gen.eval()\n # print(\"Set gen to {0} mode\".format('train' if model.decoder.dropout.training else 'eval'))\n # valid_bleu = evaluation.evalModel(gen, val_iter, 100, rev, src_special, tgt_special, tgt_ref, src_rev)\n # print('Validation bleu-4 of the best model= %g' % (valid_bleu * 100))\n\n emb_ans.weight.requires_grad = False\n # torch.save(gen.state_dict(), pretrained_gen_path)\n # gen.load_state_dict(torch.load(pretrained_gen_path))\n\n # PRETRAIN DISCRIMINATOR\n print('\\nStarting Discriminator Training...')\n # dis_optimizer = optim.Adam(dis.parameters(), lr=1e-3)\n dis_optimizer = Optim('adam', 1e-3, lr_decay=0.5, max_weight_value=1.0)\n dis_optimizer.set_parameters(dis.parameters())\n train_discriminator(dis, dis_optimizer, train_iter, gen, pretrain_acc, PRETRAIN_DISC_EPOCHS)\n\n # torch.save(dis.state_dict(), pretrained_dis_path)\n # dis.load_state_dict(torch.load(pretrained_dis_path))\n # ADVERSARIAL TRAINING\n pg_count=10000\n best_advbleu = 0\n bleu = 0\n\n pg_optimizer = Optim('myadam', 1e-3, max_grad_norm=5)\n pg_optimizer.set_parameters(gen.parameters())\n gen_optimizer.reset_learningrate(1e-3)\n dis_optimizer.reset_learningrate(1e-3)\n\n for epoch in range(ADV_TRAIN_EPOCHS):\n print('\\n--------\\nEPOCH %d\\n--------' % (epoch+1))\n # TRAIN GENERATOR\n print('\\nAdversarial Training Generator : ', end='')\n sys.stdout.flush()\n emb_ans.weight.requires_grad = True\n gen.train()\n\n while(True):\n train_generator_PG(gen, pg_optimizer, dis, train_iter, ADV_pg_batches)\n # todo: should add teacher forcing training after PG training?\n print(\"teacher forcing training after PG training\")\n train_generator_MLE(gen, gen_optimizer, train_iter, 1, True)\n if best_advbleu <= bleu: # the original '<' could never trigger, since best_advbleu never decreases; stop once bleu stops improving\n break\n else:\n bleu 
= best_advbleu\n\n emb_ans.weight.requires_grad = False\n gen.eval()\n # TRAIN DISCRIMINATOR\n print('\\nAdversarial Training Discriminator : ')\n train_discriminator(dis, dis_optimizer, train_iter, gen, ADV_acc, ADV_DISC_EPOCHS, ADV_dis_batches)" } ]
12
exking/udi-solaredge-poly
https://github.com/exking/udi-solaredge-poly
f7e5dd415cff00f1e1d3f5c809ec1b4aabd40297
d9fa6715eddedfefd84b8ca22839fd97f8708778
0bfaee449cf90b4f1d383522c6aff590df279a9a
refs/heads/master
2020-03-24T07:55:32.423005
2020-03-12T15:32:22
2020-03-12T15:32:22
142,579,473
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.7774480581283569, "alphanum_fraction": 0.781899094581604, "avg_line_length": 31.095237731933594, "blob_id": "e547614b89b2a4a710b5cf97826aae0c9e09d434", "content_id": "219b4d0a5b47c8ce782c506914ba12d6cb58740e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 674, "license_type": "permissive", "max_line_length": 139, "num_lines": 21, "path": "/README.md", "repo_name": "exking/udi-solaredge-poly", "src_encoding": "UTF-8", "text": "# UDI Polyglot v2 SolarEdge Poly\n\n[![license](https://img.shields.io/github/license/mashape/apistatus.svg)](https://github.com/exking/udi-solaredge-poly/blob/master/LICENSE)\n\nThis Poly provides an interface between SolarEdge devices and [Polyglot v2](https://github.com/UniversalDevicesInc/polyglot-v2) server.\n\n### Installation instructions\nYou can install NodeServer from the Polyglot store or manually running\n```\ncd ~/.polyglot/nodeservers\ngit clone https://github.com/exking/udi-solaredge-poly.git SolarEdge\ncd SolarEdge\n./install.sh\n```\nPlease add `api_key` configuration parameter.\n\n### Notes\n\nPlease report any problems on the UDI user forum.\n\nThanks and good luck.\n" }, { "alpha_fraction": 0.5077459812164307, "alphanum_fraction": 0.5206049680709839, "avg_line_length": 41.30828857421875, "blob_id": "95fcafc19eec42e47eaca1e64c0edea1e735c5ee", "content_id": "1bfebee99cd9d7872231bdbe4a93006664470003", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16331, "license_type": "permissive", "max_line_length": 257, "num_lines": 386, "path": "/solaredge-poly.py", "repo_name": "exking/udi-solaredge-poly", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nCLOUD = False\n\ntry:\n import polyinterface\nexcept ImportError:\n import pgc_interface as polyinterface\n CLOUD = True\nimport sys\nimport http.client\nfrom datetime import datetime, timedelta\nimport pytz\nimport logging\nimport json\n\nLOGGER = polyinterface.LOGGER\nSE_API_URL = 'monitoringapi.solaredge.com'\nSINGLE_PHASE = [ 'SE3000', 'SE3800', 'SE5000', 'SE6000', 'SE7600', 'SE10000', 'SE11400', 'SE5000H-US000BNU4', 'SE7600H-US000BNU4', 'SE10000H-US000BNU4' ]\nTHREE_PHASE = [ 'SE9K', 'SE10K', 'SE14.4K', 'SE20K', 'SE33.3K' ]\n\n\nclass Controller(polyinterface.Controller):\n def __init__(self, polyglot):\n super().__init__(polyglot)\n self.name = 'SolarEdge Controller'\n self.address = 'sectrl'\n self.primary = self.address\n self.api_key = None\n self.conn = None\n self.batteries = []\n\n def start(self):\n # LOGGER.setLevel(logging.INFO)\n LOGGER.info('Started SolarEdge controller')\n if 'api_key' not in self.polyConfig['customParams']:\n LOGGER.error('Please specify api_key in the NodeServer configuration parameters');\n return False\n self.api_key = self.polyConfig['customParams']['api_key']\n data = self.api_request('/version/current?api_key='+self.api_key)\n if data is None:\n return False\n if 'version' in data:\n LOGGER.info(f\"Successfully connected to the SolarEdge API Version {data['version']}\")\n self.discover()\n else:\n LOGGER.error('API request failed: {}'.format(json.dumps(data)))\n self.api_close()\n return False\n self.api_close()\n\n def api_request(self, url):\n if self.conn is None:\n self.conn = http.client.HTTPSConnection(SE_API_URL)\n try:\n self.conn.request('GET', url)\n response = self.conn.getresponse()\n except Exception as ex:\n LOGGER.error('Failed to connect to SolarEdge API: {}'.format(ex))\n 
self.api_close()\n # retry once\n self.conn = http.client.HTTPSConnection(SE_API_URL)\n try:\n self.conn.request('GET', url)\n response = self.conn.getresponse()\n except Exception as ex:\n LOGGER.error('Retry attempt failed! {}'.format(ex))\n self.api_close()\n return None\n if response.status == 200:\n try:\n data = json.loads(response.read().decode(\"utf-8\"))\n except Exception as ex:\n LOGGER.error('Failed to json parse API response {} {}'.format(ex, response.read().decode(\"utf-8\")))\n self.api_close()\n return None\n return data\n else:\n LOGGER.error('Bad API response: {}, URL: {}'.format(response.status, url))\n self.api_close()\n return None\n\n def api_close(self):\n if self.conn is not None:\n self.conn.close()\n self.conn = None\n\n def stop(self):\n LOGGER.info('SolarEdge is stopping')\n self.api_close()\n\n def shortPoll(self):\n for node in self.nodes:\n self.nodes[node].updateInfo()\n self.api_close()\n\n def longPoll(self):\n for node in self.nodes:\n self.nodes[node].updateInfo(long_poll=True)\n self.api_close()\n\n def updateInfo(self, long_poll=False):\n pass\n\n def query(self, command=None):\n for node in self.nodes:\n self.nodes[node].reportDrivers()\n\n def _start_time(self, site_tz):\n # Returns site datetime - 60 minutes\n st_time = datetime.utcnow().replace(tzinfo=pytz.utc) - timedelta(minutes=60)\n return st_time.astimezone(pytz.timezone(site_tz)).strftime('%Y-%m-%d%%20%H:%M:%S')\n\n def _end_time(self, site_tz):\n # Returns current site time\n utc_time = datetime.utcnow().replace(tzinfo=pytz.utc)\n return utc_time.astimezone(pytz.timezone(site_tz)).strftime('%Y-%m-%d%%20%H:%M:%S')\n\n def discover(self, command=None):\n LOGGER.info('Discovering SolarEdge sites and equipment...')\n site_list = self.api_request('/sites/list?api_key='+self.api_key)\n if site_list is None:\n return False\n num_sites = int(site_list['sites']['count'])\n LOGGER.info('Found {} sites'.format(num_sites))\n if num_sites < 1:\n LOGGER.warning('No sites found')\n return False\n for site in site_list['sites']['site']:\n name = site['name']\n site_tz = site['location']['timeZone']\n address = str(site['id'])\n LOGGER.info('Found {} site id: {}, name: {}, TZ: {}'.format(site['status'], address, name, site_tz))\n if not address in self.nodes:\n LOGGER.info('Adding site id: {}'.format(address))\n self.addNode(SESite(self, address, address, name, site_tz))\n LOGGER.info('Requesting site inventory...')\n site_inv = self.api_request('/site/'+address+'/inventory?startTime='+self._start_time(site_tz)+'&endTime='+self._end_time(site_tz)+'&api_key='+self.api_key)\n if site_inv is None:\n return False\n num_meter = len(site_inv['Inventory']['meters'])\n num_sens = len(site_inv['Inventory']['sensors'])\n num_gways = len(site_inv['Inventory']['gateways'])\n num_batt = len(site_inv['Inventory']['batteries'])\n num_inv = len(site_inv['Inventory']['inverters'])\n LOGGER.info('Found: {} meters, {} sensors, {} gateways, {} batteries, {} inverters'.format(num_meter, num_sens, num_gways, num_batt, num_inv))\n for inverter in site_inv['Inventory']['inverters']:\n inv_name = inverter['name']\n inv_sn = inverter['SN']\n inv_addr = inverter['SN'].replace('-','').lower()[:14]\n if not inv_addr in self.nodes:\n LOGGER.info('Adding inverter {}'.format(inv_sn))\n if inverter['model'] in SINGLE_PHASE:\n self.addNode(SEInverter(self, address, inv_addr, inv_name, address, inv_sn, site_tz))\n else:\n LOGGER.error('Model {} is not yet supported'.format(inverter['model']))\n for battery in 
site_inv['Inventory']['batteries']:\n batt_name = battery['name']\n batt_sn = battery['SN']\n batt_addr = battery['SN'].replace('-','').lower()[:14]\n if not batt_addr in self.nodes:\n LOGGER.info('Adding battery {}'.format(batt_sn))\n self.addNode(SEBattery(self, address, batt_addr, batt_name, address, batt_sn, site_tz, battery))\n self.batteries.append(batt_sn)\n\n id = 'SECTRL'\n commands = {'DISCOVER': discover}\n drivers = [{'driver': 'ST', 'value': 1, 'uom': 2}]\n\n\nclass SESite(polyinterface.Node):\n def __init__(self, controller, primary, address, name, site_tz):\n super().__init__(controller, primary, address, name)\n self.site_tz = site_tz\n\n def start(self):\n self.updateInfo(long_poll=True)\n\n def updateInfo(self, long_poll=False):\n try:\n if not long_poll:\n return True\n url = '/site/'+self.address+'/powerDetails?startTime='+self.controller._start_time(self.site_tz)+'&endTime='+self.controller._end_time(self.site_tz)+'&api_key='+self.controller.api_key\n power_data = self.controller.api_request(url)\n\n if len(self.controller.batteries) > 0:\n url = '/site/'+self.address+'/storageData?serials='+','.join(map(str, self.controller.batteries))+'&startTime='+self.controller._start_time(self.site_tz)+'&endTime='+self.controller._end_time(self.site_tz)+'&api_key='+self.controller.api_key\n storage_data = self.controller.api_request(url)\n LOGGER.debug(storage_data)\n for battery in storage_data['storageData']['batteries']:\n batt_sn = battery['serialNumber']\n batt_addr = battery['serialNumber'].replace('-','').lower()[:14]\n if battery['telemetryCount'] > 0:\n self.controller.nodes[batt_addr].updateData(battery['telemetries'])\n else:\n LOGGER.debug('no battery telemetries received')\n\n LOGGER.debug(power_data)\n if power_data is None:\n self.setDriver('ST', 0)\n self.setDriver('GV0', 0)\n self.setDriver('GV1', 0)\n self.setDriver('GV2', 0)\n self.setDriver('GV3', 0)\n else:\n for meter in power_data['powerDetails']['meters']:\n if meter['type'] == 'Production':\n try:\n datapoint = meter['values'][-1]\n except:\n continue\n if len(datapoint) == 0:\n self.setDriver('ST', 0)\n if 'value' in datapoint:\n self.setDriver('ST', float(datapoint['value']))\n elif meter['type'] == 'Consumption':\n try:\n datapoint = meter['values'][-1]\n except:\n continue\n if len(datapoint) == 0:\n self.setDriver('GV0', 0)\n if 'value' in datapoint:\n self.setDriver('GV0', float(datapoint['value']))\n elif meter['type'] == 'Purchased':\n try:\n datapoint = meter['values'][-1]\n except:\n continue\n if len(datapoint) == 0:\n self.setDriver('GV1', 0)\n if 'value' in datapoint:\n self.setDriver('GV1', float(datapoint['value']))\n elif meter['type'] == 'SelfConsumption':\n try:\n datapoint = meter['values'][-1]\n except:\n continue\n if len(datapoint) == 0:\n self.setDriver('GV2', 0)\n if 'value' in datapoint:\n self.setDriver('GV2', float(datapoint['value']))\n elif meter['type'] == 'FeedIn':\n try:\n datapoint = meter['values'][-1]\n except:\n continue\n if len(datapoint) == 0:\n self.setDriver('GV3', 0)\n if 'value' in datapoint:\n self.setDriver('GV3', float(datapoint['value']))\n except Exception as ex:\n LOGGER.error('SESite updateInfo failed! 
{}'.format(ex))\n\n def query(self, command=None):\n self.reportDrivers()\n\n id = 'SESITE'\n commands = {'QUERY': query}\n drivers = [{'driver': 'ST', 'value': 0, 'uom': 73},\n {'driver': 'GV0', 'value': 0, 'uom': 73},\n {'driver': 'GV1', 'value': 0, 'uom': 73},\n {'driver': 'GV2', 'value': 0, 'uom': 73},\n {'driver': 'GV3', 'value': 0, 'uom': 73},\n ]\n\n\nclass SEInverter(polyinterface.Node):\n def __init__(self, controller, primary, address, name, site_id, serial_num, site_tz):\n super().__init__(controller, primary, address, name)\n self.serial_num = serial_num\n self.site_id = site_id\n self.site_tz = site_tz\n\n def start(self):\n self.updateInfo()\n\n def updateInfo(self, long_poll=False):\n if long_poll:\n return True\n try:\n url = '/equipment/'+self.site_id+'/'+self.serial_num+'/data?startTime='+self.controller._start_time(self.site_tz)+'&endTime='+self.controller._end_time(self.site_tz)+'&api_key='+self.controller.api_key\n inverter_data = self.controller.api_request(url)\n LOGGER.debug(inverter_data)\n if inverter_data is None:\n return False\n datapoints = int(inverter_data['data']['count'])\n if datapoints < 1:\n LOGGER.warning('No Inverter data received, skipping...')\n return False\n # Take latest data point\n data = inverter_data['data']['telemetries'][-1]\n if not 'L1Data' in data:\n LOGGER.error('Is this a single phase inverter? {}'.format(self.serial_num))\n return False\n self.setDriver('ST', float(data['L1Data']['activePower']))\n if 'reactivePower' in data['L1Data']:\n self.setDriver('GV0', float(data['L1Data']['reactivePower']))\n else:\n self.setDriver('GV0', 0)\n if 'apparentPower' in data['L1Data']:\n self.setDriver('CPW', float(data['L1Data']['apparentPower']))\n else:\n self.setDriver('CPW', 0)\n self.setDriver('CLITEMP', float(data['temperature']))\n self.setDriver('CV', float(data['L1Data']['acVoltage']))\n if data['dcVoltage'] is not None:\n self.setDriver('GV1', float(data['dcVoltage']))\n self.setDriver('GV2', round(float(data['L1Data']['acCurrent']), 1))\n self.setDriver('GV3', round(float(data['L1Data']['acFrequency']), 1))\n if data['inverterMode'] == 'MPPT':\n self.setDriver('GV4', 2)\n elif data['inverterMode'] == 'STARTING':\n self.setDriver('GV4', 1)\n else:\n self.setDriver('GV4', 0)\n except Exception as ex:\n LOGGER.error('SEInverter updateInfo failed! {}'.format(ex))\n\n def query(self, command=None):\n self.reportDrivers()\n\n drivers = [{'driver': 'ST', 'value': 0, 'uom': 73},\n {'driver': 'GV0', 'value': 0, 'uom': 56},\n {'driver': 'CPW', 'value': 0, 'uom': 56},\n {'driver': 'CLITEMP', 'value': 0, 'uom': 4},\n {'driver': 'CV', 'value': 0, 'uom': 72},\n {'driver': 'GV1', 'value': 0, 'uom': 72},\n {'driver': 'GV2', 'value': 0, 'uom': 1},\n {'driver': 'GV3', 'value': 0, 'uom': 90},\n {'driver': 'GV4', 'value': 0, 'uom': 25}\n ]\n id = 'SEINVERTER'\n commands = {\n 'QUERY': query\n }\n\nclass SEBattery(polyinterface.Node):\n def __init__(self, controller, primary, address, name, site_id, serial_num, site_tz, battery):\n super().__init__(controller, primary, address, name)\n self.serial_num = serial_num\n self.site_id = site_id\n self.site_tz = site_tz\n self.battery = battery\n\n def start(self):\n self.updateInfo()\n\n def updateInfo(self, long_poll=False):\n try:\n ''' Battery does not query anything right now but depends on the site node to supply information to save on the number of API calls '''\n self.setDriver('GPV', float(self.battery['nameplateCapacity']))\n except Exception as ex:\n LOGGER.error('SEBattery updateInfo failed! 
{}'.format(ex))\n\n def updateData(self, batt_data=None):\n LOGGER.debug(batt_data)\n if batt_data is None:\n return False\n # Take latest data point\n data = batt_data[-1]\n self.setDriver('ST', data['power'])\n self.setDriver('BATLVL', round(float(data['batteryPercentageState']), 1))\n\n def query(self, command=None):\n self.reportDrivers()\n\n drivers = [{'driver': 'ST', 'value': 0, 'uom': 73},\n {'driver': 'BATLVL', 'value': 0, 'uom': 51},\n {'driver': 'GPV', 'value': 0, 'uom': 56}\n ]\n\n id = 'SEBATT'\n commands = {\n 'QUERY': query\n }\n\n\nif __name__ == \"__main__\":\n try:\n polyglot = polyinterface.Interface('SolarEdge')\n polyglot.start()\n control = Controller(polyglot)\n control.runForever()\n except (KeyboardInterrupt, SystemExit):\n sys.exit(0)\n" } ]
2
vuminhph/news-scraping
https://github.com/vuminhph/news-scraping
9574bebccb7826c718c270a74d8c21666387be8b
d5df939f964b34cb59b77febede5ee6e96693387
50d41c2c14ab18bf6f8b52992ad8ac1819043385
refs/heads/master
2022-04-19T03:32:33.337260
2020-04-23T09:52:41
2020-04-23T09:52:41
258,159,671
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.7117646932601929, "alphanum_fraction": 0.7235293984413147, "avg_line_length": 16, "blob_id": "03e7482b56c3d08a98568d80d86fbc434c2cb638", "content_id": "19c7964616b26857af3354b50e267396c2d43498", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 170, "license_type": "no_license", "max_line_length": 53, "num_lines": 10, "path": "/README.md", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "Use scrapy to scrape data from popular news outlets: \n kenh14.vn\n zing.vn\n thanhnien.vn\n tuoitre.vn\n vietnamnet.vn\n viblo.vn\n dantri.vn\n soha.vn\n nguoiduatin.vn\n" }, { "alpha_fraction": 0.5357428789138794, "alphanum_fraction": 0.538345992565155, "avg_line_length": 46.2512092590332, "blob_id": "f0db6b5f538e202b4556d634980e8deae0d754d9", "content_id": "9339de1e40c011cef6b077f2873ac8f3cf7f37e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9988, "license_type": "no_license", "max_line_length": 187, "num_lines": 207, "path": "/news/spiders/zing.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "import scrapy\r\nimport json\r\n\r\nimport modules.timeConverter as time\r\n\r\n\r\nclass ZingSpider(scrapy.Spider):\r\n name = 'zing'\r\n allowed_domains = ['news.zing.vn']\r\n\r\n def __init__(self, crawlMode='', **kwargs):\r\n super().__init__(**kwargs)\r\n self.crawlMode = crawlMode\r\n if crawlMode is 'update' or crawlMode is '':\r\n self.crawlMode = 'Update'\r\n\r\n self.articleCount = 0\r\n\r\n def start_requests(self):\r\n return [scrapy.Request(\"https://news.zing.vn/\", callback=self.logged_in)]\r\n # return [scrapy.Request(\"https://news.zing.vn/chu-tich-tap-doan-hoa-binh-chung-toi-muon-nang-tam-du-lich-viet-post999148.html\", callback=self.parse_article, meta={'viral': '20'})]\r\n # return [scrapy.Request(\"https://news.zing.vn/video-doan-van-hau-toi-met-nhung-luon-san-sang-cho-tuyen-viet-nam-post998925.html\", callback=self.parse_video)]\r\n\r\n def logged_in(self, response):\r\n # scrape news\r\n for href in response.xpath('//*[@id=\"zing-header\"]//div[@class = \"subcate\"]//li/a/@href'):\r\n yield response.follow(href)\r\n # scrape video\r\n video_href = response.xpath(\r\n '//*[@id=\"section-multimedia\"]//*[text()=\"VIDEO\"]/@href').get()\r\n yield response.follow(video_href, self.parse_video_nav)\r\n\r\n def parse(self, response):\r\n for article in response.xpath('//*[@id=\"news-latest\"]/section/div//article'):\r\n viral_count = {\r\n 'viral-count': article.xpath('./descendant::span[@class = \"viral-count \"]/text()').get()\r\n }\r\n\r\n href = article.xpath('./descendant::a/@href').get()\r\n try:\r\n yield scrapy.Request(\"https://news.zing.vn\"+href, self.parse_article, meta={\r\n 'viral': viral_count})\r\n except Exception:\r\n self.logger.error(\"ERROR: \", exc_info=True)\r\n continue\r\n\r\n next_page = response.xpath(\r\n '//*[@id=\"news-latest\"]/section/div/p/a/@href').get()\r\n if next_page != None:\r\n yield response.follow(next_page, self.parse)\r\n\r\n def parse_article(self, response):\r\n article = {}\r\n\r\n # get ld_json\r\n try:\r\n ld_json = response.xpath(\r\n '//script[contains(text(),\"NewsArticle\")]/text()').get()\r\n ld_json_dict = json.loads(ld_json)\r\n ld_json_dict = time.timestamp_converter(ld_json_dict)\r\n article.update(ld_json_dict)\r\n except:\r\n pass\r\n\r\n # get meta elements\r\n elems = {\r\n 'meta-description': 
response.xpath(\"//meta[@name='description']/@content\").get(),\r\n 'meta-keywords': response.xpath(\"//meta[@name='keywords']/@content\").get(),\r\n 'meta-title': response.xpath(\"//meta[@name='title']/@content\").get(),\r\n 'meta-copyright': response.xpath(\"//meta[@name='copyright']/@content\").get(),\r\n 'meta-author': response.xpath(\"//meta[@name='author']/@content\").get(),\r\n 'language': response.xpath('//meta[@http-equiv = \"content-language\"]/@content').get(),\r\n 'geo.placename': response.xpath('//meta[@name = \"geo.placename\"]/@content').get(),\r\n 'geo.position': response.xpath('//meta[@name = \"geo.region\"]/@content').get(),\r\n 'geo.region': response.xpath('//meta[@name = \"geo.region\"]/@content').get(),\r\n 'meta-article:author': response.xpath(\"//meta[@property='article:author']/@content\").get(),\r\n 'meta-article:publisher': response.xpath(\"//meta[@property='article:publisher']/@content\").get(),\r\n 'category': response.xpath('//p[@class = \"the-article-category\"]/a/text()').get(),\r\n 'organization': 'zing',\r\n 'related_urls': response.xpath('//div[@class = \"article-list layout-grid-3\"]//article/p/a/@href').getall(),\r\n 'url': response.url\r\n }\r\n article.update(elems)\r\n article.update(response.meta['viral'])\r\n\r\n # get content\r\n content = ''\r\n for text in response.xpath('//*[@id=\"page-article\"]/div[@class=\"page-wrapper\"]/descendant::div[@class = \"the-article-body\"]/p/text()').getall():\r\n content += text.strip()\r\n article.update({'content': content})\r\n\r\n word_count = len(content.split())\r\n article.update({'word_count': word_count})\r\n\r\n # get image url\r\n images = {}\r\n for index, src in enumerate(response.xpath('//*[@id=\"page-article\"]/div[@class=\"page-wrapper\"]/descendant::table[@class = \"picture\"]//img/@src').getall(), 1):\r\n images.update({'image' + str(index): src})\r\n article.update({'image-urls': images})\r\n\r\n # get video url\r\n videos = {}\r\n for index, src in enumerate(response.xpath('//figure[@class=\"video cms-video\"]/@data-video-src').getall(), 1):\r\n videos.update({'video' + str(index): src})\r\n article.update({'video urls': videos})\r\n\r\n # get comments\r\n id = response.xpath('//@article-id').get()\r\n cmt_request = \"https://api.news.zing.vn/api/comment.aspx?action=get&id=\"+id\r\n yield scrapy.Request(cmt_request, callback=self.parse_comments, meta={'article': article})\r\n\r\n def parse_comments(self, response):\r\n article = response.meta['article']\r\n\r\n str = ''\r\n for a in response.xpath('//text()').getall():\r\n str += a\r\n\r\n dict = json.loads(str, strict=False)\r\n if len(dict) is not 0:\r\n dict.pop('current_page')\r\n comments_list = dict.get('comments')\r\n if comments_list is not None and len(comments_list) is not 0:\r\n for comment in comments_list:\r\n comment['SenderFullName'] = comment.pop('DisplayName')\r\n comment['CommentContent'] = comment.pop('Comment')\r\n comment['CreatedDate'] = comment.pop('CreationDate')\r\n comment['Liked'] = comment.pop('Like')\r\n if comment['Replies'] is not None:\r\n for reply in comment['Replies']:\r\n reply['SenderFullName'] = reply.pop('DisplayName')\r\n reply['CommentContent'] = reply.pop('Comment')\r\n reply['CreatedDate'] = reply.pop('CreationDate')\r\n reply['Liked'] = reply.pop('Like')\r\n reply['Replies'] = []\r\n\r\n article.update(dict)\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n article.get('url'))\r\n self.articleCount += 1\r\n return article\r\n\r\n def parse_video_nav(self, response):\r\n for video in 
response.xpath('//div[@class = \"article-list listing-layout\"]/article'):\r\n try:\r\n viral = video.xpath(\r\n './header/p[@class = \"article-meta\"]//span[@class = \"viral-count\"]/text()').get()\r\n if viral is None:\r\n viral = 'None'\r\n view = video.xpath(\r\n './header/p[@class = \"article-meta\"]//span[@class = \"view-count\"]/text()').get()\r\n if view is None:\r\n view = 'None'\r\n viral = {\r\n 'viral-count': viral,\r\n 'view-count': view\r\n }\r\n passing_url = \"https://news.zing.vn\" + \\\r\n video.xpath('./p/a/@href').get()\r\n yield scrapy.Request(passing_url, callback=self.parse_video, meta={'viral': viral})\r\n except Exception:\r\n self.logger.error(\"ERROR: \", exc_info=True)\r\n continue\r\n next_page = response.xpath(\r\n '// div[@class=\"article-list listing-layout\"]/ul[@class = \"pagination\"]//@href')\r\n if next_page is not None:\r\n next_page = next_page[0]\r\n yield response.follow(next_page, self.parse_video_nav)\r\n\r\n def parse_video(self, response):\r\n video = {}\r\n\r\n # get ld+json\r\n ld_json = response.xpath(\r\n '//script[@type = \"application/ld+json\"]/text()').get()\r\n video = json.loads(ld_json, strict=False)\r\n\r\n # get elems\r\n elems = {\r\n 'meta-description': response.xpath(\"//meta[@name='description']/@content\").get(),\r\n 'meta-keywords': response.xpath(\"//meta[@name='keywords']/@content\").get(),\r\n 'meta-title': response.xpath(\"//meta[@name='title']/@content\").get(),\r\n 'meta-copyright': response.xpath(\"//meta[@name='copyright']/@content\").get(),\r\n 'meta-author': response.xpath(\"//meta[@name='author']/@content\").get(),\r\n 'language': response.xpath('//meta[@http-equiv = \"content-language\"]/@content').get(),\r\n 'geo.placename': response.xpath('//meta[@name = \"geo.placename\"]/@content').get(),\r\n 'geo.position': response.xpath('//meta[@name = \"geo.region\"]/@content').get(),\r\n 'geo.region': response.xpath('//meta[@name = \"geo.region\"]/@content').get(),\r\n 'meta-article:author': response.xpath(\"//meta[@property='article:author']/@content\").get(),\r\n 'meta-article:publisher': response.xpath(\"//meta[@property='article:publisher']/@content\").get(),\r\n 'category': 'textArticle',\r\n 'related_urls': response.xpath('//div[@class = \"article-list layout-grid-3\"]//article/p/a/@href').getall(),\r\n 'url': response.url\r\n }\r\n video.update(elems)\r\n\r\n # get video source\r\n video.update({\"video-source\": response.xpath(\r\n '//div[@id = \"video-featured\"]//video/source[@res=\"720\"]/@src').get()})\r\n\r\n # get viral info\r\n video.update(response.meta['viral'])\r\n\r\n # get comments\r\n id = response.xpath('//@article-id').get()\r\n cmt_request = \"https://api.news.zing.vn/api/comment.aspx?action=get&id=\"+id\r\n yield scrapy.Request(cmt_request, callback=self.parse_comments, meta={'article': video})\r\n" }, { "alpha_fraction": 0.5477628111839294, "alphanum_fraction": 0.5518465638160706, "avg_line_length": 47.85840606689453, "blob_id": "14ee417c04cd18db785ab8b405b9207c501a4b61", "content_id": "89f875b5795f020d7a70d0c532e216d782dbdb8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5632, "license_type": "no_license", "max_line_length": 281, "num_lines": 113, "path": "/news/spiders/afamily.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "from scrapy.spiders import CrawlSpider, Rule\r\nfrom scrapy.linkextractors import LinkExtractor\r\nimport scrapy\r\nimport json\r\nimport modules.timeConverter as time\r\n\r\nclass 
AfamilySpider(CrawlSpider):\r\n    name = \"afamily\"\r\n    custom_settings = {\r\n        'CONCURRENT_REQUESTS': 100,\r\n        'REACTOR_THREADPOOL_MAXSIZE': 20,\r\n        'LOG_LEVEL': 'INFO',\r\n        'COOKIES_ENABLED': False,\r\n        'RETRY_ENABLED': False,\r\n        'REDIRECT_ENABLED': False,\r\n        'AJAXCRAWL_ENABLED': True,\r\n    }\r\n    allowed_domains = ['afamily.vn', 'sharefb.cnnd.vn', ]\r\n    start_urls = ['http://afamily.vn/dep.chn',]\r\n\r\n    rules = (\r\n        Rule(LinkExtractor(allow_domains=['afamily.vn']), callback='parse_item', follow=True),\r\n    )\r\n\r\n    def __init__(self, crawlMode='', **kwargs):\r\n        super().__init__(**kwargs)\r\n        self.crawlMode = crawlMode\r\n        if crawlMode == 'update' or crawlMode == '':\r\n            self.crawlMode = 'Update'\r\n        print(self.crawlMode)\r\n\r\n        self.articleCount = 0\r\n\r\n    def parse_item(self, response):\r\n        article = dict()\r\n        image = dict()\r\n        images = []\r\n        try:\r\n            ld_json = response.xpath('//script[contains(text(),\"NewsArticle\")]/text()').get()\r\n            if ld_json is None:\r\n                return\r\n            ld_json = json.loads(ld_json)\r\n            ld_json = time.timestamp_converter(ld_json)\r\n            article.update(ld_json)\r\n        except ValueError:\r\n            return\r\n        title = response.xpath('//meta[@property=\"og:title\"]/@content').get()\r\n        link = response.url\r\n        article.update({'title': title, 'link': link})\r\n        # get meta\r\n        article.update({'type': response.xpath(\"//head/meta[@property='og:type']/@content\").get()})\r\n        article.update({'description': response.xpath(\"//head/meta[@name='description']/@content\").get()})\r\n        article.update({'keywords': response.xpath(\"//meta[@name='keywords']/@content\").get()})\r\n        article.update({'category': response.xpath(\"//meta[@property='article:section']/@content\").get()})\r\n        article.update({'copyright': response.xpath(\"//meta[@name='copyright']/@content\").get()})\r\n        article.update({'language': response.xpath(\"//meta[@name='Language']/@content\").get()})\r\n        article.update({'geo_place_name': response.xpath(\"//meta[@name = 'geo.placename']/@content\").get()})\r\n        article.update({'geo_region': response.xpath(\"//meta[@name = 'geo.region']/@content\").get()})\r\n        article.update({'geo_position': response.xpath(\"//meta[@name = 'geo.position']/@content\").get()})\r\n        article.update({'organization': 'Afamily'})\r\n        # author, content, title\r\n        content = ''\r\n        title = response.xpath('//div[@class=\"w700 mr-40 fl\"]/h1/text()').getall()\r\n        article.update({'title': title})\r\n        for text in response.xpath(\r\n                '(//div[@id=\"af-detail-content\"]/p/text())|(//div[@data-role=\"content\"]/div/span/text())|(//p['\r\n                '@class=\"MsoNormal\"]/text())|(//*[@id=\"af-detail-content\"]/div/div/div/text())|(//*['\r\n                '@id=\"af-detail-content\"]/div/div/div/span/text())|(//*[@id=\"af-detail-content\"]/div/div/p/text())').getall():\r\n            content += text.strip()\r\n        article.update({'content_article': content})\r\n        if content:\r\n            word_count = len(content.split())\r\n            article.update({'word_count': word_count})\r\n        else:\r\n            word_count = -1\r\n            article.update({'word_count': word_count})\r\n        url_image = response.xpath('//meta[@property=\"og:image\"]/@content').get()\r\n        if url_image is not None:\r\n            image.update({'url': url_image})\r\n            image.update({'alt': response.xpath('//meta[@property=\"og:image:alt\"]/@content').get()})\r\n            image.update({'width': response.xpath('//meta[@property=\"og:image:width\"]/@content').get()})\r\n            image.update({'height': 
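# og:image:* dimension tags are optional Open Graph metadata, so None may be stored here\r\n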
response.xpath('//meta[@property=\"og:image:height\"]/@content').get()})\r\n images.append(image)\r\n article.update({'image': images})\r\n\r\n # get thumbnail\r\n thumbnail = response.xpath('(//div[@class=\"VCSortableInPreviewMode LayoutAlbumWrapper alignJustify noCaption\"]/div/div/div/figure/a/@href)|(//div[@type=\"Photo\"]/div/a/img/@src)|(//figure[@type=\"Photo\"]/div/a/img/@src)|(//a[@class=\"detail-img-lightbox\"]/img/@src)').getall()\r\n article.update({'thumbnail': thumbnail})\r\n with open(\"body.html\",\"wb\") as f:\r\n f.write(response.body)\r\n\r\n # get likes,comments\r\n yield scrapy.Request('http://sharefb.cnnd.vn/?urls=' + response.url, callback=self.parse_interations,\r\n headers={'Accept': 'application/json, text/javascript, */*; q=0.01',\r\n 'Origin': 'https://afamily.vn',\r\n 'Sec-Fetch-Mode': 'cors',\r\n 'Referer': article.get('link')},\r\n meta={'article': article})\r\n\r\n def parse_interations(self, response):\r\n dict1 = {}\r\n str1 = response.xpath('//text()').get()\r\n article = response.meta['article']\r\n list_inter = json.loads(str1)\r\n dict_inter = dict(list_inter[0])\r\n del dict_inter['url']\r\n article.update(dict_inter)\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n article.get('link'))\r\n self.articleCount += 1\r\n yield article" }, { "alpha_fraction": 0.5276342630386353, "alphanum_fraction": 0.5393768548965454, "avg_line_length": 54.504425048828125, "blob_id": "53ee04f1a36ff36bdd080709e1fb8abae356c8b7", "content_id": "a7a74fd4520a4b10fbdf5634d3600c21e752b945", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6387, "license_type": "no_license", "max_line_length": 162, "num_lines": 113, "path": "/news/spiders/saostar.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "import scrapy\r\nfrom scrapy.spiders import CrawlSpider, Rule\r\nfrom scrapy.linkextractors import LinkExtractor\r\nimport modules.timeConverter as time\r\nclass SaostarSpider(CrawlSpider):\r\n name = \"saostar\"\r\n allowed_domains = ['saostar.vn', 'sharefb.cnnd.vn', 'www.facebook.com']\r\n start_urls = ['http://saostar.vn/']\r\n rules = (\r\n Rule(LinkExtractor(allow_domains=['saostar.vn']), callback='parse_item', follow=True),\r\n )\r\n\r\n def __init__(self, crawlMode='', **kwargs):\r\n super().__init__(**kwargs)\r\n self.crawlMode = crawlMode\r\n if crawlMode is 'update' or crawlMode is '':\r\n self.crawlMode = 'Update'\r\n print(self.crawlMode)\r\n\r\n self.articleCount = 0\r\n\r\n def parse_item(self, response):\r\n article = dict()\r\n image = dict()\r\n images = []\r\n title = response.xpath('//div[@class=\"head-article\"]/h1/@data-title').get()\r\n if title is not None:\r\n # get meta\r\n article.update({'headline': response.xpath('//meta[@itemprop=\"headline\"]/@content').get()})\r\n article.update({'datePublished': response.xpath('//time[@itemprop=\"datePublished\"]/@datetime').get()})\r\n article.update({'dateModified': response.xpath('//time[@itemprop=\"dateModified\"]/@datetime').get()})\r\n article.update({'publisher': response.xpath('//div[@itemprop=\"publisher\"]/span/text()').get()})\r\n article.update({'type': response.xpath(\"//head/meta[@property='og:type']/@content\").get()})\r\n article.update({'description': response.xpath(\"//head/meta[@name='description']/@content\").get()})\r\n article.update({'keywords': response.xpath(\"//head/meta[@name='keywords']/@content\").get()})\r\n article.update({'category': 
response.xpath(\"//head/meta[@property='article:section']/@content\").get()})\r\n article.update({'copyright': response.xpath(\"//head/meta[@name='copyright']/@content\").get()})\r\n article.update({'Language': response.xpath(\"//head/meta[@name='Language']/@content\").get()})\r\n article.update({'geo_place_name': response.xpath(\"//meta[@name = 'geo.placename']/@content\").get()})\r\n article.update({'geo_region': response.xpath(\"//meta[@name = 'geo.region']/@content\").get()})\r\n article.update({'geo_position': response.xpath(\"//meta[@name = 'geo.position']/@content\").get()})\r\n article.update({'organization': 'Saostar'})\r\n article = time.timestamp_converter(article)\r\n url_img = response.xpath('//meta[@property=\"og:image\"]/@content').get()\r\n if url_img is not None:\r\n image.update({'url': response.xpath('//meta[@property=\"og:image\"]/@content').get()})\r\n image.update({'alt': response.xpath('//meta[@property=\"og:image:alt\"]/@content').get()})\r\n image.update({'width': response.xpath('//meta[@property=\"og:image:width\"]/@content').get()})\r\n image.update({'height': response.xpath('//meta[@property=\"og:image:height\"]/@content').get()})\r\n images.append(image)\r\n article.update({'image': images})\r\n # title, link, author, content\r\n link = response.url\r\n article.update({'title': title, 'link': link})\r\n article.update({'author': response.xpath(\"//span[@class='writer']/text()\").get()})\r\n content = ''\r\n for text in response.xpath('(//div[@id=\"content_detail\"]/p/text())|'\r\n '(//span['\r\n '@class=\"wp-caption-text\"]/text())').getall():\r\n content += text.strip()\r\n article.update({'content_article': content})\r\n if content is not None:\r\n word_count = len(content.split())\r\n article.update({'word_count': word_count})\r\n else:\r\n word_count = -1\r\n article.update({'word_count': word_count})\r\n\r\n # get image\r\n thumbnail = response.xpath('(//p/a/img/@src)|(//strong/a/img/@src)|(//div/a/img/@src)').getall()\r\n if thumbnail is not []:\r\n article.update({'thumbnail': thumbnail})\r\n # get relate_url\r\n relate_url = []\r\n htags = response.xpath(\r\n '(//div[@class=\"content-block\"]/div[@class=\"post mt15 js-post \"]/h4[@class=\"post-title pl15 dis-inline-block\"])|(//h3[@class=\"post-title mb10\"])')\r\n for tag in htags:\r\n relate_urls = {}\r\n headline = tag.xpath('a/text()').get()\r\n if headline is not []:\r\n url = str(tag.xpath('a/@href').extract_first())\r\n relate_urls.update({'headline': headline, 'url': url})\r\n relate_url.append(relate_urls)\r\n article.update({\"related_url\": relate_url})\r\n # get interactions\r\n\r\n url = response.xpath('//meta[@itemprop=\"url\"]/@content').get()\r\n like_request = \"https://www.facebook.com/v2.8/plugins/like.php?action=like&channel=https%3A%2F%2Fstaticxx\" \\\r\n \".facebook.com%2Fconnect%2Fxd_arbiter.php%3Fversion%3D44%23cb%3Df37cc7337bc398%26domain\" \\\r\n \"%3Dsaostar.vn%26origin%3Dhttps%253A%252F%252Fsaostar.vn%252Ff3ecd646e17999%26relation\" \\\r\n \"%3Dparent.parent&container_width=0&href=\" + url \\\r\n + \"&layout=button_count&locale=vi_VN&sdk=joey&share=true&show_faces=false\"\r\n yield scrapy.Request(like_request, callback=self.parse_like, meta={'data': article})\r\n else:\r\n pass\r\n\r\n def parse_like(self, response):\r\n log = response.meta['data']\r\n likes = response.xpath('(//span[@id=\"u_0_3\"]/text())|(//*[@id=\"u_0_4\"]/text())').get()\r\n if likes is not None:\r\n if \"k\" in likes.lower():\r\n likes = likes.lower()\r\n likes = likes.replace(\",\", \".\")\r\n likes = 
likes.replace(\"k\", \"\")\r\n likes = float(likes) * 1000\r\n likes = int(likes)\r\n else:\r\n likes = -1\r\n log.update({'like_count': likes})\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n log.get('link'))\r\n self.articleCount += 1\r\n yield log\r\n\r\n" }, { "alpha_fraction": 0.5100607872009277, "alphanum_fraction": 0.5195887684822083, "avg_line_length": 47.23765563964844, "blob_id": "0ab63067e8eb6a1a69985cf61d7013dfac03eabd", "content_id": "721953af36ff17123c72ff27e2bb39914fc26f8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15954, "license_type": "no_license", "max_line_length": 358, "num_lines": 324, "path": "/news/spiders/thanhnien.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "import scrapy\r\nimport json\r\nimport modules.timeConverter as time\r\n\r\n\r\nclass ThanhnienSpider(scrapy.Spider):\r\n name = 'thanhnien'\r\n allowed_domains = ['thanhnien.vn']\r\n\r\n def __init__(self, crawlMode='', **kwargs):\r\n super().__init__(**kwargs)\r\n self.crawlMode = crawlMode\r\n if crawlMode is 'update' or crawlMode is '':\r\n self.crawlMode = 'Update'\r\n\r\n self.articleCount = 0\r\n\r\n def start_requests(self):\r\n return [scrapy.Request(\"https://thanhnien.vn\", callback=self.logged_in)]\r\n # return [scrapy.Request(\"https://thanhnien.vn/tai-chinh-kinh-doanh/mui-ne-lao-dao-vi-thieu-khach-nga-516854.html\", callback=self.parse_article)]\r\n # return [scrapy.Request(\"https://thanhnien.vn/van-hoa/luat-hong-lan-lon-tien-su-tien-chua-1134512.html\", callback=self.parse_article, meta={'atc_type': \"normal\"})]\r\n # return [scrapy.Request(\"https://xe.thanhnien.vn/thi-truong-xe/ford-trieu-hoi-gan-20000-xe-ban-tai-ranger-nguy-co-chay-do-chap-dien-20442.html\", callback=self.parse_article, meta={'atc_type': \"video\"})]\r\n # return [scrapy.Request(\"https://thethao.thanhnien.vn/bong-da-viet-nam/tuyen-viet-nam-giu-cu-ly-doi-hinh-xuat-sac-va-hau-ve-tan-cong-qua-hay-106518.html\", callback=self.parse_article, meta={'atc_type': \"sport\"})]\r\n\r\n def logged_in(self, response):\r\n urls = [\r\n \"https://thanhnien.vn/thoi-su/\",\r\n \"https://thanhnien.vn/the-gioi/\",\r\n \"https://thanhnien.vn/tai-chinh-kinh-doanh/\",\r\n \"https://thanhnien.vn/doi-song/\",\r\n \"https://thanhnien.vn/van-hoa/\",\r\n \"https://thanhnien.vn/gioi-tre/\",\r\n \"https://thanhnien.vn/giao-duc/\",\r\n \"https://thanhnien.vn/suc-khoe/\",\r\n \"https://thanhnien.vn/du-lich/\",\r\n \"https://thanhnien.vn/cong-nghe/\",\r\n ]\r\n\r\n # scrape articles\r\n for url in urls:\r\n yield scrapy.Request(url, self.parse, meta={'page_index': 1, 'cate': 'normal'})\r\n\r\n # scrape sport articles\r\n yield scrapy.Request(\"https://thethao.thanhnien.vn/\", callback=self.parse_nav, meta={'cate': 'sport'})\r\n\r\n # scrape cars articles\r\n yield scrapy.Request(\"https://xe.thanhnien.vn/\", callback=self.parse_nav, meta={'cate': 'other'})\r\n\r\n # scrape game articles\r\n yield scrapy.Request(\"https://game.thanhnien.vn/\", callback=self.parse_nav, meta={'cate': 'other'})\r\n\r\n # scrape video articles\r\n yield scrapy.Request(\"https://video.thanhnien.vn/\", callback=self.parse_nav, meta={'cate': 'other'})\r\n\r\n def parse_nav(self, response):\r\n cate = response.meta['cate']\r\n if cate == \"other\":\r\n parser = self.parse\r\n if cate == \"sport\":\r\n parser = self.parse_sport_passer\r\n\r\n for href in response.xpath('//nav[@class = \"site-header__nav\"]/a/@href'):\r\n try:\r\n yield response.follow(href, parser, 
meta={'page_index': 1, 'cate': cate})\r\n            except Exception:\r\n                self.logger.error(\"ERROR: \", exc_info=True)\r\n                continue\r\n\r\n    def parse_sport_passer(self, response):\r\n        for segment in response.xpath('//header[@class=\"heading\"]/h3/a/@href'):\r\n            try:\r\n                yield response.follow(segment, self.parse, meta=response.meta)\r\n            except Exception:\r\n                self.logger.error(\"ERROR: \", exc_info=True)\r\n                continue\r\n\r\n    def parse(self, response):\r\n        page_index = response.meta['page_index']\r\n        cate = response.meta['cate']\r\n\r\n        if page_index == 1:\r\n            section = response.url\r\n        else:\r\n            section = response.meta['section']\r\n\r\n        if cate == 'normal':\r\n            if page_index == 1:\r\n                yield response.follow(response.xpath('//div[@class=\"l-content\"]/div[@class=\"highlight\"]//a/@href').get(), self.parse_article, meta={'atc_type': 'normal'})\r\n                for href in response.xpath('//div[@class=\"l-content\"]/div[@class=\"feature\"]//h2/a/@href').getall():\r\n                    try:\r\n                        yield response.follow(href, self.parse_article)\r\n                    except Exception:\r\n                        self.logger.error(\"ERROR: \", exc_info=True)\r\n                        continue\r\n            if page_index != 1 and section == response.url or response.xpath('//div[@class=\"l-content\"]/div[@class=\"feature\"]//h2/a/@href').get() is None:\r\n                return\r\n            for href in response.xpath('//div[@class = \"relative\"]/article/a/@href'):\r\n                try:\r\n                    yield response.follow(href, callback=self.parse_article)\r\n                except Exception:\r\n                    self.logger.error(\"ERROR: \", exc_info=True)\r\n                    continue\r\n        if cate == 'other':\r\n            if page_index == 1:\r\n                for href in response.xpath('//article[@class=\"spotlight\"]/a/@href'):\r\n                    try:\r\n                        yield response.follow(href, self.parse_article)\r\n                    except Exception:\r\n                        self.logger.error(\"ERROR: \", exc_info=True)\r\n                        continue\r\n            if response.xpath('//article[@class=\"clearfix\"]/a/@href').get() is None:\r\n                return\r\n            for href in response.xpath('//article[@class=\"clearfix\"]/a/@href'):\r\n                try:\r\n                    yield response.follow(href, self.parse_article)\r\n                except Exception:\r\n                    self.logger.error(\"ERROR: \", exc_info=True)\r\n                    continue\r\n        if cate == 'sport':\r\n            if page_index == 1:\r\n                for href in response.xpath('//section[@class=\"highlight clearfix\"]//header/a/@href'):\r\n                    try:\r\n                        yield response.follow(href, self.parse_article)\r\n                    except Exception:\r\n                        self.logger.error(\"ERROR: \", exc_info=True)\r\n                        continue\r\n            if response.xpath('//div[@class=\"timeline\"]/nav//a[@id=\"ctl00_main_ContentList1_pager_nextControl\"]/@href').get() is None:\r\n                return\r\n            for href in response.xpath('//div[@class=\"timeline\"]//article//h2/a/@href'):\r\n                try:\r\n                    yield response.follow(href, self.parse_article)\r\n                except Exception:\r\n                    self.logger.error(\"ERROR: \", exc_info=True)\r\n                    continue\r\n\r\n        next_page = section + \"trang-\"+str(page_index+1)+\".html\"\r\n        yield scrapy.Request(next_page, callback=self.parse, meta={'page_index': page_index+1, 'section': section, 'cate': cate})\r\n\r\n    def parse_article(self, response):\r\n        article = {}\r\n\r\n        # get ld_json\r\n        try:\r\n            ld_json = response.xpath(\r\n                \"//script[contains(text(),'NewsArticle')]/text()\").get()\r\n            ld_json_dict = json.loads(ld_json)[0]\r\n            ld_json_dict = time.timestamp_converter(ld_json_dict)\r\n            article.update(ld_json_dict)\r\n        except:\r\n            pass\r\n        # get meta elements\r\n        elems = {\r\n            'meta-description': response.xpath(\"//meta[@name='description']/@content\").get(),\r\n            'meta-keywords': response.xpath(\"//meta[@name='keywords']/@content\").get(),\r\n            'meta-title': response.xpath(\"//meta[@name='title']/@content\").get(),\r\n            
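# the remaining tags are optional; .get() returns None when a tag is missing\r\n            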
'meta-copyright': response.xpath(\"//meta[@name='copyright']/@content\").get(),\r\n            'meta-author': response.xpath(\"//meta[@name='author']/@content\").get(),\r\n            'language': response.xpath('//meta[@http-equiv = \"content-language\"]/@content').get(),\r\n            'geo.placename': response.xpath('//meta[@name = \"geo.placename\"]/@content').get(),\r\n            'geo.position': response.xpath('//meta[@name = \"geo.position\"]/@content').get(),\r\n            'geo.region': response.xpath('//meta[@name = \"geo.region\"]/@content').get(),\r\n            'meta-article:author': response.xpath(\"//meta[@property='article:author']/@content\").get(),\r\n            'meta-article:publisher': response.xpath(\"//meta[@property='article:publisher']/@content\").get(),\r\n            'category': response.xpath('//h2[@class = \"headline\"]/a/text()').get(),\r\n            'organization': 'thanh niên',\r\n            'url': response.url,\r\n            # 'related_urls': response.xpath('//div[@class = \"article-oldnew\"]//div/div[@class = \"article-oldnew-img\"]/a/@href').getall()\r\n        }\r\n        article.update(elems)\r\n\r\n        # get video url\r\n        videos = []\r\n\r\n        try:\r\n            url_finder = response.xpath(\r\n                '//figure[@itemprop = \"associatedMedia\"]/script/text()').get()\r\n            pv1 = url_finder.find(\"src\")\r\n            pv2 = url_finder[pv1:].find('\"') + pv1+1\r\n            pv3 = url_finder[pv2:].find('\"') + pv2\r\n            video_url = url_finder[pv2:pv3]\r\n            videos.append(video_url)\r\n        except:\r\n            pass\r\n\r\n        video_url = response.xpath(\r\n            '//table[@class=\"video\"]//@data-video-src').get()\r\n        if video_url is not None:\r\n            videos.append(video_url)\r\n\r\n        article.update({'videos-url': videos})\r\n\r\n        # get content\r\n        content = ''\r\n        for text in response.xpath('//div[@id=\"abody\"]//p[contains(@style,\"margin\")or contains(@style,\"text\")]/text()').getall():\r\n            content += text.strip()\r\n        for text in response.xpath('//*[@id=\"abody\"]//div/text()').getall():\r\n            content += text.strip()\r\n        article.update({'content': content})\r\n\r\n        word_count = len(content.split())\r\n        article.update({'word_count': word_count})\r\n\r\n        # get image url\r\n        images = {}\r\n        ava_index = 0\r\n        for ava_index, src in enumerate(response.xpath('//*[@id=\"contentAvatar\"]//a/img/@src').getall(), 1):\r\n            images.update({'image' + str(ava_index): src})\r\n        index = ava_index + 1\r\n        for index, src in enumerate(response.xpath('//*[@class=\"imagefull\"]//@data-src').getall(), index):\r\n            images.update({'image' + str(index): src})\r\n\r\n        article.update({'image-urls': images})\r\n\r\n        # get comments\r\n        comments_count = response.xpath('//*[@id=\"commentcount\"]/text()').get()\r\n        article.update({'comments-count': comments_count})\r\n        comments = []\r\n\r\n        for comment in response.xpath('//*[@id=\"commentcontainer\"]/div'):\r\n            primary_comment = comment.xpath(\r\n                './div[@class = \"primary-comment\"]')\r\n            primary_ava = primary_comment.xpath(\r\n                './/div[@class = \"ava\"]/img/@data-src').get()\r\n            primary_user = primary_comment.xpath(\r\n                './/div[@class = \"data\"]/div[@class = \"meta\"]/h4/text()').get()\r\n            if primary_user is not None:\r\n                primary_user = primary_user.strip()\r\n            primary_geo = primary_comment.xpath(\r\n                './/div[@class = \"data\"]/div[@class = \"meta\"]/time/text()').get()\r\n            if primary_geo is not None:\r\n                primary_geo = primary_geo.strip()\r\n            primary_content = primary_comment.xpath(\r\n                './/div[@class = \"data\"]/div[@class = \"comment\"]/text()').get()\r\n            if primary_content is not None:\r\n                primary_content = primary_content.strip()\r\n            primary_time = primary_comment.xpath(\r\n                './/div[@class = \"meta\"]/time/@rel').get()\r\n            primary_likes = primary_comment.xpath(
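# the like button's label is free text, so only its digits are kept below\r\n                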
'.//div[@class = \"data\"]/div[@class = \"reply\"]//a[@class = \"likebtn\"]//text()').get()\r\n if primary_likes is not None:\r\n primary_likes = primary_likes.strip()\r\n strings = [s for s in primary_likes.split() if s.isdigit()]\r\n if len(strings) != 0:\r\n primary_likes = strings[0]\r\n else:\r\n primary_likes = '0'\r\n\r\n secondary_dict = []\r\n counter = 0\r\n for counter, reply in enumerate(comment.xpath(\r\n './/div[@class = \"secondary-comment\"]'), 1):\r\n secondary_ava = reply.xpath(\r\n './/div[@class = \"ava\"]/img/@data-src').get()\r\n secondary_user = reply.xpath(\r\n './/div[@class = \"data\"]/div[@class = \"meta\"]/h4/text()').get()\r\n if secondary_user is not None:\r\n secondary_user = secondary_user.strip()\r\n secondary_geo = reply.xpath(\r\n './/div[@class = \"data\"]/div[@class = \"meta\"]/time/text()').get()\r\n if secondary_geo is not None:\r\n secondary_geo = secondary_geo.strip()\r\n secondary_content = reply.xpath(\r\n './/div[@class = \"data\"]/div[@class = \"comment\"]/text()').get()\r\n if secondary_content is not None:\r\n secondary_content = secondary_content.strip()\r\n secondary_time = reply.xpath(\r\n './/div[@class = \"meta\"]/time/@rel').get()\r\n secondary_likes = reply.xpath(\r\n './/div[@class = \"data\"]/div[@class = \"reply\"]//a[@class = \"likebtn\"]//text()').get()\r\n if secondary_likes is not None:\r\n secondary_likes = secondary_likes.strip()\r\n strings = [s for s in secondary_likes.split()\r\n if s.isdigit()]\r\n if len(strings) != 0:\r\n secondary_likes = strings[0]\r\n else:\r\n secondary_likes = '0'\r\n\r\n secondary_dict.append({'SenderAvatar': secondary_ava,\r\n 'SenderFullName': secondary_user,\r\n 'PublishedGeo': secondary_geo,\r\n 'CommentContent': secondary_content,\r\n 'CreatedDate': secondary_time,\r\n 'Liked': secondary_likes,\r\n 'Replies-count': 0,\r\n 'Replies': []})\r\n\r\n comments.append({\r\n 'SenderAvatar': primary_ava,\r\n 'SenderFullName': primary_user,\r\n 'PublishedGeo': primary_geo,\r\n 'CommentContent': primary_content,\r\n 'CreatedDate': primary_time,\r\n 'Liked': primary_likes,\r\n 'Replies-count': counter,\r\n 'Replies': secondary_dict if counter != 0 else None\r\n })\r\n article.update({'comments': comments})\r\n\r\n # get likes\r\n url = response.xpath(\r\n '//li[@class = \"zalo-share-button\"]/@data-href').get()\r\n if url is None:\r\n url = response.xpath('//li[@class=\"fb-share\"]/a/@href').get()\r\n url = url.replace(\"=\", \"%3D\")\r\n url = url.replace(\"/\", \"%2F\")\r\n url = url.replace(\":\", \"%3A\")\r\n\r\n like_request = \"https://www.facebook.com/v3.1/plugins/like.php?action=like&app_id=288067561729014&channel=https%3A%2F%2Fstaticxx.facebook.com%2Fconnect%2Fxd_arbiter.php%3Fversion%3D44%23cb%3Df1b1dac16a53484%26domain%3Dthanhnien.vn%26origin%3Dhttps%253A%252F%252Fthanhnien.vn%252Ff20b42488425504%26relation%3Dparent.parent&container_width=0&href=\" + \\\r\n url+\"&layout=button_count&locale=en_US&sdk=joey&share=true&show_faces=false&size=large\"\r\n yield scrapy.Request(like_request, callback=self.parse_likes, meta={'article': article})\r\n\r\n def parse_likes(self, response):\r\n article = response.meta['article']\r\n\r\n likes = response.xpath(\r\n '//button[@type=\"submit\"]/div/span[3]/text()').get()\r\n if likes is None:\r\n likes = '0'\r\n\r\n article.update({'likes-counter': likes})\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n article.get('url'))\r\n self.articleCount += 1\r\n return article\r\n" }, { "alpha_fraction": 0.5307996273040771, "alphanum_fraction": 
0.5330952405929565, "avg_line_length": 48.262821197509766, "blob_id": "4b645fb8e15383724d4b306f5627d8dbcae2ff00", "content_id": "081d80440453180dc3b97a27024ffaf67c3658bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7845, "license_type": "no_license", "max_line_length": 204, "num_lines": 156, "path": "/news/spiders/tuoitre.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "from scrapy.spiders import CrawlSpider, Rule\r\nfrom scrapy.linkextractors import LinkExtractor\r\nimport scrapy\r\nimport json\r\nimport modules.timeConverter as time\r\n\r\nclass TuoitreSpider(CrawlSpider):\r\n\r\n name = \"tuoitre\"\r\n custom_settings = {\r\n 'CONCURRENT_REQUESTS': 100,\r\n 'REACTOR_THREADPOOL_MAXSIZE': 20,\r\n 'LOG_LEVEL': 'INFO',\r\n 'COOKIES_ENABLED': False,\r\n 'RETRY_ENABLED': False,\r\n 'REDIRECT_ENABLED': False,\r\n 'AJAXCRAWL_ENABLED': True,\r\n }\r\n allowed_domains = ['tuoitre.vn']\r\n start_urls = ['https://tuoitre.vn/']\r\n\r\n rules = (\r\n Rule(LinkExtractor(allow_domains=['tuoitre.vn'], deny_domains=['vieclam.tuoitre.vn']), callback='parse_item', follow=True),\r\n )\r\n\r\n def __init__(self, crawlMode='', **kwargs):\r\n super().__init__(**kwargs)\r\n self.crawlMode = crawlMode\r\n if crawlMode is 'update' or crawlMode is '':\r\n self.crawlMode = 'Update'\r\n print(self.crawlMode)\r\n\r\n self.articleCount = 0\r\n\r\n def parse_item(self, response):\r\n article = dict()\r\n date = dict()\r\n title = response.xpath('//head/meta[@property=\"og:title\"]/@content').extract_first()\r\n if title is not None:\r\n\r\n date.update({'datePublished': response.xpath('//meta[@property=\"article:published_time\"]/@content').get()})\r\n date.update({'dateModified': response.xpath('//meta[@property=\"article:modified_time\"]/@content').get()})\r\n if date is not None:\r\n try:\r\n date = time.timestamp_converter(date)\r\n article.update(date)\r\n except:\r\n pass\r\n\r\n link = response.url\r\n article.update({'title': title, 'link': link})\r\n # get meta\r\n article.update({'headline': response.xpath('//meta[@itemprop=\"headline\"]/@content').get()})\r\n article.update({'type': response.xpath(\"//meta[@property='og:type']/@content\").get()})\r\n article.update({'description': response.xpath(\"//meta[@name='description']/@content\").get()})\r\n article.update({'keywords': response.xpath(\"//meta[@name='keywords']/@content\").get()})\r\n article.update({'category': response.xpath(\"//meta[@property='article:section']/@content\").get()})\r\n article.update({'copyright': response.xpath(\"//meta[@name='copyright']/@content\").get()})\r\n article.update({'language': response.xpath(\"//meta[@name='Language']/@content\").get()})\r\n article.update({'geo_place_name': response.xpath(\"//meta[@name = 'geo.placename']/@content\").get()})\r\n article.update({'geo_region': response.xpath(\"//meta[@name = 'geo.region']/@content\").get()})\r\n article.update({'geo_position': response.xpath(\"//meta[@name = 'geo.position']/@content\").get()})\r\n article.update({'organization': 'Tuổi trẻ'})\r\n\r\n # author\r\n content = ''\r\n author = ''\r\n for text in response.xpath('(//div|//p)[contains(@class, \"author\") or contains(@class, \"author_single\") or contains(@class,\"authorvideo\") or contains(@class,\"credit-text\")]//text()').getall():\r\n author += text.strip()\r\n article.update({'author': author})\r\n for text in response.xpath('//div[contains(@id,\"main-detail-body\") or contains(@class,\"sp-detail-content\") or 
contains(@class,\"fck\")]/p//text()').getall():\r\n content += text.strip()\r\n article.update({'content_article': content})\r\n word_count = len(content.split())\r\n article.update({'word_count': word_count})\r\n # get thumbnail\r\n thumbnail = response.xpath('(//div[@type=\"Photo\"]/div/a/img/@src)|(//div[@type=\"Photo\"]/div/img/@src)|(//td/a/img/@src)').getall()\r\n article.update({'thumbnail': thumbnail})\r\n # get images\r\n images = []\r\n image = dict()\r\n image.update({'url': response.xpath('//meta[@property=\"og:image\"]/@content').get()})\r\n image.update({'alt': response.xpath('//meta[@property=\"og:image:alt\"]/@content').get()})\r\n image.update({'width': response.xpath('//meta[@property=\"og:image:width\"]/@content').get()})\r\n image.update({'height': response.xpath('//meta[@property=\"og:image:height\"]/@content').get()})\r\n images.append(image)\r\n article.update({'image': images})\r\n # get relate_url\r\n relate_url = []\r\n htags = response.xpath('//ul[@class=\"list-news\"]/li/div[@class=\"name-title\"]')\r\n for tag in htags:\r\n relate_urls = {}\r\n headline = tag.xpath('a/text()').get()\r\n url = \"https://tuoitre.vn\" + str(tag.xpath('a/@href').extract_first())\r\n relate_urls.update({'headline': headline, 'url': url})\r\n relate_url.append(relate_urls)\r\n article.update({\"related_url\": relate_url})\r\n # get inf cmt\r\n objectid = response.xpath(\r\n '//div[@id=\"tagandnetwork\"]/div[@class=\"tagandtopicandbanner\"]/section/@data-objectid').get()\r\n if objectid is None:\r\n return 0\r\n else:\r\n objectid = objectid\r\n datasort = response.xpath(\r\n '//div[@id=\"tagandnetwork\"]/div[@class=\"tagandtopicandbanner\"]/section/@data-sort').get()\r\n if datasort is None:\r\n return 0\r\n else:\r\n datasort = datasort\r\n\r\n pagesize = response.xpath(\r\n '//div[@id=\"tagandnetwork\"]/div[@class=\"tagandtopicandbanner\"]/section/@data-pagesize').get()\r\n if pagesize is None:\r\n return 0\r\n else:\r\n pagesize = pagesize\r\n objecttype = response.xpath(\r\n '//div[@id=\"tagandnetwork\"]/div[@class=\"tagandtopicandbanner\"]/section/@data-objecttype').get()\r\n if objecttype is None:\r\n return 0\r\n else:\r\n objecttype = objecttype\r\n id_article = dict()\r\n id_article.update({'objectid': objectid, 'datasort': datasort, 'pagesize': pagesize, 'objecttype': objecttype})\r\n # get total likes\r\n total_like = \"https://s1.tuoitre.vn/count-object.htm?newsId=\" + objectid\r\n\r\n yield scrapy.Request(total_like, callback=self.parse_like,\r\n headers={'Accept': '*/*',\r\n 'Origin': 'https://tuoitre.vn',\r\n 'Referer': response.url,\r\n 'Sec-Fetch-Mode': 'cors',\r\n },\r\n meta={'article': article, 'id_article': id_article})\r\n\r\n def parse_like(self, response):\r\n log = response.meta['article']\r\n id_article = response.meta['id_article']\r\n log.update({'like_count': response.text})\r\n cmt_resquest = 'https://id.tuoitre.vn/api/getlist-comment.api?pageindex=1&pagesize=' + id_article[\r\n 'pagesize'] + '&objId=' + id_article['objectid'] + '&objType=' + id_article['objecttype'] + '&sort=' + \\\r\n id_article['datasort']\r\n yield scrapy.Request(cmt_resquest, callback=self.parse_comment, meta={'data1': log})\r\n\r\n def parse_comment(self, response):\r\n str1 = ''\r\n for text in response.xpath('//text()').getall():\r\n str1 += text\r\n dict = json.loads(str1)\r\n article = response.meta['data1']\r\n article.update({'comment_article': dict})\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n article.get('link'))\r\n self.articleCount += 1\r\n yield 
article\r\n" }, { "alpha_fraction": 0.5166088342666626, "alphanum_fraction": 0.5199140906333923, "avg_line_length": 50.599998474121094, "blob_id": "6b1f74b5fd7de43213c95d360f32f10669b6c6fd", "content_id": "0b42eddf5bafe4aa67fe06855fbcdc71d08df4bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6051, "license_type": "no_license", "max_line_length": 222, "num_lines": 115, "path": "/news/spiders/vtv.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "from scrapy.spiders import CrawlSpider, Rule\r\nfrom scrapy.linkextractors import LinkExtractor\r\nimport scrapy\r\nimport json\r\nimport modules.timeConverter as time\r\n\r\n\r\nclass VtvSpider(CrawlSpider):\r\n name = \"vtv.vn\"\r\n allowed_domains = ['vtv.vn', 'sharefb.cnnd.vn']\r\n start_urls = ['http://vtv.vn/']\r\n rules = (\r\n Rule(LinkExtractor(allow_domains=['vtv.vn']), callback='parse_item', follow=True),\r\n )\r\n\r\n def __init__(self, crawlMode='', **kwargs):\r\n super().__init__(**kwargs)\r\n self.crawlMode = crawlMode\r\n if crawlMode is 'update' or crawlMode is '':\r\n self.crawlMode = 'Update'\r\n print(self.crawlMode)\r\n\r\n self.articleCount = 0\r\n\r\n def parse_item(self, response):\r\n article = dict()\r\n title = response.xpath('(//h1[@class=\"title_detail\"]/text())|(//div[@class=\"infomationdetail clearfix\"]/h1/text())').get()\r\n if title is not None:\r\n # get ld_json\r\n ld_json = response.xpath('//head/script[@type=\"application/ld+json\"]/text()').get()\r\n if ld_json is not None:\r\n try:\r\n ld_json = json.loads(ld_json)\r\n ld_json = time.timestamp_converter(ld_json)\r\n article.update(ld_json)\r\n except ValueError:\r\n pass\r\n if 'dateModified' in article.keys():\r\n dateModified = response.xpath('//meta[@name=\"pubdate\"]/@content').get()\r\n article.update({'dateModified': time.Vnex_timestamp(dateModified)})\r\n if 'datePublished' in article.keys():\r\n datePublished = response.xpath('//meta[@name=\"lastmod\"]/@content').get()\r\n article.update({'datePublished': time.Vnex_timestamp(datePublished)})\r\n # get meta\r\n article.update({'type': response.xpath(\"//head/meta[@property='og:type']/@content\").get()})\r\n article.update({'description': response.xpath(\"//head/meta[@name='description']/@content\").get()})\r\n article.update({'keywords': response.xpath(\"//head/meta[@name='keywords']/@content\").get()})\r\n article.update({'category': response.xpath(\"//head/meta[@property='article:section']/@content\").get()})\r\n article.update({'copyright': response.xpath(\"//head/meta[@name='copyright']/@content\").get()})\r\n article.update({'language': response.xpath(\"//head/meta[@name='Language']/@content\").get()})\r\n article.update({'geo_place_name': response.xpath(\"//meta[@name = 'geo.placename']/@content\").get()})\r\n article.update({'geo_region': response.xpath(\"//meta[@name = 'geo.region']/@content\").get()})\r\n article.update({'geo_position': response.xpath(\"//meta[@name = 'geo.position']/@content\").get()})\r\n article.update({'organization': 'VTV'})\r\n title = response.xpath('//meta[@property=\"og:title\"]/@content').get()\r\n link = response.url\r\n article.update({'title': title, 'link': link})\r\n # author\r\n content = ''\r\n author = ''\r\n for text in response.xpath('(//p[@class=\"news-info\"]/b/text())|(//p[@class=\"author\"]/text())').getall():\r\n author += text.strip()\r\n article.update({'author': author})\r\n for text in response.xpath(\r\n 
'(//div[@id=\"entry-body\"]/p/text())|(//div[@class=\"w638 mgl96\"]/div[@class=\"ta-justify\"]/p/text())').getall():\r\n content += text.strip()\r\n article.update({'content_article': content})\r\n word_count = len(content.split())\r\n article.update({'word_count': word_count})\r\n # get image\r\n thumbnail = response.xpath('(//div[@class=\"infomationdetail clearfix\"]/img/@src)|(//div[@class=\"noidung\"]/img/@src)|(//div[@type=\"Photo\"]/div/img/@src)|(//figure[@class=\"LayoutAlbumItem\"]/a/img/@src)').getall()\r\n if thumbnail is not None:\r\n article.update({'thumbnail': thumbnail})\r\n else:\r\n article.update({'thumbnail': '-1'})\r\n\r\n # get relate_url\r\n relate_url = []\r\n htags = response.xpath('//div[@class=\"clearfix pdb20\"]/ul/li')\r\n for tag in htags:\r\n relate_urls = {}\r\n headline = tag.xpath('a/@title').get()\r\n if headline is not []:\r\n url = \"https://vtv.vn\" + str(tag.xpath('a/@href').extract_first())\r\n relate_urls.update({'headline': headline, 'url': url})\r\n relate_url.append(relate_urls)\r\n article.update({\"related_url\": relate_url})\r\n objectid = response.xpath('//div[@class=\"aspNetHidden\"]/input[@id=\"hdNewsId\"]/@value').get()\r\n cmt_resquest = 'https://sharefb.cnnd.vn/?urls=http://vtv.vn/news-' + str(objectid) + '.htm'\r\n yield scrapy.Request(cmt_resquest, callback=self.parse_comment,\r\n headers={'Accept': 'application/json, text/javascript, */*; q=0.01',\r\n 'Origin': 'https://vtv.vn',\r\n 'Sec-Fetch-Mode': 'cors',\r\n 'Referer': response.url},\r\n meta={'article': article})\r\n\r\n def parse_comment(self, response):\r\n str1 = ''\r\n log = response.meta['article']\r\n for text in response.xpath('//text()').getall():\r\n str1 += text\r\n try:\r\n list_inter = json.loads(str1)\r\n dict_inter = dict(list_inter[0])\r\n del dict_inter['url']\r\n log.update(dict_inter)\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n log.get('link'))\r\n self.articleCount += 1\r\n yield log\r\n except:\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n log.get('link'))\r\n self.articleCount += 1\r\n yield log\r\n\r\n" }, { "alpha_fraction": 0.47675374150276184, "alphanum_fraction": 0.49516561627388, "avg_line_length": 35.536678314208984, "blob_id": "134f0f2f0124d46b35ae159e102ee6a09e8eea9e", "content_id": "08e657eaee048209a9686190381852168acb576e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9862, "license_type": "no_license", "max_line_length": 87, "num_lines": 259, "path": "/modules/timeConverter.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "from datetime import datetime, timedelta\r\nimport re\r\n\r\n\r\ndef timestamp_converter(ld_dict):\r\n date_published = ld_dict.get('datePublished')\r\n date_modified = ld_dict.get('dateModified')\r\n oldFormat = \"%m/%d/%Y %I:%M:%S %p\"\r\n try:\r\n date_published = datetime.strptime(date_published, oldFormat)\r\n date_published = datetime.timestamp(date_published)\r\n\r\n date_modified = datetime.strptime(date_modified, oldFormat)\r\n date_modified = datetime.timestamp(date_modified)\r\n\r\n ld_dict['datePublished'] = date_published\r\n ld_dict['dateModified'] = date_modified\r\n\r\n return ld_dict\r\n except:\r\n oldFormat = \"%Y-%m-%dT%H:%M:%S%z\"\r\n if \"+07:00\" in date_published:\r\n date_published = date_published.replace('+07:00', '+0700')\r\n if \"+07:00\" in date_modified:\r\n date_modified = date_modified.replace('+07:00', '+0700')\r\n if \"+08:00\" in date_published:\r\n date_published = 
date_published.replace('+08:00', '+0800')\r\n if \"+08:00\" in date_modified:\r\n date_modified = date_modified.replace('+08:00', '+0800')\r\n\r\n try:\r\n date_published = datetime.strptime(date_published, oldFormat)\r\n date_published = datetime.timestamp(date_published)\r\n\r\n date_modified = datetime.strptime(date_modified, oldFormat)\r\n date_modified = datetime.timestamp(date_modified)\r\n\r\n ld_dict['datePublished'] = date_published\r\n ld_dict['dateModified'] = date_modified\r\n\r\n return ld_dict\r\n except:\r\n oldFormat = \"%Y-%m-%dT%H:%M:%S\"\r\n if \".\" in date_published and \".\" in date_modified:\r\n date_published, frag = date_published.split('.')\r\n date_modified, frag = date_modified.split('.')\r\n try:\r\n date_published = datetime.strptime(date_published, oldFormat)\r\n date_published = datetime.timestamp(date_published)\r\n\r\n date_modified = datetime.strptime(date_modified, oldFormat)\r\n date_modified = datetime.timestamp(date_modified)\r\n\r\n ld_dict['datePublished'] = date_published\r\n ld_dict['dateModified'] = date_modified\r\n\r\n return ld_dict\r\n except:\r\n oldFormat = \"%Y-%m-%d\"\r\n try:\r\n date_published = datetime.strptime(\r\n date_published, oldFormat)\r\n date_published = datetime.timestamp(date_published)\r\n\r\n date_modified = datetime.strptime(date_modified, oldFormat)\r\n date_modified = datetime.timestamp(date_modified)\r\n\r\n ld_dict['datePublished'] = date_published\r\n ld_dict['dateModified'] = date_modified\r\n\r\n return ld_dict\r\n except Exception as e:\r\n print(e)\r\n\r\n\r\ndef vietnamnet_timestamp(ld_dict):\r\n oldFormat = \"%d-%m-%YT%H:%M:%S%z\"\r\n date_published = ld_dict.get('datePublished')\r\n date_modified = ld_dict.get('dateModified')\r\n\r\n if \"+07:00\" in date_published:\r\n date_published = date_published.replace('+07:00', '+0700')\r\n if \"+07:00\" in date_modified:\r\n date_modified = date_modified.replace('+07:00', '+0700')\r\n\r\n try:\r\n date_published = datetime.strptime(date_published, oldFormat)\r\n date_published = datetime.timestamp(date_published)\r\n\r\n date_modified = datetime.strptime(date_modified, oldFormat)\r\n date_modified = datetime.timestamp(date_modified)\r\n\r\n ld_dict['datePublished'] = date_published\r\n ld_dict['dateModified'] = date_modified\r\n\r\n return ld_dict\r\n except Exception as e:\r\n print(e)\r\n\r\n\r\ndef Yeah1_timestamp(time):\r\n oldFormat = \"%Y-%m-%dT%H:%M:%S%z\"\r\n if \"+07:00\" in time:\r\n time = time.replace('+07:00', '+0700')\r\n if \"+08:00\" in time:\r\n time = time.replace('+08:00', '+0800')\r\n\r\n time = datetime.strptime(time, oldFormat)\r\n time = datetime.timestamp(time)\r\n return time\r\n\r\n\r\ndef Dspl_timestamp(time):\r\n time1 = \"\"\r\n if 'Thứ hai' in time:\r\n time1 = time.replace('Thứ hai, ', '')\r\n if 'Thứ ba' in time:\r\n time1 = time.replace('Thứ ba, ', '')\r\n if 'Thứ tư' in time:\r\n time1 = time.replace('Thứ tư, ', '')\r\n if 'Thứ năm' in time:\r\n time1 = time.replace('Thứ năm, ', '')\r\n if 'Thứ sáu' in time:\r\n time1 = time.replace('Thứ sáu, ', '')\r\n if 'Thứ bảy' in time:\r\n time1 = time.replace('Thứ bảy, ', '')\r\n if 'Chủ nhật' in time:\r\n time1 = time.replace('Chủ nhật, ', '')\r\n oldFormat = \"%d/%m/%Y | %H:%M\"\r\n if 'GMT+7' in time1:\r\n time1 = time1.replace(' GMT+7', '')\r\n time1 = datetime.strptime(time1, oldFormat)\r\n time1 = datetime.timestamp(time1)\r\n return time1\r\n\r\n\r\ndef Tiin_timestamp(time):\r\n oldFormat = \"%d/%m/%Y %H:%M\"\r\n time = datetime.strptime(time, oldFormat)\r\n time = 
datetime.timestamp(time)\r\n    return time\r\n\r\n\r\ndef Vnex_timestamp(time):\r\n    oldFormat = \"%Y-%m-%dT%H:%M:%S%z\"\r\n    try:\r\n        if \"+07:00\" in time:\r\n            time = time.replace('+07:00', '+0700')\r\n        if \"+08:00\" in time:\r\n            time = time.replace('+08:00', '+0800')\r\n        time = datetime.strptime(time, oldFormat)\r\n        time = datetime.timestamp(time)\r\n        return time\r\n    except:\r\n        time1 = \"\"\r\n        if 'Thứ hai' in time:\r\n            time1 = time.replace('Thứ hai, ', '')\r\n        if 'Thứ ba' in time:\r\n            time1 = time.replace('Thứ ba, ', '')\r\n        if 'Thứ tư' in time:\r\n            time1 = time.replace('Thứ tư, ', '')\r\n        if 'Thứ năm' in time:\r\n            time1 = time.replace('Thứ năm, ', '')\r\n        if 'Thứ sáu' in time:\r\n            time1 = time.replace('Thứ sáu, ', '')\r\n        if 'Thứ bảy' in time:\r\n            time1 = time.replace('Thứ bảy, ', '')\r\n        if 'Chủ nhật' in time:\r\n            time1 = time.replace('Chủ nhật, ', '')\r\n        oldFormat = \"%d/%m/%Y, %H:%M\"\r\n        if '(GMT+7)' in time1:\r\n            time1 = time1.replace(' (GMT+7)', '')\r\n        time1 = datetime.strptime(time1, oldFormat)\r\n        time1 = datetime.timestamp(time1)\r\n        return time1\r\n\r\n\r\ndef comment_time(time):\r\n    oldFormat = \"%H:%M | %d/%m/%Y\"\r\n\r\n    try:\r\n        newTime = datetime.strptime(time, oldFormat)\r\n        newTime = datetime.timestamp(newTime)\r\n        return newTime\r\n    except:\r\n        if 'Thứ hai' in time:\r\n            time = time.replace(' Thứ hai', '')\r\n            commentDay = 0\r\n        if 'Thứ ba' in time:\r\n            time = time.replace('Thứ ba', '')\r\n            commentDay = 1\r\n        if 'Thứ tư' in time:\r\n            time = time.replace('Thứ tư', '')\r\n            commentDay = 2\r\n        if 'Thứ năm' in time:\r\n            time = time.replace('Thứ năm', '')\r\n            commentDay = 3\r\n        if 'Thứ sáu' in time:\r\n            time = time.replace('Thứ sáu', '')\r\n            commentDay = 4\r\n        if 'Thứ bảy' in time:\r\n            time = time.replace('Thứ bảy', '')\r\n            commentDay = 5\r\n        if 'Chủ nhật' in time:\r\n            time = time.replace('Chủ nhật', '')\r\n            commentDay = 6\r\n        oldFormat = \"%H:%M \"\r\n        try:\r\n            day = datetime.today() - timedelta(datetime.today().weekday() - commentDay)\r\n            newTime = datetime.strptime(time, oldFormat)\r\n            newTime = newTime.replace(\r\n                year=day.year, month=day.month, day=day.day)\r\n            newTime = datetime.timestamp(newTime)\r\n            return newTime\r\n        except:\r\n            if \".\" in time:\r\n                time, frag = time.split('.')\r\n            oldFormat = \"%Y-%m-%dT%H:%M:%S\"\r\n            try:\r\n                newTime = datetime.strptime(time, oldFormat)\r\n                newTime = datetime.timestamp(newTime)\r\n                return newTime\r\n            except:\r\n                if \"+07:00\" in time:\r\n                    time = time.replace(\"+07:00\", \"+0700\")\r\n                oldFormat = \"%Y-%m-%dT%H:%M:%S%z\"\r\n                try:\r\n                    newTime = datetime.strptime(time, oldFormat)\r\n                    newTime = datetime.timestamp(newTime)\r\n                    return newTime\r\n                except:\r\n                    oldFormat = \"%Hh%M, ngày %d-%m-%Y\"\r\n                    try:\r\n                        newTime = datetime.strptime(time, oldFormat)\r\n                        newTime = datetime.timestamp(newTime)\r\n                        return newTime\r\n                    except:\r\n                        newTime = None\r\n                        if \"phút trước\" in time:\r\n                            strings = [s for s in time.split() if s.isdigit()]\r\n                            time = strings[0]\r\n                            time = int(time)\r\n                            newTime = datetime.now() - timedelta(minutes=time)\r\n                        elif \"giờ trước\" in time:\r\n                            strings = [s for s in time.split() if s.isdigit()]\r\n                            time = strings[0]\r\n                            time = int(time)\r\n                            newTime = datetime.now() - timedelta(hours=time)\r\n                        elif \"ngày trước\" in time:\r\n                            strings = [s for s in time.split() if s.isdigit()]\r\n                            time = strings[0]\r\n                            time = int(time)\r\n                            newTime = datetime.now() - timedelta(days=time)\r\n                        try:\r\n                            if newTime is not None:\r\n                                newTime = datetime.timestamp(newTime)\r\n                            return newTime\r\n                        except Exception as e:\r\n                            print(e)\r\n" }, { "alpha_fraction": 0.5284651517868042, "alphanum_fraction": 
0.5497950315475464, "avg_line_length": 45.2186393737793, "blob_id": "740771ae3c1dd7e14c74fa8c8c1cbd84c2c2502e", "content_id": "43dd07fdb1ab2e14a844e975a0eedfff68eeae16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13176, "license_type": "no_license", "max_line_length": 402, "num_lines": 279, "path": "/news/spiders/dantri.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\nimport scrapy\r\nimport json\r\n\r\nimport modules.timeConverter as time\r\n\r\n\r\nclass DantriSpider(scrapy.Spider):\r\n name = 'dantri'\r\n allowed_domains = ['dantri.com.vn']\r\n\r\n def __init__(self, crawlMode='', **kwargs):\r\n super().__init__(**kwargs)\r\n self.crawlMode = crawlMode\r\n if crawlMode is 'update' or crawlMode is '':\r\n self.crawlMode = 'Update'\r\n\r\n self.articleCount = 0\r\n\r\n def start_requests(self):\r\n # return [scrapy.Request(\"https://dantri.com.vn/video/giai-phap-tan-dung-nguon-lao-dong-cao-tuoi-o-han-quoc-108264.htm\", callback=self.parse_video)]\r\n # return [scrapy.Request(\"https://dantri.com.vn/xa-hoi/chiem-nguong-ngoi-dinh-hon-300-nam-tuoi-dep-nhat-xu-doai-20191016222010573.htm\", callback=self.parse_article)]\r\n # return [scrapy.Request(\"https://dulich.dantri.com.vn/du-lich/hue-tien-phong-dua-he-thong-xe-dap-thong-minh-phuc-vu-du-khach-nguoi-dan-20191013102955719.htm\", callback=self.parse_article)]\r\n # return [scrapy.Request(\"https://dantri.com.vn/the-gioi/co-gai-thuy-dien-goc-viet-va-uoc-mong-chay-bong-tim-cha-me-sau-22-nam-20191003144459720.htm\", callback=self.parse_article)]\r\n return [scrapy.Request(\"https://dantri.com.vn/\", callback=self.logged_in)]\r\n\r\n def logged_in(self, response):\r\n urls = ['https://dantri.com.vn/su-kien.htm',\r\n 'https://dantri.com.vn/xa-hoi.htm',\r\n 'https://dantri.com.vn/the-gioi.htm',\r\n 'https://dantri.com.vn/the-thao.htm',\r\n 'https://dantri.com.vn/giao-duc-khuyen-hoc.htm',\r\n 'https://dantri.com.vn/tam-long-nhan-ai.htm',\r\n 'https://dantri.com.vn/kinh-doanh.htm',\r\n 'https://dantri.com.vn/bat-dong-san.htm',\r\n 'https://dantri.com.vn/van-hoa.htm',\r\n 'https://dantri.com.vn/giai-tri.htm',\r\n 'https://dantri.com.vn/phap-luat.htm',\r\n 'https://dantri.com.vn/nhip-song-tre.htm',\r\n 'https://dantri.com.vn/suc-khoe.htm',\r\n 'https://dantri.com.vn/suc-manh-so.htm',\r\n 'https://dantri.com.vn/o-to-xe-may.htm',\r\n 'https://dantri.com.vn/tinh-yeu-gioi-tinh.htm',\r\n 'https://dantri.com.vn/chuyen-la.htm',\r\n 'https://dantri.com.vn/doi-song.htm',\r\n 'https://dantri.com.vn/ban-doc.htm',\r\n 'https://dantri.com.vn/khoa-hoc-cong-nghe.htm']\r\n\r\n # scrape article\r\n for url in urls:\r\n yield scrapy.Request(url, callback=self.parse)\r\n\r\n # Bo do khong co ld+json phu hop\r\n # scrape travel section\r\n # yield scrapy.Request(\"https://dulich.dantri.com.vn/\", callback=self.parse_travel_nav)\r\n\r\n # scrape video\r\n # yield scrapy.Request(\"https://dantri.com.vn/video-page.htm\", self.parse_video_passer)\r\n\r\n def parse(self, response):\r\n for href in response.xpath('//*[@data-linktype=\"newsdetail\"]/@href'):\r\n try:\r\n yield response.follow(href, callback=self.parse_article)\r\n except Exception:\r\n self.logger.error(\"ERROR: \", exc_info=True)\r\n continue\r\n\r\n next_page = response.xpath(\r\n '//*[@id=\"html\"]/body//div[@class =\"fr\"][1]//@href')[0]\r\n if next_page is not None:\r\n yield response.follow(next_page, callback=self.parse)\r\n\r\n def parse_travel_nav(self, response):\r\n for href in 
response.xpath('//li[@class=\"normal\"]/a/@href').getall()[1:6]:\r\n try:\r\n url = \"https://dulich.dantri.com.vn\"+href\r\n yield scrapy.Request(url, callback=self.parse_travel, meta={'index': 1, 'segment': url})\r\n except Exception:\r\n self.logger.error(\"ERROR: \", exc_info=True)\r\n continue\r\n\r\n def parse_travel(self, response):\r\n index = response.meta['index']\r\n segment = response.meta['segment']\r\n\r\n if index == 101 or response.xpath('//ul[@class=\"listcate fl\"]/li[@class = \"normal\"]//@href').get() is None:\r\n return\r\n\r\n if index == 1:\r\n href = response.xpath('//li[@class = \"top\"]//@href').get()\r\n yield response.follow(href, self.parse_article)\r\n for href in response.xpath('//ul[@class=\"listcate fl\"]/li[@class = \"normal\"]//@href'):\r\n try:\r\n yield response.follow(href, self.parse_article)\r\n except Exception:\r\n self.logger.error(\"ERROR: \", exc_info=True)\r\n continue\r\n\r\n index += 1\r\n next_page = segment.replace('.htm', '') + \"/trang-\" + \\\r\n str(index)+\".htm\"\r\n yield scrapy.Request(next_page, self.parse_travel, meta={'index': index, 'segment': segment})\r\n\r\n def parse_video_passer(self, response):\r\n PAGE_CAP = 101\r\n for page in range(1, PAGE_CAP):\r\n try:\r\n video_getter = \"https://dantri.com.vn/video/latest/0-\" + \\\r\n str(page)+\"-1000-0.htm\"\r\n yield scrapy.Request(video_getter, callback=self.parse_video)\r\n except Exception:\r\n self.logger.error(\"ERROR: \", exc_info=True)\r\n continue\r\n\r\n def parse_video(self, response):\r\n videos = ''\r\n for a in response.xpath('//text()').getall():\r\n videos += a\r\n if videos == None:\r\n return\r\n\r\n video_dict = json.loads(videos)\r\n for vid in video_dict:\r\n try:\r\n yield {'video-' + str(vid.get('Id')): vid}\r\n except Exception:\r\n self.logger.error(\"ERROR: \", exc_info=True)\r\n continue\r\n\r\n def parse_article(self, response):\r\n article = {}\r\n\r\n # get ld_json\r\n try:\r\n ld_json = response.xpath(\r\n \"//script[contains(text(),'NewsArticle')]/text()\").get()\r\n ld_json_dict = json.loads(ld_json)\r\n ld_json_dict = time.timestamp_converter(ld_json_dict)\r\n article.update(ld_json_dict)\r\n except:\r\n pass\r\n\r\n # get meta\r\n elems = {\r\n 'meta-description': response.xpath(\"//meta[@name='description']/@content\").get(),\r\n 'meta-keywords': response.xpath(\"//meta[@name='keywords']/@content\").get(),\r\n 'meta-title': response.xpath(\"//meta[@name='title']/@content\").get(),\r\n 'meta-copyright': response.xpath(\"//meta[@name='copyright']/@content\").get(),\r\n 'meta-author': response.xpath(\"//meta[@name='author']/@content\").get(),\r\n 'language': response.xpath('//meta[@http-equiv = \"content-language\"]/@content').get(),\r\n 'geo.placename': response.xpath('//meta[@name = \"geo.placename\"]/@content').get(),\r\n 'geo.position': response.xpath('//meta[@name = \"geo.region\"]/@content').get(),\r\n 'geo.region': response.xpath('//meta[@name = \"geo.region\"]/@content').get(),\r\n 'meta-article:author': response.xpath(\"//meta[@property='article:author']/@content\").get(),\r\n 'meta-article:publisher': response.xpath(\"//meta[@property='article:publisher']/@content\").get(),\r\n 'category': response.xpath('//a[@class = \"breadcrumbitem1\"][contains(@href, \"htm\")]/span/text()').get(),\r\n 'organization': 'dân trí',\r\n 'url': response.url,\r\n 'related_urls': response.xpath('//div[@class = \"article-oldnew\"]//div/div[@class = \"article-oldnew-img\"]/a/@href').getall()\r\n }\r\n article.update(elems)\r\n\r\n # get content\r\n content = 
''\r\n for text in response.xpath('//*[@id=\"divNewsContent\"]/p/text()').getall():\r\n content += text.strip()\r\n for text in response.xpath('//*[@class = \"detail-content\"]/p/text()').getall():\r\n content += text.strip()\r\n for text in response.xpath('//div[@class=\"e-body\"]//p/text()').getall():\r\n content += text.strip()\r\n article.update({'content': content})\r\n\r\n word_count = len(content.split())\r\n article.update({'word_count': word_count})\r\n\r\n # get image url\r\n images = {}\r\n index1 = index2 = 0\r\n for index1, src in enumerate(response.xpath('//*[@id=\"divNewsContent\"]//img/@src').getall(), 1):\r\n images.update({'image' + str(index1): src})\r\n for index2, src in enumerate(response.xpath('//*[@class = \"detail-content\"]//img/@src').getall(), index1 + 1):\r\n images.update({'image' + str(index2): src})\r\n for index3, src in enumerate(response.xpath('//div[@class=\"e-body\"]//figure[contains(@class,\"image\")]//@src').getall(), index2 + 1):\r\n images.update({'image' + str(index3): src})\r\n\r\n article.update({'image-urls': images})\r\n\r\n # get hashtags\r\n hashtags = {}\r\n for index, href in enumerate(response.xpath('//span[@class = \"news-tags-item\"]/a/@href').getall(), 1):\r\n hashtags.update({'tag'+str(index): href})\r\n article.update({'hash-tags': hashtags})\r\n\r\n # get video url\r\n videos = {}\r\n for index, src in enumerate(response.xpath('//div[@class=\"e-body\"]/figure[@class = \"video\"]//@data-src').getall(), 1):\r\n videos.update({'video' + str(index): \"vcdn.dantri.com.vn/\" + src})\r\n article.update(videos)\r\n\r\n # get likes\r\n id = response.xpath('//*[@id=\"hdNewsId\"]/@value').get()\r\n if id is not None:\r\n like_request = \"https://www.facebook.com/v2.3/plugins/like.php?action=like&app_id=164035690775918&channel=https%3A%2F%2Fstaticxx.facebook.com%2Fconnect%2Fxd_arbiter.php%3Fversion%3D44%23cb%3Df31c1be4fdc1a28%26domain%3Ddantri.com.vn%26origin%3Dhttps%253A%252F%252Fdantri.com.vn%252Ff3a046e102e74f4%26relation%3Dparent.parent&container_width=0&href=https%3A%2F%2Fdantri.com.vn%2Fnews-\" + \\\r\n id+\".htm&layout=button_count&locale=vi_VN&sdk=joey&share=false&show_faces=false&size=small\"\r\n else:\r\n id = response.xpath('//*[@id=\"hidDistID\"]/@value').get()\r\n if id is not None:\r\n like_request = \"https://www.facebook.com/plugins/like.php?href=\"+response.url + \\\r\n \"&send=false&share=true&layout=standard&width=450&show_faces=false&action=like&colorscheme=light&font&height=35&\"\r\n else:\r\n pv1 = response.url.find('.htm')\r\n pv2 = response.url.find('-', pv1-20) + 1\r\n id = response.url[pv2:pv1]\r\n like_request = \"https://www.facebook.com/v2.3/plugins/like.php?action=like&app_id=164035690775918&channel=https%3A%2F%2Fstaticxx.facebook.com%2Fconnect%2Fxd_arbiter.php%3Fversion%3D44%23cb%3Df322cc0314d7894%26domain%3Ddantri.com.vn%26origin%3Dhttps%253A%252F%252Fdantri.com.vn%252Ffe7c5846d65f58%26relation%3Dparent.parent&container_width=0&href=https%3A%2F%2Fdantri.com.vn%2Fnews-\" + \\\r\n id+\".htm&layout=button_count&locale=vi_VN&sdk=joey&share=false&show_faces=false\"\r\n yield scrapy.Request(like_request, callback=self.parse_likes, meta={'article': article, 'id': id})\r\n\r\n def parse_likes(self, response):\r\n article = response.meta['article']\r\n id = response.meta['id']\r\n\r\n likes = response.xpath(\r\n '//button[@type=\"submit\"]/div/span[3]/text()').get()\r\n if likes is not None:\r\n strings = [s for s in likes.split() if s.isdigit()]\r\n if len(strings) != 0:\r\n likes = strings[0]\r\n else:\r\n likes = 
'0'\r\n        else:\r\n            likes = '0'\r\n\r\n        article.update({'likes-counter': likes})\r\n\r\n        CMT_CAP = 10000\r\n        cmt_request = \"https://apicomment.dantri.com.vn/api/comment/list/1-\" + \\\r\n            id+\"-0-0-\"+str(CMT_CAP)+\".htm\"\r\n        yield scrapy.Request(cmt_request, callback=self.parse_comments, meta={'article': article})\r\n\r\n    def parse_comments(self, response):\r\n        article = response.meta['article']\r\n\r\n        raw = ''\r\n        for a in response.xpath('//text()').getall():\r\n            raw += a\r\n\r\n        if raw == 'null':\r\n            article.update({'comments-count': 0, 'comments': ''})\r\n            self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n                             article.get('url'))\r\n            self.articleCount += 1\r\n            return article\r\n\r\n        # scan the response for balanced {...} fragments and decode each one\r\n        cmt_dict = []\r\n        check = 0\r\n        string = ''\r\n\r\n        for a in raw:\r\n            if a == '{':\r\n                check = 1\r\n            if check == 1:\r\n                string += a\r\n            if a == '}':\r\n                check = 0\r\n                try:\r\n                    cmt_dict.append(json.loads(string))\r\n                except ValueError:\r\n                    pass\r\n                string = ''\r\n\r\n        cmt_count = len(cmt_dict)\r\n        for cmt in cmt_dict:\r\n            cmt['CreatedDate'] = time.comment_time(cmt['CreatedDate'])\r\n            cmt_count += cmt.get('ReplyCount', 0)\r\n        article.update({'comments-count': cmt_count, 'comments': cmt_dict})\r\n        self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n                         article.get('url'))\r\n        self.articleCount += 1\r\n        return article\r\n" }, { "alpha_fraction": 0.4982638955116272, "alphanum_fraction": 0.5120287537574768, "avg_line_length": 50.69281005859375, "blob_id": "54c6e3fd0f2ac2570f07ca09c968af3edc9e94ea", "content_id": "1dcba94587f620fdd8973e7a7548ae5e3315d281", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8072, "license_type": "no_license", "max_line_length": 126, "num_lines": 153, "path": "/news/spiders/dsphapluat.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "import scrapy\r\nfrom scrapy.spiders import CrawlSpider, Rule\r\nfrom scrapy.linkextractors import LinkExtractor\r\nimport json\r\nimport modules.timeConverter as time\r\n\r\nclass DoiSongPhapLuatSpider(CrawlSpider):\r\n    name = \"dspl\"\r\n    custom_settings = {\r\n        'CONCURRENT_REQUESTS': 100,\r\n        'REACTOR_THREADPOOL_MAXSIZE': 20,\r\n        'LOG_LEVEL': 'INFO',\r\n        'COOKIES_ENABLED': False,\r\n        'RETRY_ENABLED': False,\r\n        'REDIRECT_ENABLED': False,\r\n        'AJAXCRAWL_ENABLED': True,\r\n    }\r\n\r\n    allowed_domains = ['www.doisongphapluat.com', 'sharefb.cnnd.vn', 'www.facebook.com']\r\n    start_urls = ['https://www.doisongphapluat.com/']\r\n    rules = (\r\n        Rule(LinkExtractor(allow_domains=['www.doisongphapluat.com']), callback='parse_item', follow=True),\r\n    )\r\n\r\n    def __init__(self, crawlMode='', **kwargs):\r\n        super().__init__(**kwargs)\r\n        self.crawlMode = crawlMode\r\n        if crawlMode == 'update' or crawlMode == '':\r\n            self.crawlMode = 'Update'\r\n        print(self.crawlMode)\r\n\r\n        self.articleCount = 0\r\n\r\n    def parse_item(self, response):\r\n        article = dict()\r\n        title = response.xpath('//h1[@class=\"art-title\"]/text()').extract_first()\r\n        if title is not None:\r\n            # get ld_json\r\n            article = dict()\r\n            ld_json = response.xpath('//script[contains(text(),\"NewsArticle\")]/text()').get()\r\n            if ld_json is not None:\r\n                try:\r\n                    r = ld_json[::-1].replace(',', ' ', 1)[::-1]\r\n                    article = json.loads(r)\r\n                except ValueError:\r\n                    article = dict()\r\n            if 'dateModified' in article.keys():\r\n                article.update({'dateModified': time.Dspl_timestamp(article.get('dateModified'))})\r\n            if 'datePublished' in article.keys():\r\n                article.update({'datePublished': 
time.Dspl_timestamp(article.get('datePublished'))})\r\n\r\n\r\n link = response.url\r\n article.update({'title': title, 'link': link})\r\n # get image\r\n thumbnail = response.xpath('(//td[@class=\"pic\"]/div/img/@src)|(//td[@class=\"pic\"]/h2/img/@src)|(//td['\r\n '@class=\"pic\"]//img/@src)|(//div[@id=\"main-detail\"]/div/a/img)|(//div['\r\n '@type=\"Photo\"]/p/a/img/@src)').getall()\r\n article.update({'thumbnail': thumbnail})\r\n # get meta\r\n article.update({'type': response.xpath(\"//head/meta[@property='og:type']/@content\").get()})\r\n article.update({'description': response.xpath(\"//meta[@name='description']/@content\").get()})\r\n article.update({'keywords': response.xpath(\"//meta[@name='keywords']/@content\").get()})\r\n article.update({'category': response.xpath(\"//meta[@property='article:section']/@content\").get()})\r\n article.update({'copyright': response.xpath(\"//div[@class='listhome left'][2]/a/span/text()\").get()})\r\n article.update({'language': response.xpath(\"//meta[@name='language']/@content\").get()})\r\n article.update({'geo_place_name': response.xpath(\"//meta[@name = 'geo.placename']/@content\").get()})\r\n article.update({'geo_region': response.xpath(\"//meta[@name = 'geo.region']/@content\").get()})\r\n article.update({'geo_position': response.xpath(\"//meta[@name = 'geo.position']/@content\").get()})\r\n article.update({'organization': 'Đời sống pháp luật'})\r\n # author\r\n content = ''\r\n for text in response.xpath(\r\n '(//div[@id=\"main-detail\"]/p[@style=\"text-align: justify;\"]/text())|(//div['\r\n '@id=\"main-detail\"]/div[@style=\"text-align: justify;\"]/p[@style=\"text-align: justify;\"]/text())|('\r\n '//div[@id=\"main-detail\"]/div[@style=\"text-align: justify;\"]/p[@style=\"text-align: '\r\n 'justify;\"]/strong/text())|(//span[@style=\"font-size: small;\"]/strong/em/text())|(//div['\r\n '@id=\"main-detail\"]/div[@style=\"text-align: justify;\"]/p[@style=\"text-align: '\r\n 'justify;\"]/div/h4/text())|(//em/text())').getall():\r\n content += text.strip()\r\n article.update({'content_article': content})\r\n if content is not None:\r\n word_count = len(content.split())\r\n article.update({'word_count': word_count})\r\n else:\r\n word_count = -1\r\n article.update({'word_count': word_count})\r\n # get related_urls\r\n relate_url = []\r\n htags = response.xpath('//ul[@class=\"listing pkg\"]/li/h3[@class=\"title\"]')\r\n for tag in htags:\r\n relate_urls = {}\r\n headline = tag.xpath('a/text()').get()\r\n url = str(tag.xpath('a/@href').extract_first())\r\n relate_urls.update({'headline': headline, 'url': url})\r\n relate_url.append(relate_urls)\r\n article.update({\"related_url\": relate_url})\r\n # get interactions\r\n id_article = dict()\r\n url = response.xpath('//meta[@property=\"og:url\"]/@content').get()\r\n if url is not None:\r\n id_article.update({'url': url})\r\n like_request = \"https://www.facebook.com/plugins/like.php?app_id=1547540628876392&channel=https%3A%2F\" \\\r\n \"%2Fstaticxx.facebook.com%2Fconnect%2Fxd_arbiter.php%3Fversiondomainvnexpress.net\" \\\r\n \"%26origin%3Dhttps%253A%252F%252Fvnexpress.netrelation%3Dparent.parent&container_width\" \\\r\n \"=0&href=\" + \\\r\n id_article[\r\n 'url'] + \"&layout=button_count&locale=en_US&sdk=joey&send=false&show_faces=true&width=450\"\r\n yield scrapy.Request(like_request, callback=self.parse_like, meta={'data': article, 'id_article': id_article})\r\n\r\n else:\r\n yield article\r\n\r\n def parse_share(self, response):\r\n log = response.meta['data']\r\n share = 
response.xpath('(//span[@id=\"u_0_3\"]/text())|(//*[@id=\"u_0_4\"]/text())').get()\r\n if share is not None:\r\n if \"k\" in share.lower():\r\n share = share.lower()\r\n share = share.replace(\",\", \".\")\r\n share = share.replace(\"k\", \"\")\r\n share = float(share) * 1000\r\n share = int(share)\r\n else:\r\n share = -1\r\n log.update({'share_count': share})\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n log.get('link'))\r\n self.articleCount += 1\r\n yield log\r\n\r\n def parse_like(self, response):\r\n log = response.meta['data']\r\n id_article = response.meta['id_article']\r\n likes = response.xpath('(//span[@id=\"u_0_3\"]/text())|(//*[@id=\"u_0_4\"]/text())').get()\r\n if likes is not None:\r\n if \"k\" in likes.lower():\r\n likes = likes.lower()\r\n likes = likes.replace(\",\", \".\")\r\n likes = likes.replace(\"k\", \"\")\r\n likes = float(likes) * 1000\r\n likes = int(likes)\r\n else:\r\n likes = -1\r\n log.update({'like_count': likes})\r\n share_rq = \"https://www.facebook.com/plugins/share_button.php?app_id=197055007120496&channel\" \\\r\n \"=https%3A%2F%2Fstaticxx.facebook.com%2Fconnect%2Fxd_arbiter.php\" \\\r\n \"%3Fversiondomainvnexpress.net%26origin%3Dhttps%253A%252F%252Fvnexpress.netrelation\" \\\r\n \"%3Dparent.parent&container_width=0&href=\" + \\\r\n id_article[\r\n 'url'] + \"&layout=button_count&locale=en_US&sdk=joey&send=false&show_faces=true\" \\\r\n \"&width=450 \"\r\n yield scrapy.Request(share_rq, callback=self.parse_share, meta={'data': log})\r\n\r\n" }, { "alpha_fraction": 0.5235254168510437, "alphanum_fraction": 0.5267643928527832, "avg_line_length": 48.568965911865234, "blob_id": "53e851b435dc1c174f95f0d38e8975e68eaa6058", "content_id": "5e1d5d16d515afadd3bdd60be4736334049874b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5866, "license_type": "no_license", "max_line_length": 212, "num_lines": 116, "path": "/news/spiders/yeah1.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "from scrapy.spiders import CrawlSpider, Rule\r\nfrom scrapy.linkextractors import LinkExtractor\r\nimport json\r\nimport modules.timeConverter as time\r\nclass Yeah1Spider(CrawlSpider):\r\n name = \"yeah1\"\r\n allowed_domains = ['yeah1.com', 'sharefb.cnnd.vn',]\r\n start_urls = ['https://yeah1.com/']\r\n rules = (\r\n Rule(LinkExtractor(allow_domains=['yeah1.com']), callback='parse_item', follow=True),\r\n )\r\n\r\n def __init__(self, crawlMode='', **kwargs):\r\n super().__init__(**kwargs)\r\n self.crawlMode = crawlMode\r\n if crawlMode is 'update' or crawlMode is '':\r\n self.crawlMode = 'Update'\r\n print(self.crawlMode)\r\n\r\n self.articleCount = 0\r\n\r\n def parse_item(self, response):\r\n article = dict()\r\n title = response.xpath('//meta[@property=\"og:title\"]/@content').extract_first()\r\n if title is not None:\r\n # get ld json\r\n try:\r\n ld_json = response.xpath('//script[contains(text(),\"NewsArticle\")]/text()').get()\r\n if ld_json is None:\r\n return 0\r\n else:\r\n ld_json = ld_json\r\n article = json.loads(ld_json)\r\n except ValueError:\r\n return 0\r\n # get title, link\r\n\r\n if article is not None:\r\n article = article\r\n else:\r\n article = dict()\r\n\r\n datePublished = article.get('datePublished')\r\n if datePublished is not '':\r\n datePublished = time.Yeah1_timestamp(datePublished)\r\n article.update({'datePublished': datePublished})\r\n else:\r\n datePublished = response.xpath('//span[@class=\"time\"]/text()').get()\r\n datePublished = 
datePublished.strip()\r\n datePublished = time.Tiin_timestamp(datePublished)\r\n article.update({'datePublished': datePublished})\r\n\r\n dateModified = article.get('dateModified')\r\n if dateModified is not '':\r\n dateModified = time.Yeah1_timestamp(dateModified)\r\n article.update({'dateModified': dateModified})\r\n else:\r\n dateModified = response.xpath('//span[@class=\"time\"]/text()').get()\r\n dateModified = dateModified.strip()\r\n dateModified = time.Tiin_timestamp(dateModified)\r\n article.update({'dateModified': dateModified})\r\n\r\n link = response.url\r\n article.update({'title': title, 'link': link})\r\n # get meta\r\n article.update({'type': response.xpath(\"//head/meta[@property='og:type']/@content\").get()})\r\n article.update({'description': response.xpath(\"//meta[@name='description']/@content\").get()})\r\n article.update({'keywords': response.xpath(\"(//meta[@name='keywords']/@content)|(//meta[@name='news_keywords']/@content)\").get()})\r\n article.update({'category': response.xpath(\"//meta[@property='category']/@content\").get()})\r\n article.update({'copyright': response.xpath(\"//meta[@name='copyright']/@content\").get()})\r\n article.update({'language': response.xpath(\"//meta[@name='language']/@content\").get()})\r\n article.update({'geo_place_name': response.xpath(\"//meta[@name = 'geo.placename']/@content\").get()})\r\n article.update({'geo_region': response.xpath(\"//meta[@name = 'geo.region']/@content\").get()})\r\n article.update({'geo_position': response.xpath(\"//meta[@name = 'geo.position']/@content\").get()})\r\n author = response.xpath('(//div[@class=\"article-content\"]/p/strong/span/text())|((//div[@class=\"article-content\"]/p/strong)[last()]/text())|((//div[@class=\"article-content\"]/p)[last()]/text())').get()\r\n if author is None:\r\n author = response.xpath('//div[@class=\"card-meta\"]/span[2]/text()').get()\r\n article.update({'author': author})\r\n article.update({'organization': 'Yeah1'})\r\n # author\r\n content = ''\r\n for text in response.xpath(\r\n '(//div[@class=\"article-content\"]/p/text())|(//div[@class=\"article-content\"]/h3/text())|(//p['\r\n '@class=\"card-text full-height\"]/text())').getall():\r\n content += text.strip()\r\n article.update({'content_article': content})\r\n if content is not None:\r\n word_count = len(content.split())\r\n article.update({'word_count': word_count})\r\n else:\r\n word_count = -1\r\n article.update({'word_count': word_count})\r\n with open(\"body2.html\", \"wb\") as f:\r\n f.write(response.body)\r\n # get image\r\n thumbnail = response.xpath('(//div[@class=\"article-content\"]/p/a/img/@src)|(//figure/img/@src)').getall()\r\n if thumbnail is not None:\r\n article.update({'thumbnail': thumbnail})\r\n else:\r\n article.update({'thumbnail': '-1'})\r\n # get relate_url\r\n relate_url = []\r\n htags = response.xpath(\r\n '//div[@class=\"col-md-4\"]/div[@class=\"card\"]/div[@class=\"card-body\"]/h4[@class=\"card-title\"]')\r\n for tag in htags:\r\n relate_urls = {}\r\n headline = tag.xpath('a/@title').extract_first()\r\n if headline is not []:\r\n url = str(tag.xpath('a/@href').extract_first())\r\n relate_urls.update({'headline': headline, 'url': url})\r\n relate_url.append(relate_urls)\r\n article.update({\"related_url\": relate_url})\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n article.get('link'))\r\n self.articleCount += 1\r\n yield article\r\n" }, { "alpha_fraction": 0.5357331037521362, "alphanum_fraction": 0.5392449498176575, "avg_line_length": 46.69230651855469, "blob_id": 
"ff28a6d7a649388754a9da2fff9cd06828bcfd6f", "content_id": "b069ef7bf85dc6b297e04186e52ca099585eff25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5695, "license_type": "no_license", "max_line_length": 125, "num_lines": 117, "path": "/news/spiders/tiin.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "from scrapy.spiders import CrawlSpider, Rule\r\nfrom scrapy.linkextractors import LinkExtractor\r\nimport scrapy\r\nimport modules.timeConverter as time\r\n\r\n\r\nclass TiinSpider(CrawlSpider):\r\n name = \"tiin\"\r\n custom_settings = {\r\n 'CONCURRENT_REQUESTS': 100,\r\n 'REACTOR_THREADPOOL_MAXSIZE': 20,\r\n 'LOG_LEVEL': 'INFO',\r\n 'COOKIES_ENABLED': False,\r\n 'RETRY_ENABLED': False,\r\n 'REDIRECT_ENABLED': False,\r\n 'AJAXCRAWL_ENABLED': True,\r\n }\r\n allowed_domains = ['tiin.vn', 'sharefb.cnnd.vn', 'www.facebook.com']\r\n start_urls = ['http://tiin.vn/default.html']\r\n\r\n rules = (\r\n Rule(LinkExtractor(allow_domains=['tiin.vn'], deny_domains=['diemthi.tiin.vn']), callback='parse_item', follow=True),\r\n )\r\n\r\n def __init__(self, crawlMode='', **kwargs):\r\n super().__init__(**kwargs)\r\n self.crawlMode = crawlMode\r\n if crawlMode is 'update' or crawlMode is '':\r\n self.crawlMode = 'Update'\r\n print(self.crawlMode)\r\n\r\n self.articleCount = 0\r\n\r\n def parse_item(self, response):\r\n article = dict()\r\n # get title, link, published\r\n title = response.xpath('//div[@id=\"body-content\"]/h1[@id=\"title-container\"]/span/text()').extract_first()\r\n if title is not None:\r\n # get meta\r\n article.update({'publisher': response.xpath('//meta[@name=\"dc.publisher\"]/@content').get()})\r\n article.update({'type': response.xpath(\"//meta[@property='og:type']/@content\").get()})\r\n article.update({'description': response.xpath(\"//meta[@name='description']/@content\").get()})\r\n article.update({'keywords': response.xpath(\"//meta[@name='keywords']/@content\").get()})\r\n article.update({'copyright': response.xpath(\"//head/meta[@name='copyright']/@content\").get()})\r\n article.update({'language': response.xpath(\"//meta[@name='language'][2]/@content\").get()})\r\n article.update({'geo_place_name': response.xpath(\"//meta[@name = 'geo.placename']/@content\").get()})\r\n article.update({'geo_region': response.xpath(\"//meta[@name = 'geo.region']/@content\").get()})\r\n article.update({'geo_position': response.xpath(\"//meta[@name = 'geo.position']/@content\").get()})\r\n article.update({'geo_region': response.xpath(\"//meta[@name = 'geo.region']/@content\").get()})\r\n article.update({'organization': 'Tiin'})\r\n link = response.url\r\n article.update({'title': title, 'link': link})\r\n category = response.xpath('//p[@id=\"breadcrumb\"]/a/text()').get()\r\n article.update({'category': category.strip()})\r\n # datePublished\r\n datePublished = response.xpath('//p[@id=\"time\"]/text()').get()\r\n datePublished = datePublished.strip()\r\n datePublished = time.Tiin_timestamp(datePublished)\r\n article.update({'datePublished': datePublished})\r\n\r\n dateModified = response.xpath('//p[@id=\"time\"]/text()').get()\r\n dateModified = dateModified.strip()\r\n dateModified = time.Tiin_timestamp(dateModified)\r\n article.update({'dateModified': dateModified})\r\n\r\n # author\r\n content = ''\r\n author = ''\r\n for text in response.xpath(\r\n '(//p[@class=\"article-author\"]/text())|(//p[@class=\"article-author\"]/a/text())|(//span['\r\n '@class=\"text-source\"]/text())').getall():\r\n author += 
text.strip()\r\n article.update({'author': author})\r\n for text in response.xpath('//div[@id=\"body-content\"]/p/text()').getall():\r\n content += text.strip()\r\n article.update({'content_article': content})\r\n word_count = len(content.split())\r\n article.update({'word_count': word_count})\r\n # get relate_url\r\n relate_url = []\r\n htags = response.xpath(\r\n '//div[@class=\"tiin-news-relate\"]/div[@class=\"wrap-news-relate\"]/div[@class=\"news-relate-item\"]')\r\n for tag in htags:\r\n relate_urls = {}\r\n headline = tag.xpath('a/@title').get()\r\n url = str(tag.xpath('a/@href').extract_first())\r\n relate_urls.update({'headline': headline, 'url': url})\r\n relate_url.append(relate_urls)\r\n article.update({\"related_url\": relate_url})\r\n\r\n # get image\r\n image = response.xpath('(//*[@id=\"body-content\"]/p/img/@src)|(//*[@id=\"body-content\"]/div/img/@src)').getall()\r\n article.update({'thumbnail': image})\r\n # get interactions\r\n like_request = \"https://www.facebook.com/plugins/like.php?href=\" + article[\r\n 'link'] + \"&width&layout=button_count&action=like&show_faces=true&share=true&height=21\"\r\n yield scrapy.Request(like_request, callback=self.parse_like, meta={'data': article})\r\n else:\r\n pass\r\n\r\n def parse_like(self, response):\r\n log = response.meta['data']\r\n likes = response.xpath('(//span[@id=\"u_0_2\"]/text())|(//*[@id=\"u_0_3\"]/text())').get()\r\n if likes is not None:\r\n if \"k\" in likes.lower():\r\n likes = likes.lower()\r\n likes = likes.replace(\",\", \".\")\r\n likes = likes.replace(\"k\", \"\")\r\n likes = float(likes) * 1000\r\n likes = int(likes)\r\n else:\r\n likes = -1\r\n log.update({'like_count': likes})\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n log.get('link'))\r\n self.articleCount += 1\r\n yield log" }, { "alpha_fraction": 0.5375984907150269, "alphanum_fraction": 0.5688708424568176, "avg_line_length": 45.06741714477539, "blob_id": "265a9b9a7405881147a3234b98c33c4c87894a91", "content_id": "e28ec13f83e1097adfb6b42b2de50598daa61cbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4189, "license_type": "no_license", "max_line_length": 758, "num_lines": 89, "path": "/news/spiders/test.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "import scrapy\r\nimport json\r\n\r\n\r\ndef remove_ctrl(string):\r\n string = string.replace('\\n', '')\r\n string = string.replace('\\0', '')\r\n string = string.replace('\\t', '')\r\n string = string.replace('\\r', '')\r\n return string\r\n\r\n\r\nclass DantriSpider(scrapy.Spider):\r\n name = 'test'\r\n start_urls = [\r\n \"https://www.nguoiduatin.vn/video-sieu-truc-thang-tang-hinh-niem-tu-hao-the-he-moi-cua-quan-doi-my-a452983.html\"]\r\n\r\n def start_requests(self):\r\n return 
[scrapy.Request(\"https://comment.vietid.net/comments?app_key=d9c694bd04eb35d96f1d71a84141d075&content_url=http://kenh14.vn/news-20191021231634269.chn&news_title=Q8OzIG3hu5dpIGNodXnhu4duICJtdWEgbcOobyDhu58gxJHDonUiIGPFqW5nIGfDonkgYsOjbyBNWEg%2fIOG7pmEgbeG7h3Qga2jDtG5nPyBN4buHdCB0aMOsIGNvaSBj4bqpbSBuYW5nIMSR4buDIGjhu49pIHBow6F0IMSDbiBsdcO0biBuw6gh&num_count=5&debugcache=1&min=0&scroll=0&http_referer=http://kenh14.vn/co-moi-chuyen-mua-meo-o-dau-cung-gay-bao-mxh-ua-met-khong-met-thi-coi-cam-nang-de-hoi-phat-an-luon-ne-20191021231634269.chn&verify=1&verify_flag=6dd71280c421ba5589a03a05e7e07410&funny_flag=0&height=238&iframe_comment_id=mingid_comment_iframe&comment_flag=0&news_url_short=doi-song&real_time=undefined&is_hidden_comment=0\", callback=self.parse_comment)]\r\n\r\n def parse(self, response):\r\n article = {}\r\n\r\n id_finder = response.xpath(\r\n '//script[@type=\"text/javascript\"]/@src').get()\r\n id = id_finder.replace('//embed.easyvideo.vn/play', '')\r\n video_finder = \"https://embed.easyvideo.vn/render/\" + \\\r\n id+\"?targetId=MeCloudLoader_\"+id\r\n yield scrapy.Request(video_finder, callback=self.parse_video, meta={'article': article})\r\n\r\n def parse_video(self, response):\r\n string = ''\r\n for a in response.xpath('//text()').getall():\r\n string += a\r\n pv1 = string.find('720p')\r\n if pv1 < 0:\r\n pv1 = string.find('480p')\r\n if pv1 < 0:\r\n pv1 = string.find('360p')\r\n pv2 = pv1 + string[pv1:].find(':') + 1\r\n pv3 = pv2 + string[pv2:].find('?')\r\n video_url = string[pv2:pv3]\r\n\r\n log = response.meta['article']\r\n log.update({'video-urls': video_url})\r\n yield log\r\n\r\n def get_comment(self, response, XPATH, counter):\r\n comments = []\r\n for comment in response.xpath(XPATH):\r\n comment_dict = {}\r\n primary_comment = comment.xpath('./div[contains(@id,\"form\")]')\r\n primary_ava = primary_comment.xpath(\r\n './/div[@class=\"avatar\"]//img/@src').get()\r\n primary_user = primary_comment.xpath(\r\n './/a[@class=\"full-name\"]/text()').get().strip()\r\n primary_time = primary_comment.xpath(\r\n '//span[@class=\"time-ago\"]/text()').get().strip()\r\n primary_geo = primary_comment.xpath(\r\n './/span[@class=\"city\"]/text()').get().strip()\r\n primary_content = primary_comment.xpath(\r\n './/div[@class=\"cm-content\"]/span/text()').get().strip()\r\n primary_likes = primary_comment.xpath(\r\n './/a[contains(@class,\"vote-count\")]/text()').get().strip()\r\n\r\n comment_dict.update({\r\n 'SenderAvatar': primary_ava,\r\n 'SenderFullName': primary_user,\r\n 'Publishedtime': primary_time,\r\n 'PublishedGeo': primary_geo,\r\n 'CommentContent': primary_content,\r\n 'Liked': primary_likes,\r\n })\r\n counter += 1\r\n if response.xpath('.//ul[@class=\"sub-cm \"]') is None:\r\n comment_dict.update({'Replies-count': 0,\r\n 'Replies': None})\r\n comments.append(comment_dict)\r\n else:\r\n [secondary_dict, secondary_count] = self.get_comment(\r\n comment, './/ul[@class=\"sub-cm \"]/li', 0)\r\n comment_dict.update({'Replies-count': secondary_count,\r\n 'Replies': secondary_dict})\r\n comments.append(comment_dict)\r\n return [comments, counter]\r\n\r\n def parse_comment(self, response):\r\n\r\n yield {'comments': comments}\r\n" }, { "alpha_fraction": 0.5569206476211548, "alphanum_fraction": 0.5792133212089539, "avg_line_length": 46.291866302490234, "blob_id": "f4f0a799849c19ef2d5570245170b777385c78b5", "content_id": "9bd770f96007727da65e770d0cfe2403187f64f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10098, 
"license_type": "no_license", "max_line_length": 483, "num_lines": 209, "path": "/news/spiders/nguoiduatin.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\nimport scrapy\r\nimport json\r\n\r\nimport modules.timeConverter as time\r\n\r\n\r\ndef remove_ctrl(string):\r\n string = string.replace('\\n', '')\r\n string = string.replace('\\0', '')\r\n string = string.replace('\\t', '')\r\n string = string.replace('\\r', '')\r\n return string\r\n\r\n\r\nclass NguoiDuatinSpider(scrapy.Spider):\r\n name = 'nguoiduatin'\r\n allowed_domains = ['nguoiduatin.vn']\r\n\r\n def __init__(self, **kwargs):\r\n super().__init__(**kwargs)\r\n self.articleCount = 0\r\n\r\n def start_requests(self):\r\n return [scrapy.Request(\"https://www.nguoiduatin.vn/\", callback=self.logged_in)]\r\n # return [scrapy.Request(\"https://www.nguoiduatin.vn/tap-chi-my-conde-nast-traveler-vinh-danh-intercontinental-danang-sun-peninsula-resort-la-khu-nghi-duong-tot-nhat-chau-a-a452528.html\", callback=self.parse_article)]\r\n # return [scrapy.Request(\"https://www.nguoiduatin.vn/hau-due-mat-troi-viet-nam-tap-33-34-dai-uy-duy-kien-coi-quan-ham-di-cuu-nguoi-yeu-a409674.html\", callback=self.parse_article)]\r\n\r\n def logged_in(self, response):\r\n urls = [\r\n \"https://www.nguoiduatin.vn/c/video\",\r\n \"https://www.nguoiduatin.vn/c/chinh-tri-xa-hoi\",\r\n \"https://www.nguoiduatin.vn/c/phap-luat\",\r\n \"https://www.nguoiduatin.vn/c/the-gioi\",\r\n \"https://www.nguoiduatin.vn/c/da-chieu\",\r\n \"https://www.nguoiduatin.vn/c/giai-tri\",\r\n \"https://www.nguoiduatin.vn/c/kinh-doanh\",\r\n \"https://www.nguoiduatin.vn/c/doi-song\",\r\n \"https://www.nguoiduatin.vn/c/cong-nghe\",\r\n \"https://www.nguoiduatin.vn/c/can-biet\",\r\n \"https://www.nguoiduatin.vn/c/infocus\"\r\n ]\r\n for url in urls:\r\n yield scrapy.Request(url)\r\n\r\n def parse(self, response):\r\n for href in response.xpath('/html/body//section[@class = \"col\"]//article/a/@href'):\r\n try:\r\n yield response.follow(href, self.parse_article)\r\n except Exception:\r\n self.logger.error(\"ERROR: \", exc_info=True)\r\n continue\r\n\r\n next_page = response.xpath(\r\n '/html/body//li[@class = \"page-item next\"]/a/@href').get()\r\n if next_page is not None:\r\n yield response.follow(next_page, self.parse)\r\n\r\n def parse_article(self, response):\r\n article = {}\r\n\r\n try:\r\n # get ld_json\r\n ld_json = response.xpath(\r\n '//html/head/script[contains(text(),\"NewsArticle\")]/text()').get()\r\n ld_json = remove_ctrl(ld_json)\r\n ld_json_dict = json.loads(ld_json)\r\n ld_json_dict = time.timestamp_converter(ld_json_dict)\r\n article.update(ld_json_dict)\r\n except:\r\n pass\r\n\r\n # get meta\r\n elems = {\r\n 'meta-description': response.xpath(\"//meta[@name='description']/@content\").get(),\r\n 'meta-keywords': response.xpath(\"//meta[@name='keywords']/@content\").get(),\r\n 'meta-title': response.xpath(\"//meta[@name='title']/@content\").get(),\r\n 'meta-copyright': response.xpath(\"//meta[@name='copyright']/@content\").get(),\r\n 'meta-author': response.xpath(\"//meta[@name='author']/@content\").get(),\r\n 'language': response.xpath('//meta[@http-equiv = \"content-language\"]/@content').get(),\r\n 'geo.placename': response.xpath('//meta[@name = \"geo.placename\"]/@content').get(),\r\n 'geo.position': response.xpath('//meta[@name = \"geo.region\"]/@content').get(),\r\n 'geo.region': response.xpath('//meta[@name = \"geo.region\"]/@content').get(),\r\n 'meta-article:author': 
response.xpath(\"//meta[@property='article:author']/@content\").get(),\r\n 'meta-article:publisher': response.xpath(\"//meta[@property='article:publisher']/@content\").get(),\r\n 'category': response.xpath('//li[@class = \"f-rsb m-auto nav-item position-relative d-inline-block active\"]/a/text()').get(),\r\n 'organization': 'người đưa tin',\r\n 'url': response.url,\r\n 'related_urls': response.xpath('//section[@class = \"article-content clearfix\"]/following-sibling::section[@class = \"row\"]//li[@class = \"box-news row pb-3 clearfix py-3 border-bottom \"]/a/@href').getall()\r\n }\r\n article.update(elems)\r\n\r\n # get content\r\n content = ''\r\n for text in response.xpath('/html/body//section[@class = \"article-content clearfix\"]/article//text()').getall():\r\n content += text.strip()\r\n for text in response.xpath('//div[@class = \"box-center\"]/p/text()').getall():\r\n content += text.strip()\r\n article.update({'content': content})\r\n\r\n word_count = len(content.split())\r\n article.update({'word_count': word_count})\r\n\r\n # get image url\r\n images = {}\r\n type1_index = 0\r\n for type1_index, src in enumerate(response.xpath('/html/body//section[@class = \"article-content clearfix\"]//figure[@class = \"tplCaption image\"]/img/@src').getall(), 1):\r\n images.update({'image' + str(type1_index): src})\r\n type2_index = type1_index + 1\r\n for type2_index, src in enumerate(response.xpath('//*[contains(@class,\"image-full-width\") or contains(@class,\"box\")]/img/@src').getall(), type2_index):\r\n images.update({'image' + str(type2_index): src})\r\n article.update({'image-urls': images})\r\n\r\n url = response.url\r\n url = url.replace('https://www.nguoiduatin.vn/', '')\r\n id = response.xpath('//@data-id').get()\r\n if id is None:\r\n pv1 = response.url.find('.html')\r\n pv2 = response.url.find('a', pv1-7) + 1\r\n id = response.url[pv2:pv1]\r\n\r\n # get video urls\r\n id_finder = response.xpath(\r\n '//script[contains(@src,\"//embed.easyvideo.vn/play\")]/@src').get()\r\n if id_finder is not None:\r\n easyvideo_id = id_finder.replace('//embed.easyvideo.vn/play', '')\r\n video_finder = \"https://embed.easyvideo.vn/render/\" + \\\r\n easyvideo_id+\"?targetId=MeCloudLoader_\"+easyvideo_id\r\n yield scrapy.Request(video_finder, callback=self.parse_video, meta={'article': article, 'url': url, 'id': id})\r\n else:\r\n # get likes\r\n like_request = \"https://www.facebook.com/v2.9/plugins/like.php?action=like&app_id=1069396303196363&channel=https%3A%2F%2Fstaticxx.facebook.com%2Fconnect%2Fxd_arbiter.php%3Fversion%3D44%23cb%3Df122fdd10517174%26domain%3Dwww.nguoiduatin.vn%26origin%3Dhttps%253A%252F%252Fwww.nguoiduatin.vn%252Ff3f7ea1e941e5e4%26relation%3Dparent.parent&container_width=410&href=https%3A%2F%2Fwww.nguoiduatin.vn%2F\" + url + \"&layout=button_count&locale=vi_VN&sdk=joey&share=true&size=small\"\r\n yield scrapy.Request(like_request, callback=self.parse_likes, meta={'article': article, 'id': id})\r\n\r\n def parse_video(self, response):\r\n article = response.meta['article']\r\n url = response.meta['url']\r\n id = response.meta['id']\r\n\r\n string = ''\r\n for a in response.xpath('//text()').getall():\r\n string += a\r\n pv1 = string.find('720p')\r\n if pv1 < 0:\r\n pv1 = string.find('480p')\r\n if pv1 < 0:\r\n pv1 = string.find('360p')\r\n pv2 = pv1 + string[pv1:].find(':') + 1\r\n pv3 = pv2 + string[pv2:].find('?')\r\n video_url = string[pv2:pv3]\r\n article.update({'video-url': video_url})\r\n\r\n # get likes\r\n like_request = 
\"https://www.facebook.com/v2.9/plugins/like.php?action=like&app_id=1069396303196363&channel=https%3A%2F%2Fstaticxx.facebook.com%2Fconnect%2Fxd_arbiter.php%3Fversion%3D44%23cb%3Df122fdd10517174%26domain%3Dwww.nguoiduatin.vn%26origin%3Dhttps%253A%252F%252Fwww.nguoiduatin.vn%252Ff3f7ea1e941e5e4%26relation%3Dparent.parent&container_width=410&href=https%3A%2F%2Fwww.nguoiduatin.vn%2F\" + url + \"&layout=button_count&locale=vi_VN&sdk=joey&share=true&size=small\"\r\n yield scrapy.Request(like_request, callback=self.parse_likes, meta={'article': article, 'id': id})\r\n\r\n def parse_likes(self, response):\r\n article = response.meta['article']\r\n id = response.meta['id']\r\n\r\n likes = response.xpath(\r\n '//button[@type=\"submit\"]/div/span[3]/text()').get()\r\n if likes is None:\r\n likes = '0'\r\n article.update({'likes-counter': likes})\r\n\r\n cmt_request = \"https://www.nguoiduatin.vn/article/\" + \\\r\n id+\"/comments?page=1&&sort=newest\"\r\n yield scrapy.Request(cmt_request, callback=self.parse_comments, meta={'article': article})\r\n\r\n def parse_comments(self, response):\r\n article = response.meta['article']\r\n\r\n str = ''\r\n for a in response.xpath('//text()').getall():\r\n str += a\r\n\r\n if str is 'null':\r\n article.update({'comments-count': 0, 'comments': ''})\r\n # return self.upload_article(article)\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n article.get('url'))\r\n self.articleCount += 1\r\n return article\r\n\r\n cmt_dict = []\r\n check = 0\r\n string = ''\r\n\r\n response_dict = json.loads(str)\r\n comments = response_dict.get('data').get('comments')\r\n users = response_dict.get('data').get('anonymousUsers')\r\n\r\n for comment in comments:\r\n comment['SenderFullName'] = users.get(\r\n comment.pop('anonymousUserId')).get('fullName')\r\n comment['CommentContent'] = comment.pop('content')\r\n comment['CreatedDate'] = comment.pop('createdTime')\r\n comment['Liked'] = comment.pop('likeCount')\r\n comment['Replies'] = comment.pop('replies')\r\n cmt_dict.append(comment)\r\n\r\n article.update({'comments': cmt_dict})\r\n\r\n # return self.upload_article(article)\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n article.get('url'))\r\n self.articleCount += 1\r\n return article\r\n" }, { "alpha_fraction": 0.564184844493866, "alphanum_fraction": 0.5795891880989075, "avg_line_length": 39.380531311035156, "blob_id": "60b38cf445983504f071f53835aeb11f31439248", "content_id": "3fc67559ed5ce989822e801bde43ecf491046747", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4674, "license_type": "no_license", "max_line_length": 450, "num_lines": 113, "path": "/test_folder/test_video_vnex.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "from scrapy.spiders import CrawlSpider, Rule\r\nfrom scrapy.linkextractors import LinkExtractor\r\nimport re\r\nimport scrapy\r\nimport json\r\nclass vdvnexpressclass(CrawlSpider):\r\n\r\n name = \"vdvnexpress\" # define name of spider\r\n allowed_domains = ['video.vnexpress.net','sharefb.cnnd.vn','usi-saas.vnexpress.net','www.facebook.com']\r\n start_urls = ['https://video.vnexpress.net/']\r\n\r\n rules = (\r\n Rule(LinkExtractor(), callback='parse_item', follow=True),\r\n )\r\n\r\n def parse_item(self, response):\r\n #print(response.url)\r\n article = {}\r\n #get ld json\r\n try:\r\n ld_json = response.xpath('//head/script[@type=\"application/ld+json\"]//text()').get()\r\n if ld_json == None:\r\n return ld_json == 0\r\n else:\r\n 
ld_json = ld_json\r\n article = json.loads(ld_json)\r\n except ValueError:\r\n return 0\r\n #get meta\r\n\r\n article.update({'meta-description' : response.xpath(\"//head/meta[@name='description']/@content\").get()})\r\n article.update({'meta-keywords' : response.xpath(\"//head/meta[@name='keywords']/@content\").get()})\r\n article.update({'meta-copyright' : response.xpath(\"//head/meta[@name='copyright']/@content\").get()})\r\n article.update({'meta-author' : response.xpath(\"//head/meta[@name='author']/@content\").get()})\r\n article.update({'meta-article:publisher' : response.xpath(\"//head/meta[@property='article:publisher_time']/@content\").get()})\r\n article.update({'meta-article:author' : response.xpath(\"//head/meta[@property='article:author']/@content\").get()})\r\n\r\n\r\n #title, link, author, content\r\n title = response.xpath('//div[@id=\"info_inner\"]/h1[@class=\"title\"]/text()').get()\r\n link = response.url\r\n article.update({'title': title, 'link': link})\r\n\r\n content =''\r\n author = ''\r\n text = response.xpath('//div[@id=\"info_inner\"]/p[@class=\"author o_info\"]/span/text()').get()\r\n if text == None:\r\n author = ''\r\n else:\r\n author += text.strip()\r\n article.update({'author' : author})\r\n text1 = response.xpath('//div[@id=\"info_inner\"]/div[@class=\"lead_detail\"]/text()').get()\r\n if text1 == None:\r\n content = ''\r\n else:\r\n content += text1.strip()\r\n article.update({'content' : content})\r\n\r\n #get comment\r\n\r\n id ={}\r\n\r\n\r\n objectid = response.xpath('//head/meta[@name=\"tt_article_id\"]/@content').get()\r\n if objectid == None:\r\n return 0\r\n else:\r\n objectid = objectid\r\n siteid = response.xpath('//head/meta[@name=\"tt_site_id\"]/@content').get()\r\n if siteid == None:\r\n return 0\r\n else:\r\n siteid = siteid\r\n categoryid = response.xpath('//head/meta[@name=\"tt_category_id\"]/@content').get()\r\n if categoryid == None:\r\n return 0\r\n else:\r\n categoryid = categoryid\r\n\r\n id.update({'objectid': objectid, 'siteid':siteid, 'categoryid':categoryid})\r\n\r\n\r\n #get total like\r\n like_request = \"https://www.facebook.com/plugins/like.php?action=like&app_id=&channel=https%3A%2F%2Fstaticxx.facebook.com%2Fconnect%2Fxd_arbiter.php%3Fversion%3D44%23cb%3Df18c6c90e40dcec%26domain%3Dvideo.vnexpress.net%26origin%3Dhttps%253A%252F%252Fvideo.vnexpress.net%252Ffc8a4c1ee2b278%26relation%3Dparent.parent&container_width=450&href=\" + response.url +\"&layout=button_count&locale=vi_VN&sdk=joey&share=false&show_faces=false&size=large\"\r\n yield scrapy.Request(like_request, callback=self.parse_like, meta={'article': article, 'id': id})\r\n #print(like_request)\r\n\r\n \r\n #get comment\r\n\r\n def parse_comment(self, response):\r\n str1 = ''\r\n for text in response.xpath('//text()').getall():\r\n str1 += text\r\n dict = json.loads(str1)\r\n totalcmt =len(dict)\r\n #print (d)\r\n log = response.meta['data']\r\n log.update({'totalcmt': totalcmt, 'comment': dict})\r\n\r\n yield log\r\n\r\n def parse_like(self,response):\r\n log = response.meta['article']\r\n id = response.meta['id']\r\n\r\n likes = response.xpath('(//span[@id=\"u_0_3\"]/text())|(//*[@id=\"u_0_4\"]/text())').get()\r\n\r\n log.update({'likes-counter': likes})\r\n\r\n cmt_resquest = 'https://usi-saas.vnexpress.net/index/get?offset=0&limit=24&frommobile=0&sort=like&is_onload=1&objectid='+ id['objectid'] + '&objecttype=1&siteid='+ id['siteid']+ '&categoryid='+ id['categoryid']\r\n\r\n yield scrapy.Request(cmt_resquest,callback=self.parse_comment,meta={'data': log})" }, { 
"alpha_fraction": 0.5016604661941528, "alphanum_fraction": 0.5112206935882568, "avg_line_length": 43.374427795410156, "blob_id": "ffa5c7aa402f247db553d12584e4c82d5dfa5075", "content_id": "d28e5bb67f13e2f5c0bb16c696c7759bee62fd0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9937, "license_type": "no_license", "max_line_length": 244, "num_lines": 219, "path": "/news/spiders/techtalk.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "import scrapy\r\nfrom scrapy.http import FormRequest\r\nimport json\r\nimport modules.timeConverter as time\r\n\r\n\r\nclass TechtalkSpider(scrapy.Spider):\r\n name = 'techtalk'\r\n allowed_domains = ['techtalk.vn']\r\n prefix = \"techtalk.vn\"\r\n\r\n def __init__(self, crawlMode='', **kwargs):\r\n super().__init__(**kwargs)\r\n self.crawlMode = crawlMode\r\n if crawlMode is 'update' or crawlMode is '':\r\n self.crawlMode = 'Update'\r\n\r\n self.articleCount = 0\r\n\r\n def start_requests(self):\r\n return [scrapy.Request(\"https://techtalk.vn\", callback=self.logged_in)]\r\n # return [scrapy.Request(\"https://techtalk.vn/video-cam-xuc-khi-choi-lai-game-4-nut.html\", callback=self.parse_article)]\r\n\r\n def logged_in(self, response):\r\n block_urls = [\r\n 'https://techtalk.vn/resources',\r\n 'https://techtalk.vn/tech',\r\n ]\r\n for url in block_urls:\r\n yield scrapy.Request(url, callback=self.parse_block)\r\n\r\n loop_urls = [\r\n 'https://techtalk.vn/category/dev',\r\n 'https://techtalk.vn/category/su-kien',\r\n 'https://techtalk.vn/category/chuyen-gia-noi',\r\n 'https://techtalk.vn/category/tam-su-coder'\r\n ]\r\n for url in loop_urls:\r\n yield scrapy.Request(url, callback=self.parse_loop)\r\n\r\n def parse_block(self, response):\r\n block = response.xpath(\r\n '//div[contains(@class, \"wpb_column vc_column_container\")]//div[@class = \"wpb_wrapper\"]/div/@class').get()\r\n block = block.split(' ')[1]\r\n current_page = 1\r\n for i in range(1, 500):\r\n frmdata = {\"action\": \"td_ajax_block\",\r\n \"td_current_page\": str(current_page), \"block_type\": block}\r\n current_page += 1\r\n try:\r\n r = FormRequest(\r\n 'https://techtalk.vn/wp-admin/admin-ajax.php?td_theme_name=Newspaper&v=7.3', callback=self.parse, formdata=frmdata, meta={'stillCrawl': True})\r\n # self.logger.info(current_page)\r\n yield r\r\n if r.meta['stillCrawl'] is False:\r\n break\r\n except:\r\n self.logger.error(\"ERROR: \", exc_info=True)\r\n continue\r\n\r\n def parse_loop(self, response):\r\n cate_id = response.xpath(\r\n '//body[contains(@class, \"wpb-js-composer\")]/@class').get()\r\n cate_id = cate_id.split(' ')[3].replace('category-', '')\r\n script = response.xpath(\r\n '//script[contains(text(), \"loopState.max_num_pages\")]').get()\r\n pv1 = script.find('loopState.max_num_pages = ')\r\n pv2 = pv1 + script[pv1:].find(';')\r\n max_pages = script[pv1:pv2]\r\n strings = [s for s in max_pages.split() if s.isdigit()]\r\n max_pages = strings[0]\r\n current_page = 1\r\n for i in range(1, int(max_pages) + 1):\r\n frmdata = {\"action\": \"td_ajax_loop\", \"loopState[sidebarPosition]\": '', \"loopState[moduleId]\": '1',\r\n \"loopState[currentPage]\": str(current_page), \"loopState[atts][category_id]\": cate_id}\r\n current_page += 1\r\n try:\r\n r = FormRequest(\r\n 'https://techtalk.vn/wp-admin/admin-ajax.php?td_theme_name=Newspaper&v=7.3', callback=self.parse, formdata=frmdata, meta={'stillCrawl': True})\r\n yield r\r\n if r.meta['stillCrawl'] is False:\r\n break\r\n except:\r\n 
self.logger.error(\"ERROR: \", exc_info=True)\r\n continue\r\n\r\n def parse(self, response):\r\n if response.xpath('//h3/a/@href').get() is None:\r\n response.meta['stillCrawl'] = False\r\n for href in response.xpath('//h3/a/@href').getall():\r\n try:\r\n href = href.replace(\"\\\\\", '').replace('\"', '')\r\n yield response.follow(href, callback=self.parse_article)\r\n except:\r\n self.logger.error(\"ERROR: \", exc_info=True)\r\n continue\r\n\r\n def parse_article(self, response):\r\n article = {}\r\n\r\n # get ld_json\r\n try:\r\n ld_json = response.xpath(\r\n '//script[contains(text(),\"Article\")]/text()').get()\r\n ld_json_dict = json.loads(ld_json)\r\n ld_json_dict = time.timestamp_converter(ld_json_dict)\r\n article.update(ld_json_dict)\r\n except:\r\n pass\r\n\r\n # get meta\r\n elems = {\r\n 'meta-description': response.xpath(\"//meta[@name='description']/@content\").get(),\r\n 'meta-keywords': response.xpath(\"//meta[@name='keywords']/@content\").get(),\r\n 'meta-title': response.xpath(\"//meta[@name='title']/@content\").get(),\r\n 'meta-copyright': response.xpath(\"//meta[@name='copyright']/@content\").get(),\r\n 'meta-author': response.xpath(\"//meta[@name='author']/@content\").get(),\r\n 'language': response.xpath('//meta[@http-equiv = \"content-language\"]/@content').get(),\r\n 'geo.placename': response.xpath('//meta[@name = \"geo.placename\"]/@content').get(),\r\n 'geo.position': response.xpath('//meta[@name = \"geo.region\"]/@content').get(),\r\n 'geo.region': response.xpath('//meta[@name = \"geo.region\"]/@content').get(),\r\n 'meta-article:author': response.xpath(\"//meta[@property='article:author']/@content\").get(),\r\n 'meta-article:publisher': response.xpath(\"//meta[@property='article:publisher']/@content\").get(),\r\n 'organization': 'techtalk',\r\n 'url': response.url,\r\n # 'related_urls': response.xpath('//div[@class = \"article-oldnew\"]//div/div[@class = \"article-oldnew-img\"]/a/@href').getall()\r\n }\r\n article.update(elems)\r\n try:\r\n article.update({'category': response.xpath(\r\n '//a[@class = \"entry-crumb\"]')[1].xpath('./span/text()').get()})\r\n except:\r\n pass\r\n\r\n # get content\r\n content = ''\r\n for text in response.xpath('//div[@class = \"td-post-content\"]//p/text()').getall():\r\n content += text.strip()\r\n article.update({'content': content})\r\n word_count = len(content.split())\r\n article.update({'word_count': word_count})\r\n\r\n # get image url\r\n images = {}\r\n for index, src in enumerate(response.xpath('//div[@class=\"td-post-content\"]//*[contains(@class,\"image\") or contains(@class,\"Image\")]//@src').getall(), 1):\r\n images.update({'image' + str(index): src})\r\n article.update({'image-urls': images})\r\n\r\n # get video url\r\n videos = {}\r\n for index, src in enumerate(response.xpath('//div[@class=\"td-post-content\"]//iframe/@src').getall(), 1):\r\n videos.update({'video' + str(index): src})\r\n article.update({'video urls': videos})\r\n\r\n # get hashtags\r\n hashtags = {}\r\n for index, href in enumerate(response.xpath('//ul[@class = \"td-tags td-post-small-box clearfix\"]//@href').getall(), 1):\r\n hashtags.update({'tag'+str(index): href})\r\n article.update({'hash-tags': hashtags})\r\n\r\n # get views\r\n views = response.xpath('//div[@class=\"td-post-views\"]//text()').get()\r\n article.update({'views': views})\r\n\r\n # get likes\r\n like_request = \"https://www.facebook.com/plugins/like.php?href=\"+response.url + \\\r\n 
\"&layout=button_count&show_faces=false&width=105&action=like&colorscheme=light&height=21\"\r\n yield scrapy.Request(like_request, callback=self.parse_likes, meta={'article': article, 'url': response.url})\r\n\r\n def parse_likes(self, response):\r\n article = response.meta['article']\r\n url = response.meta['url']\r\n\r\n likes = response.xpath(\r\n '//button[@type=\"submit\"]/div/span[3]/text()').get()\r\n if likes is not None:\r\n strings = [s for s in likes.split() if s.isdigit()]\r\n if len(strings) != 0:\r\n likes = strings[0]\r\n else:\r\n likes = '0'\r\n else:\r\n likes = '0'\r\n\r\n article.update({'likes-counter': likes})\r\n\r\n # self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n # article.get('url'))\r\n # self.articleCount += 1\r\n # return article\r\n\r\n # get related-urls\r\n request_url = \"https://tr.topdevvn.com/recommend?t=url&c=8d6d4537822016fc85c592e82b08e72b\"\r\n yield scrapy.Request(request_url,\r\n callback=self.parse_related, meta={'article': article}, dont_filter=True, headers={'Referer': url,\r\n 'Accept': '*/*',\r\n 'Origin': 'https://techtalk.vn',\r\n 'Sec-Fetch-Mode': 'cors',\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'})\r\n\r\n def parse_related(self, response):\r\n article = response.meta['article']\r\n\r\n try:\r\n related_urls = []\r\n body = response.xpath('//text()').get()\r\n dict = json.loads(body)\r\n jobs = dict['job']\r\n jobs = json.loads(jobs)\r\n for job in jobs:\r\n related_urls.append(job['site'])\r\n article.update({'related_urls': related_urls})\r\n except:\r\n pass\r\n\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n article.get('url'))\r\n self.articleCount += 1\r\n return article\r\n" }, { "alpha_fraction": 0.5126622319221497, "alphanum_fraction": 0.5151946544647217, "avg_line_length": 37.987342834472656, "blob_id": "4b3c23e659d9948b32854f9cf71bbeb105fb3b2e", "content_id": "df1e8a98b25ccf1692ed5ada42207631fef7f02c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6318, "license_type": "no_license", "max_line_length": 154, "num_lines": 158, "path": "/news/spiders/kipalog.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\nimport scrapy\r\nimport json\r\nfrom scrapy_splash import SplashRequest\r\n\r\n\r\nclass KipalogSpider(scrapy.Spider):\r\n name = 'kipalog'\r\n # start_urls = [\"https://viblo.asia/newest\"]\r\n\r\n prefix = 'https://kipalog.com'\r\n script = \"\"\"\r\n function main(splash, args)\r\n assert(splash:go(args.url))\r\n assert(splash:wait(0.5))\r\n return splash:html()\r\n end\r\n \"\"\"\r\n\r\n def __init__(self):\r\n self.articleCount = 0\r\n\r\n def start_requests(self):\r\n return [scrapy.Request(\"https://kipalog.com/posts/Toi-da-tiet-kiem--5-moi-thang-voi-Heroku-nhu-the-nao\", callback=self.parse_article)]\r\n\r\n def parse(self, response):\r\n for post in response.xpath('//div[@class = \"post-feed-item\"]'):\r\n tags = []\r\n for tag in post.xpath('.//div[@class=\"tags\"]/a/@href').getall():\r\n tag = \"https://viblo.asia\" + tag\r\n tags.append(tag)\r\n post_url = \"https://viblo.asia\" + post.xpath('.//h3/a/@href').get()\r\n yield SplashRequest(post_url, callback=self.parse_article, endpoint='render.html', args={'lua_source': self.script}, meta={'hash-tags': tags})\r\n\r\n next_page = response.xpath('//li[@class = \"page-item\"]/a/@href').get()\r\n if (next_page):\r\n yield 
response.follow(next_page, self.parse)\r\n\r\n def parse_article(self, response):\r\n article = {}\r\n\r\n # get ld_json\r\n try:\r\n ld_json = response.xpath(\r\n \"//script[contains(text(),'Article')]/text()\").get()\r\n ld_json_dict = json.loads(ld_json)\r\n article.update(ld_json_dict)\r\n except:\r\n pass\r\n\r\n # get meta\r\n elems = {\r\n 'meta-description': response.xpath(\"//meta[@name='description']/@content\").get(),\r\n 'meta-keywords': response.xpath(\"//meta[@name='keywords']/@content\").get(),\r\n 'meta-title': response.xpath(\"//meta[@name='title']/@content\").get(),\r\n 'meta-copyright': response.xpath(\"//meta[@name='copyright']/@content\").get(),\r\n 'meta-author': response.xpath(\"//meta[@name='author']/@content\").get(),\r\n 'meta-content-language': response.xpath('//meta[@name = \"content-language\"]/@content').get(),\r\n 'meta-geo.placename': response.xpath('//meta[@name = \"geo.placename\"]/@content').get(),\r\n 'meta-geo.position': response.xpath('//meta[@name = \"geo.region\"]/@content').get(),\r\n 'meta-geo.region': response.xpath('//meta[@name = \"geo.region\"]/@content').get(),\r\n 'meta-article:author': response.xpath(\"//meta[@property='article:author']/@content\").get(),\r\n 'meta-article:publisher': response.xpath(\"//meta[@property='article:publisher']/@content\").get(),\r\n 'url': response.url\r\n }\r\n article.update(elems)\r\n\r\n # get related posts\r\n related = response.xpath(\r\n '//div[@class = \"suggest posts list\"]/div[@class = \"ui massive list feed-list\"]')[1]\r\n if related is not None:\r\n related_urls = []\r\n for url in related.xpath('.//div[@class=\"header\"]/a/@href').getall():\r\n url = \"https://kipalog.com\" + url\r\n related_urls.append(url)\r\n article.update({'related-urls': related_urls})\r\n\r\n # get hashtags\r\n tags = []\r\n for tag in response.xpath('//h1/div[@class = \"tag\"]/a/@href').getall():\r\n tag = \"https://kipalog.com\" + tag\r\n tags.append(tag)\r\n article.update({'hash-tags': tags})\r\n\r\n # get likes/ upvotes counts\r\n likes = response.xpath(\r\n '//div[@class = \"hidden-meta\"]/input[contains(@ng-init, \"like_count\")]/@ng-init').get()\r\n likes = likes.replace('like_count=', '')\r\n article.update({'likes-counter': likes})\r\n\r\n # get content\r\n content = ''\r\n for text in response.xpath('//section[@id = \"content\"]//p/text()').getall():\r\n content += text.strip()\r\n article.update({'content': content})\r\n\r\n # get image url\r\n images = {}\r\n for index, src in enumerate(response.xpath('//section[@id = \"content\"]//img/@src').getall(), 1):\r\n images.update({'image' + str(index): src})\r\n\r\n article.update({'image-urls': images})\r\n\r\n # get comments\r\n postId = response.xpath(\r\n '//div[@class = \"hidden-meta\"]/input[contains(@ng-init, \"postId\")]/@ng-init').get()\r\n postId = postId.replace('postId=', '')\r\n postId = postId.replace(\"'\", '')\r\n comment_url = \"https://kipalog.com/posts/\" + postId + \"/comments\"\r\n yield scrapy.Request(comment_url, callback=self.parse_comments, meta={'article': article})\r\n\r\n def parse_comments(self, response):\r\n article = response.meta['article']\r\n\r\n str = ''\r\n for a in response.xpath('//text()').getall():\r\n str += a\r\n\r\n if str is 'null':\r\n article.update({'comments-count': 0, 'comments': ''})\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n article.get('url'))\r\n self.articleCount += 1\r\n return article\r\n\r\n cmt_dict = []\r\n check = 0\r\n string = ''\r\n\r\n for a in str:\r\n if a is '{':\r\n check = 
1\r\n if check is 1:\r\n string += a\r\n if a is '}':\r\n check = 0\r\n string += a\r\n try:\r\n cmt_dict.append(json.loads(string))\r\n except:\r\n pass\r\n string = ''\r\n\r\n # No reply function\r\n cmt_count = len(cmt_dict)\r\n for cmt in cmt_dict:\r\n cmt['SenderFullName'] = cmt.get('user').pop(\"name\")\r\n cmt['CommentContent'] = cmt.pop('content')\r\n cmt['Liked'] = cmt.pop('like_count')\r\n cmt['SenderAvatar'] = self.prefix + \\\r\n cmt.get('user').pop('avatar_url_path')\r\n cmt.pop('user')\r\n article.update({'comments-count': cmt_count, 'comments': cmt_dict})\r\n\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n article.get('url'))\r\n self.articleCount += 1\r\n return article\r\n" }, { "alpha_fraction": 0.5137912631034851, "alphanum_fraction": 0.5172165036201477, "avg_line_length": 45.83620834350586, "blob_id": "d55bc403f014eb2c50edba71c03b16b7d6026afd", "content_id": "d8bf06f2b2b192f4af9087f0cb36a59ea40e4519", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5547, "license_type": "no_license", "max_line_length": 175, "num_lines": 116, "path": "/news/spiders/cafef.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "from scrapy.spiders import CrawlSpider, Rule\r\nfrom scrapy.linkextractors import LinkExtractor\r\nimport scrapy\r\nimport json\r\nimport modules.timeConverter as time\r\n\r\nclass CafefSpider(CrawlSpider):\r\n name = \"cafef\"\r\n custom_settings = {\r\n 'CONCURRENT_REQUESTS': 100,\r\n 'REACTOR_THREADPOOL_MAXSIZE': 20,\r\n 'LOG_LEVEL': 'INFO',\r\n 'COOKIES_ENABLED': False,\r\n 'RETRY_ENABLED': False,\r\n 'REDIRECT_ENABLED': False,\r\n 'AJAXCRAWL_ENABLED': True,\r\n }\r\n allowed_domains = ['cafef.vn', 'sharefb.cnnd.vn']\r\n start_urls = ['http://cafef.vn/']\r\n\r\n rules = (\r\n Rule(LinkExtractor(allow_domains=['cafef.vn'], deny_domains=['s.cafef.vn', 'images1.cafef.vn', 'solieu4.cafef.vn','ta.cafef.vn']), callback='parse_item', follow=True),\r\n )\r\n\r\n def __init__(self, crawlMode='', **kwargs):\r\n super().__init__(**kwargs)\r\n self.crawlMode = crawlMode\r\n if crawlMode is 'update' or crawlMode is '':\r\n self.crawlMode = 'Update'\r\n print(self.crawlMode)\r\n\r\n self.articleCount = 0\r\n\r\n def parse_item(self, response):\r\n article = dict()\r\n title_arr = response.xpath('//h1[@class=\"title\"]/text()').get()\r\n if title_arr is not None:\r\n title = title_arr.strip()\r\n # get ld_json\r\n try:\r\n ld_json = response.xpath('//script[contains(text(),\"NewsArticle\")]/text()').get()\r\n ld_json = ld_json\r\n ld_json = json.loads(ld_json)\r\n ld_json = time.timestamp_converter(ld_json)\r\n article.update(ld_json)\r\n except:\r\n pass\r\n # get headline\r\n article.update({'headline': response.xpath(\"//meta[@itemprop='headline']/@content\").get()})\r\n # get thumbnail\r\n image_list = response.xpath('//div/img/@src').getall()\r\n image_str = str(image_list)\r\n article.update({'thumbnail': image_str})\r\n # get meta\r\n article.update({'type': response.xpath(\"//head/meta[@property='og:type']/@content\").get()})\r\n article.update({'description': response.xpath(\"//meta[@name='description']/@content\").get()})\r\n article.update({'keywords': response.xpath(\"//meta[@name='keywords']/@content\").get()})\r\n article.update({'category': response.xpath(\"//meta[@property='article:section']/@content\").get()})\r\n article.update({'copyright': response.xpath(\"//meta[@name='copyright']/@content\").get()})\r\n article.update({'author': 
response.xpath(\"//meta[@name='author']/@content\").get()})\r\n article.update({'language': response.xpath(\"//meta[@name='Language']/@content\").get()})\r\n article.update({'geo_place_name': response.xpath(\"//meta[@name = 'geo.placename']/@content\").get()})\r\n article.update({'geo_region': response.xpath(\"//meta[@name = 'geo.region']/@content\").get()})\r\n article.update({'geo_position': response.xpath(\"//meta[@name = 'geo.position']/@content\").get()})\r\n article.update({'organization': 'Cafef'})\r\n # get title, link\r\n link = response.url\r\n article.update({'title': title, 'link': link})\r\n article.update({'author': response.xpath(\"//p[@class='author']/text()\").get()})\r\n # get contents\r\n content = ''\r\n for text in response.xpath(\r\n '(//div[@class=\"contentdetail\"]/span/p/text())|(//div[@class=\"companyIntro\"]/text())').getall():\r\n content += text.strip()\r\n article.update({'content_article': content})\r\n if content is not None:\r\n word_count = len(content.split())\r\n article.update({'word_count': word_count})\r\n else:\r\n word_count = -1\r\n article.update({'word_count': word_count})\r\n\r\n # get likes,comments\r\n yield scrapy.Request(\"https://sharefb.cnnd.vn/?urls=\" + response.url,\r\n callback=self.parse_interactions,\r\n headers={'Accept': 'application/json, text/javascript, */*; q=0.01',\r\n 'Origin': 'https://cafef.vn',\r\n 'Referer': response.url,\r\n 'Sec-Fetch-Mode': 'cors',\r\n },\r\n meta={'article': article})\r\n\r\n # get relate_url\r\n relate_url = []\r\n htags = response.xpath('//div[@class=\"bg-tit-samecate\"]/h4')\r\n for tag in htags:\r\n relate_urls = {}\r\n headline = tag.xpath('a/@title').get()\r\n url = \"https://cafef.vn\" + str(tag.xpath('a/@href').extract_first())\r\n relate_urls.update({'headline': headline, 'url': url})\r\n relate_url.append(relate_urls)\r\n article.update({\"related_url\": str(relate_url)})\r\n\r\n def parse_interactions(self, response):\r\n str1 = ''\r\n for text in response.xpath('//text()').getall():\r\n str1 += text\r\n list_inter = json.loads(str1)\r\n dict_inter = dict(list_inter[0])\r\n del dict_inter['url']\r\n article = response.meta['article']\r\n article.update(dict_inter)\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n article.get('link'))\r\n self.articleCount += 1\r\n yield article" }, { "alpha_fraction": 0.5640985369682312, "alphanum_fraction": 0.5652036666870117, "avg_line_length": 40.2400016784668, "blob_id": "610c7984ba1157fd8ecef4dbe13d0d04e0ffa902", "content_id": "c886ef9f958f4d4d736533fb64588aab2b7c80f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6344, "license_type": "no_license", "max_line_length": 143, "num_lines": 150, "path": "/modules/MongoDB_driver_Duong.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "import json\r\nfrom bson.objectid import ObjectId\r\n\r\ndef check(string, dict):\r\n try:\r\n return dict[string]\r\n except KeyError:\r\n return \"-1\"\r\n\r\ndef checknumber(string, dict):\r\n try:\r\n return dict[string]\r\n except KeyError:\r\n return -1\r\n\r\n# lấy các comment trong báo\r\ndef list_comment(cmt, article_id, cmt_id_goc):\r\n data_comment = dict()\r\n list_all_comment = []\r\n comment_id = ObjectId()\r\n content_cmt = cmt[\"content\"]\r\n author = cmt[\"sender_fullname\"]\r\n like = int(cmt[\"likes\"])\r\n created_at = cmt[\"created_date\"]\r\n data_comment.update(\r\n {'comment_id': comment_id, 'article_id': article_id, 'content': content_cmt, 
\"author\": author, \"like\": like, \"created_at\": created_at})\r\n if cmt_id_goc is not None:\r\n data_comment.update({'commentreplyid': cmt_id_goc})\r\n else:\r\n data_comment.update({'commentreplyid': None})\r\n list_all_comment.append(data_comment)\r\n if cmt['child_comments'] is not None:\r\n for cmt_reply in cmt['child_comments']:\r\n reply = list_comment(cmt_reply, article_id, comment_id)\r\n list_all_comment.extend(reply)\r\n return list_all_comment\r\n\r\ndef list_comment_vne(cmt, article_id, cmt_id_goc):\r\n data_comment = dict()\r\n list_all_comment = []\r\n comment_id = ObjectId()\r\n content_cmt = cmt.get('content')\r\n author = cmt.get('full_name')\r\n like = cmt.get(\"userlike\")\r\n data_comment.update(\r\n {'comment_id': comment_id, 'article_id': article_id, 'content': content_cmt, \"author\": author, \"like\": like})\r\n if cmt_id_goc is not None:\r\n data_comment.update({'commentreplyid': cmt_id_goc})\r\n else:\r\n data_comment.update({'commentreplyid': None})\r\n list_all_comment.append(data_comment)\r\n\r\n if cmt.get('replys') is not None:\r\n a = cmt.get('replys')\r\n if a.get('items') is not None:\r\n for i in a.get('items'):\r\n reply = list_comment_vne(i, article_id, comment_id)\r\n list_all_comment.extend(reply)\r\n return list_all_comment\r\n\r\n# insert và update dữ liệu lên db\r\ndef insert_data_article(db, article):\r\n # insert article\r\n data_article = dict()\r\n headline = check(\"headline\", article)\r\n thumbnail = str(check(\"thumbnail\", article))\r\n description = check(\"description\", article)\r\n type_article = check(\"type\", article)\r\n date_published = check(\"datePublished\", article)\r\n date_modified = check(\"dateModified\", article)\r\n author = check(\"author\", article)\r\n publishers = check(\"publisher\", article)\r\n publisher = json.dumps(publishers)\r\n content = check(\"content_article\", article)\r\n images1 = check(\"image\", article)\r\n images = json.dumps(images1)\r\n keywords = check(\"keywords\", article)\r\n category = check(\"category\", article)\r\n language = check(\"language\", article)\r\n geo_place_name = check(\"geo_place_name\", article)\r\n geo_region = check(\"geo_region\", article)\r\n geo_position = check(\"geo_position\", article)\r\n word_count = checknumber(\"word_count\", article)\r\n url = check(\"link\", article)\r\n like = int(checknumber(\"like_count\", article))\r\n share = int(checknumber(\"share_count\", article))\r\n seen = int(checknumber(\"seen\", article))\r\n related_urls1 = check(\"related_url\", article)\r\n related_urls = json.dumps(related_urls1)\r\n organization = check(\"organization\", article)\r\n raw_data = \"-1\"\r\n data_article.update(\r\n {\r\n 'headline': headline, 'thumbnail': thumbnail, 'description': description, 'type': type_article,\r\n 'date_published': date_published, 'date_modified': date_modified, 'author': author, 'publisher': publisher,\r\n 'content': content, 'images': images, 'keywords': keywords, 'category': category, 'language': language,\r\n 'geo_place_name': geo_place_name, 'geo_region': geo_region, 'geo_position': geo_position,\r\n 'word_count': word_count, 'url': url, 'like': like, 'share': share, 'seen': seen, 'organization': organization,\r\n 'related_urls': related_urls\r\n\r\n })\r\n Article = dict()\r\n search_for_article = db['articles'].find_one({'url': url})\r\n if search_for_article is None:\r\n searched_article_id = ObjectId()\r\n Article = {'_id': searched_article_id}\r\n Article.update(data_article)\r\n db['articles'].insert_one(Article)\r\n 
else:\r\n searched_article_id = search_for_article.get('_id')\r\n db['articles'].update_one(search_for_article,\r\n {'$set': data_article}, True)\r\n try:\r\n json_comment = article[\"comment_article\"]\r\n if 'data_vn' in json_comment:\r\n inf_comment = json_comment['data_vn']\r\n comments = inf_comment\r\n for cmt in comments:\r\n all_comment = list_comment_vne(cmt, searched_article_id, None)\r\n for comment_insert in all_comment:\r\n search_for_comment = db['comments'].find_one(\r\n {\r\n 'article_id': searched_article_id,\r\n 'content': comment_insert.get('content')\r\n }\r\n )\r\n if search_for_comment is None:\r\n db['comments'].insert_one(comment_insert)\r\n else:\r\n db['comments'].update_one(search_for_comment, {'$set': comment_insert}, True)\r\n elif 'Data' in json_comment:\r\n inf_comment = json_comment['Data']\r\n comments = json.loads(inf_comment)\r\n for cmt in comments:\r\n all_comment = list_comment(cmt, searched_article_id, None)\r\n for comment_insert in all_comment:\r\n search_for_comment = db['comments'].find_one(\r\n {\r\n 'article_id': searched_article_id,\r\n 'content': comment_insert.get('content')\r\n }\r\n )\r\n if search_for_comment is None:\r\n db['comments'].insert_one(comment_insert)\r\n else:\r\n db['comments'].update_one(search_for_comment, {'$set': comment_insert}, True)\r\n else:\r\n pass\r\n except:\r\n pass" }, { "alpha_fraction": 0.5892420411109924, "alphanum_fraction": 0.6136919260025024, "avg_line_length": 36.9523811340332, "blob_id": "d7b7784bec9b04d29c5f3623830af624a1d75603", "content_id": "602c352cd590e07e84d79d15236912d8db7529d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 818, "license_type": "no_license", "max_line_length": 89, "num_lines": 21, "path": "/news/mongoPipeline.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "from pymongo import MongoClient\r\nimport modules.mongoDriver as driver\r\nimport os\r\nfrom .config_enviroment import *\r\n\r\n\r\nclass MongoUploadPipeline(object):\r\n def __init__(self):\r\n self.client = MongoClient(\r\n 'mongodb://'+AUTH_USERNAME+':'+AUTH_PASSWORD+'@'+HOST+':'+PORT+'/'+DATABASE)\r\n # self.client = MongoClient(\r\n # 'mongodb://'+ HOST+':'+PORT+'/'+DATABASE)\r\n # self.client = MongoClient('mongodb://localhost:27017')\r\n # self.client.admin.authenticate('admin', 'CIST#2o!7', mechanism='SCRAM-SHA-256')\r\n # self.client = MongoClient(['10.0.8.32'], port=9042)\r\n self.database = 'articles'\r\n\r\n def process_item(self, item, spider):\r\n database = self.client[self.database]\r\n driver.insert_article(database, item)\r\n return item\r\n" }, { "alpha_fraction": 0.46859902143478394, "alphanum_fraction": 0.47428247332572937, "avg_line_length": 33.189998626708984, "blob_id": "8543a1e9e521a27fd780f4170f45c9cbe6a0fee2", "content_id": "7a8f0eb88102d497bbeeaea8616b14413fc933b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3543, "license_type": "no_license", "max_line_length": 160, "num_lines": 100, "path": "/news/pipelines.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "import pymongo\r\nfrom pymongo import MongoClient\r\nfrom datetime import datetime\r\n\r\nimport modules.MongoDB_driver_Duong as driver_duong\r\nimport modules.mongoDriver as driver_minh\r\n# import module_news.MongoDB_driver as driver\r\n\r\nfrom .config_enviroment import *\r\n\r\n\r\nclass MongoPipeline(object):\r\n def __init__(self):\r\n if AUTH_USERNAME != '' and 
HOST != '127.0.0.1':\r\n            self.client = MongoClient(\r\n                'mongodb://'+AUTH_USERNAME+':'+AUTH_PASSWORD+'@'+HOST+':'+PORT+'/'+DATABASE)\r\n        else:\r\n            self.client = MongoClient(\r\n                'mongodb://' + HOST + ':' + PORT + '/' + DATABASE)\r\n        self.database = 'articles'\r\n        self.spiders_duong = [\r\n            'afamily',\r\n            'baomoi',\r\n            'cafef',\r\n            'tiin',\r\n            'tuoitre',\r\n            'yeah1',\r\n            'vtv.vn',\r\n            'saostar',\r\n            'dspl',\r\n            'vnexpress'\r\n        ]\r\n        self.spiders_minh = [\r\n            'dantri',\r\n            'kenh14',\r\n            'soha',\r\n            'nguoiduatin',\r\n            'thanhnien',\r\n            'zing',\r\n            'viblo',\r\n            'vietnamnet',\r\n            'techtalk'\r\n        ]\r\n\r\n        self.organization = {\r\n            'dantri': 'dân trí',\r\n            'kenh14': 'kênh 14',\r\n            'soha': 'soha',\r\n            'nguoiduatin': 'người đưa tin',\r\n            'thanhnien': 'thanh niên',\r\n            'zing': 'zing',\r\n            'viblo': 'viblo',\r\n            'vietnamnet': 'vietnamnet',\r\n            'techtalk': 'techtalk',\r\n            'afamily': 'Afamily',\r\n            'baomoi': 'Báo mới',\r\n            'cafef': 'Cafef',\r\n            'dspl': 'Đời sống pháp luật',\r\n            'saostar': 'Saostar',\r\n            'tiin': 'Tiin',\r\n            'tuoitre': 'Tuổi trẻ',\r\n            'vnexpress': 'Vnexpress',\r\n            'vtv.vn': 'VTV',\r\n            'yeah1': 'Yeah1'\r\n        }\r\n\r\n        self.dateLimit = 5  # Number of days to check\r\n        self.tolerables = 0\r\n        self.maxTolerables = 30\r\n        self.last = None\r\n\r\n    def process_item(self, item, spider):\r\n        database = self.client[self.database]\r\n\r\n        # print(self.last)\r\n        if spider.crawlMode is not None:\r\n            if spider.crawlMode == 'Update':\r\n                if database['articles'].find_one({'organization': self.organization.get(spider.name)}) is not None:\r\n                    if self.last is None:\r\n                        self.last = database['articles'].find_one(\r\n                            {'organization': self.organization.get(spider.name)}, sort=[('_id', pymongo.DESCENDING)])\r\n                    if self.last is not None and self.last.get('date_published') is None:\r\n                        self.last = None\r\n                    try:\r\n                        if (datetime.fromtimestamp(item.get('datePublished')) - datetime.fromtimestamp(self.last.get('date_published'))).days <= self.dateLimit:\r\n                            self.tolerables += 1\r\n                    except:\r\n                        pass\r\n                    print(self.tolerables)\r\n                    if self.tolerables >= self.maxTolerables:\r\n                        spider.crawler.engine.close_spider(\r\n                            spider, reason='duplicate')\r\n\r\n        if spider.name in self.spiders_duong:\r\n            driver_duong.insert_data_article(database, item)\r\n\r\n        if spider.name in self.spiders_minh:\r\n            driver_minh.insert_article(database, item, spider.name)\r\n\r\n        return item\r\n" }, { "alpha_fraction": 0.6309523582458496, "alphanum_fraction": 0.6309523582458496, "avg_line_length": 26, "blob_id": "a40bd73b3c569052bcd9cd01558a32b3ffb978ce", "content_id": "fa0811b2b5e4532d4a8d27564958433da6089874", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 84, "license_type": "no_license", "max_line_length": 36, "num_lines": 3, "path": "/news/utils/helpers.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "\r\ndef removeString(string):\r\n    string = string.replace(\"\\n\",\"\")\r\n    return string" }, { "alpha_fraction": 0.631007730960846, "alphanum_fraction": 0.6325581669807434, "avg_line_length": 27.409090042114258, "blob_id": "9139d1ff820d1fc46642b6424009a79bf8c7c1ae", "content_id": "ac5ddc005ab5752093cee9c21eca675708dcf71b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 645, "license_type": "no_license", "max_line_length": 106, "num_lines": 22, "path": "/test_folder/test.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "from ..items import DmozItem\r\nfrom scrapy.spiders import CrawlSpider, Rule\r\nfrom scrapy.linkextractors import LinkExtractor\r\nimport re\r\nimport scrapy\r\nimport json\r\n\r\nclass Test(CrawlSpider):\r\n    name = \"test\"\r\n    allowed_domains = ['baomoi.com','sharefb.cnnd.vn']\r\n    start_urls = ['https://baomoi.com/the-gioi.epi/']\r\n\r\n    rules = (\r\n        Rule(LinkExtractor(), callback='parse_item', follow=True),\r\n    )\r\n\r\n\r\n    def parse_item(self, response):\r\n        item = DmozItem()\r\n        item['title'] = response.xpath('//div[@class=\"article\"]/h1[@class=\"article__header\"]/text()').get()\r\n        item['link'] = response.url\r\n        yield item" }, { "alpha_fraction": 0.5115563869476318, "alphanum_fraction": 0.5157226324081421, "avg_line_length": 51.338623046875, "blob_id": "157603ac582588bd9baa8fbd1419bf227fc46ab4", "content_id": "f2b7efdcafb0f94d6bb2897b889384b5730dcf32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10081, "license_type": "no_license", "max_line_length": 154, "num_lines": 189, "path": "/news/spiders/vnexpress1.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "import scrapy\r\nimport json\r\nimport modules.timeConverter as time\r\n\r\nclass VnexpressSpider(scrapy.Spider):\r\n    name = 'vnexpress'\r\n    start_urls = ['https://vnexpress.net/kinh-doanh']\r\n\r\n    def __init__(self, crawlMode='', **kwargs):\r\n        super().__init__(**kwargs)\r\n        self.crawlMode = crawlMode\r\n        if crawlMode == 'update' or crawlMode == '':\r\n            self.crawlMode = 'Update'\r\n        print(self.crawlMode)\r\n\r\n        self.articleCount = 0\r\n\r\n    def parse(self, response):\r\n        menu = response.xpath('//body/nav[@id=\"main_menu\"]/a/@href').getall()\r\n        for link in menu:\r\n            yield scrapy.Request(link, callback=self.parse_start)\r\n\r\n    def 
parse_start(self, response):\r\n alllink = response.xpath('//article[@class=\"list_news\"]/h4[@class=\"title_news\"]/a[1]/@href').getall()\r\n for link in alllink:\r\n full_url = response.urljoin(link)\r\n yield scrapy.Request(full_url, callback=self.parse_article)\r\n next_page = response.xpath('//*[@id=\"pagination\"]/a[@class=\"next\"]/@href').get()\r\n if next_page is not None:\r\n next_page = response.urljoin(next_page)\r\n yield scrapy.Request(next_page, callback=self.parse_start)\r\n\r\n def parse_article(self, response):\r\n article = dict()\r\n title = response.xpath('(//h1[@class=\"title_news_detail mb10\"]/text())|(//h1[@class=\"title\"]/text())').get()\r\n if title is not None:\r\n # get ld_json\r\n try:\r\n ld_json = response.xpath('//script[contains(text(),\"NewsArticle\")]/text()').get()\r\n ld_json = json.loads(ld_json)\r\n ld_json = time.timestamp_converter(ld_json)\r\n article.update(ld_json)\r\n except:\r\n pass\r\n if 'datePublished' not in article.keys():\r\n datePublished = response.xpath('(//meta[@name=\"pubdate\"]/@content)').get()\r\n if datePublished is not None:\r\n datePublished = datePublished.strip()\r\n datePublished = time.Vnex_timestamp(datePublished)\r\n article.update({'datePublished': datePublished})\r\n else:\r\n datePublished = response.xpath('//meta[@name=\"its_publication\"]/@content').get()\r\n article.update({'datePublished': datePublished})\r\n if 'dateModified' not in article.keys():\r\n dateModified = response.xpath('(//meta[@itemprop=\"dateModified\"]/@content)').get()\r\n if dateModified is not None:\r\n dateModified = dateModified.strip()\r\n dateModified = time.Vnex_timestamp(dateModified)\r\n article.update({'dateModified': dateModified})\r\n else:\r\n dateModified = response.xpath('//meta[@name=\"article_updatetime\"]/@content').get()\r\n article.update({'dateModified': dateModified})\r\n link = response.url\r\n article.update({'link': link, 'title': title})\r\n # get meta\r\n article.update({'type': response.xpath(\"//head/meta[@property='og:type']/@content\").get()})\r\n article.update({'description': response.xpath(\"//head/meta[@name='description']/@content\").get()})\r\n article.update({'keywords': response.xpath(\"//head/meta[@name='keywords']/@content\").get()})\r\n article.update({'category': response.xpath(\"//head/meta[@property='article:section']/@content\").get()})\r\n article.update({'copyright': response.xpath(\"//head/meta[@name='copyright']/@content\").get()})\r\n article.update({'language': response.xpath(\"//head/meta[@name='Language']/@content\").get()})\r\n article.update({'geo_place_name': response.xpath(\"//meta[@name = 'geo.placename']/@content\").get()})\r\n article.update({'geo_region': response.xpath(\"//meta[@name = 'geo.region']/@content\").get()})\r\n article.update({'geo_position': response.xpath(\"//meta[@name = 'geo.position']/@content\").get()})\r\n article.update({'category': response.xpath(\"(//li[@class='start']/h4/a/text())|(//li[@class='start have_cap2 ']/h4/a/text())\").get()})\r\n article.update({'organization': 'Vnexpress'})\r\n content = ''\r\n author = ''\r\n for text in response.xpath('(//section[@class=\"container\"]/section[@class=\"wrap_sidebar_12\"]/section['\r\n '@class=\"sidebar_1\"]/article[@class=\"content_detail fck_detail width_common '\r\n 'block_ads_connect\"]/p[@class=\"Normal\"]/strong/text())|(//p['\r\n '@class=\"author_mail\"]/strong/text())|(//p['\r\n '@style=\"text-align:right;\"]/strong/text())').getall():\r\n author += text.strip()\r\n article.update({'author': author})\r\n 
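# gather the article body text from the detail container and caption/table blocks\r\n        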
for text in response.xpath('(//article[@class=\"content_detail fck_detail width_common '\r\n 'block_ads_connect\"]/p/text())|(//div[@class=\"desc_cation\"]/p/text())|(//div['\r\n '@class=\"desc_cation\"]/p/strong/text())|(//div[contains(@class,'\r\n '\"box_tableinsert\") or contains(@class,\"box_quangcao\") or contains(@class,'\r\n '\"box_brief_info\")]//p//text())|(//div[@class=\"WordSection1\"]/p/text())|(//td/p[@class=\"Image\"]/text())').getall():\r\n content += text.strip()\r\n article.update({'content_article': content})\r\n if content is not None:\r\n word_count = len(content.split())\r\n article.update({'word_count': word_count})\r\n else:\r\n word_count = -1\r\n article.update({'word_count': word_count})\r\n # get image\r\n thumbnail = response.xpath('(//td/img/@src)|(//div[@class=\"item_slide_show clearfix\"]/div/img/@src)').getall()\r\n if thumbnail is not None:\r\n article.update({'thumbnail': thumbnail})\r\n else:\r\n article.update({'thumbnail': '-1'})\r\n # get relate_url\r\n relate_urls = []\r\n htags = response.xpath('//ul[@class=\"list_title\"]/li/a[@data-event-action=\"article_box_related\"]')\r\n for tag in htags:\r\n relate_url = dict()\r\n headline = tag.xpath('/@title').get()\r\n url = \"https://vnexpress.vn\" + str(tag.xpath('/@href').extract_first())\r\n relate_url.update({'headline': headline, 'url': url})\r\n relate_urls.append(relate_url)\r\n article.update({\"related_url\": relate_urls})\r\n # get comment\r\n id_article = dict()\r\n objectid = response.xpath('//head/meta[@name=\"tt_article_id\"]/@content').get()\r\n if objectid is None:\r\n return 0\r\n else:\r\n objectid = objectid\r\n siteid = response.xpath('//head/meta[@name=\"tt_site_id\"]/@content').get()\r\n if siteid is None:\r\n return 0\r\n else:\r\n siteid = siteid\r\n categoryid = response.xpath('//head/meta[@name=\"tt_category_id\"]/@content').get()\r\n if categoryid is None:\r\n return 0\r\n else:\r\n categoryid = categoryid\r\n\r\n id_article.update({'objectid': objectid, 'siteid': siteid, 'categoryid': categoryid})\r\n url_like = response.xpath('//meta[@name=\"its_url\"]/@content').get()\r\n if url_like is not None:\r\n # get total like\r\n like_request = \"https://www.facebook.com/plugins/like.php?href=\" + url_like + \"&layout=button_count\"\r\n yield scrapy.Request(like_request, callback=self.parse_like, meta={'article': article, 'id_article': id_article})\r\n else:\r\n pass\r\n # get comment\r\n\r\n def parse_comment(self, response):\r\n str1 = ''\r\n dict_cmt = dict()\r\n for text in response.xpath('//text()').getall():\r\n str1 += text\r\n if str1 is not None:\r\n try:\r\n dict_cmt = json.loads(str1)\r\n except ValueError:\r\n dict_cmt.update({'data': 'None'})\r\n data_all = dict_cmt[\"data\"]\r\n if \"items\" in data_all:\r\n dict_cmt[\"data_vn\"] = data_all[\"items\"]\r\n del dict_cmt[\"data\"]\r\n log = response.meta['data']\r\n log.update({'comment_article': dict_cmt})\r\n else:\r\n log = response.meta['data']\r\n log.update({'comment_article': dict_cmt})\r\n else:\r\n dict_cmt = json.loads(str1)\r\n log = response.meta['data']\r\n log.update({'comment_article': dict_cmt})\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n log.get('link'))\r\n self.articleCount += 1\r\n yield log\r\n\r\n def parse_like(self, response):\r\n log = response.meta['article']\r\n id_article = response.meta['id_article']\r\n likes = response.xpath('(//span[@id=\"u_0_3\"]/text())|(//*[@id=\"u_0_4\"]/text())|(//span[@id=\"u_0_2\"]/text())').get()\r\n if likes is not None:\r\n if \"k\" in 
likes.lower():\r\n                likes = likes.lower()\r\n                likes = likes.replace(\",\", \".\")\r\n                likes = likes.replace(\"k\", \"\")\r\n                likes = float(likes) * 1000\r\n                likes = int(likes)\r\n        else:\r\n            likes = -1\r\n\r\n        log.update({'like_count': likes})\r\n        cmt_resquest = 'https://usi-saas.vnexpress.net/index/get?offset=0&limit=24&frommobile=0&sort=like&is_onload=1' \\\r\n                       '&objectid=' + id_article['objectid'] + '&objecttype=1&siteid='+ id_article['siteid'] + \\\r\n                       '&categoryid=' + id_article['categoryid']\r\n        yield scrapy.Request(cmt_resquest, callback=self.parse_comment, meta={'data': log})\r\n" }, { "alpha_fraction": 0.5341445207595825, "alphanum_fraction": 0.5392351746559143, "avg_line_length": 41.08139419555664, "blob_id": "bd038f67b68f6321a48d57dc50dbea1e81aed036", "content_id": "a0d253bf09e69cccfa922b7c22b61b073748b835", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7410, "license_type": "no_license", "max_line_length": 174, "num_lines": 172, "path": "/news/spiders/viblo.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\nimport scrapy\r\nimport json\r\n\r\nimport modules.timeConverter as time\r\n\r\n\r\nclass VibloSpider(scrapy.Spider):\r\n    name = 'viblo'\r\n    prefix = \"https://viblo.asia\"\r\n\r\n    def start_requests(self):\r\n        # return [SplashRequest(\"https://viblo.asia/newest\", callback=self.parse, endpoint='render.html')]\r\n        return [scrapy.Request(\"https://viblo.asia/newest\", callback=self.parse)]\r\n\r\n    def __init__(self, crawlMode='', **kwargs):\r\n        super().__init__(**kwargs)\r\n        self.crawlMode = crawlMode\r\n        if crawlMode == 'update' or crawlMode == '':\r\n            self.crawlMode = 'Update'\r\n\r\n        self.articleCount = 0\r\n\r\n    def parse(self, response):\r\n        for post in response.xpath('//div[@class = \"post-feed-item\"]'):\r\n            try:\r\n                tags = []\r\n                for tag in post.xpath('.//div[@class=\"tags\"]/a/@href').getall():\r\n                    tag = self.prefix + tag\r\n                    tags.append(tag)\r\n                post_url = self.prefix + post.xpath('.//h3/a/@href').get()\r\n                # yield SplashRequest(post_url, callback=self.parse_article, meta={'splash': {'endpoint': 'execute', 'args': {'lua_source': self.script}}, 'hash-tags': tags})\r\n                yield scrapy.Request(post_url, callback=self.parse_article, meta={'hash-tags': tags})\r\n            except Exception:\r\n                self.logger.error(\"ERROR: \", exc_info=True)\r\n                continue\r\n\r\n        next_page = response.xpath(\r\n            '//li[@class = \"page-item\"]/a[@rel = \"next\"]/@href').get()\r\n        if next_page is not None:\r\n            yield response.follow(next_page, callback=self.parse)\r\n\r\n    def parse_article(self, response):\r\n        article = {}\r\n\r\n        # get ld_json\r\n        try:\r\n            ld_json = response.xpath(\r\n                \"//script[contains(text(),'Article')]/text()\").get()\r\n            if ld_json is None:\r\n                ld_json = response.xpath(\r\n                    \"//script[contains(text(),'NewsArticle')]/text()\").get()\r\n            ld_json_dict = json.loads(ld_json)\r\n            ld_json_dict = time.timestamp_converter(ld_json_dict)\r\n            article.update(ld_json_dict)\r\n        except:\r\n            pass\r\n\r\n        # get meta\r\n        elems = {\r\n            'meta-description': response.xpath(\"//meta[@name='description']/@content\").get(),\r\n            'meta-keywords': response.xpath(\"//meta[@name='keywords']/@content\").get(),\r\n            'meta-title': response.xpath(\"//meta[@name='title']/@content\").get(),\r\n            'meta-copyright': response.xpath(\"//meta[@name='copyright']/@content\").get(),\r\n            'meta-author': response.xpath(\"//meta[@name='author']/@content\").get(),\r\n            'meta-content-language': response.xpath('//meta[@name = 
\"content-language\"]/@content').get(),\r\n 'meta-geo.placename': response.xpath('//meta[@name = \"geo.placename\"]/@content').get(),\r\n 'meta-geo.position': response.xpath('//meta[@name = \"geo.region\"]/@content').get(),\r\n 'meta-geo.region': response.xpath('//meta[@name = \"geo.region\"]/@content').get(),\r\n 'meta-article:author': response.xpath(\"//meta[@property='article:author']/@content\").get(),\r\n 'meta-article:publisher': response.xpath(\"//meta[@property='article:publisher']/@content\").get(),\r\n 'url': response.url,\r\n 'category': 'viblo article',\r\n 'organization': 'viblo',\r\n 'related-urls': response.xpath('//div[@class = \"related-posts-box\"]//div[contains(@class, \"post-card__title\")]//a/@href').getall()\r\n }\r\n article.update(elems)\r\n\r\n # get hashtags\r\n article.update({'hash-tags': response.meta['hash-tags']})\r\n\r\n # get views\r\n views = response.xpath(\r\n '//div[contains(@data-original-title, \"Views:\")]/@data-original-title').get()\r\n if views is not None:\r\n strings = [s for s in views.split() if s.isdigit()]\r\n if len(strings) != 0:\r\n views = strings[0]\r\n else:\r\n views = '0'\r\n article.update({'view-count': views})\r\n\r\n # get likes/ upvotes counts\r\n likes = response.xpath(\r\n '//div[@class = \"votes votes--side post-actions__vote mb-1\"]/div/text()').get()\r\n if likes is not None:\r\n likes = likes.replace('+', '')\r\n likes = likes.replace('\\n', '')\r\n likes = likes.strip()\r\n article.update({'likes-counter': likes})\r\n\r\n # get comments count\r\n comment_count = response.xpath(\r\n '//div[@class = \"post-meta__item mr-1\"]//button[@class = \"el-button el-button--text\"]/span/text()').get()\r\n if comment_count is not None:\r\n comment_count = comment_count.replace('\\n', '').strip()\r\n article.update({'comments-count': comment_count})\r\n else:\r\n article.update({'comments-count': '0'})\r\n\r\n # get content\r\n content = ''\r\n for text in response.xpath('//div[contains(@class, \"md-contents article-content__body\")]//text()').getall():\r\n content += text.strip()\r\n article.update({'content': content})\r\n\r\n word_count = len(content.split())\r\n article.update({'word_count': word_count})\r\n\r\n # get image url\r\n images = {}\r\n for index, src in enumerate(response.xpath('//div[contains(@class, \"md-contents article-content__body\")]//img/@src').getall(), 1):\r\n images.update({'image' + str(index): src})\r\n\r\n article.update({'image-urls': images})\r\n\r\n # get comments\r\n id = response.url.split('-')\r\n id = id[len(id) - 1]\r\n comment_url = \"https://viblo.asia/api/posts/\" + id + \"/comments\"\r\n return scrapy.Request(comment_url, callback=self.parse_comments, meta={'article': article})\r\n\r\n def parse_comments(self, response):\r\n article = response.meta['article']\r\n\r\n str = ''\r\n for a in response.xpath('//text()').getall():\r\n str += a\r\n\r\n if str is 'null':\r\n article.update({'comments-count': 0, 'comments': ''})\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n article.get('url'))\r\n self.articleCount += 1\r\n return article\r\n\r\n cmt_dict = json.loads(str)\r\n Comments = []\r\n\r\n for index, comment in enumerate(cmt_dict.get('comments').get('data'), 0):\r\n\r\n comment['SenderFullName'] = comment.get(\r\n 'user').get('data').get('name')\r\n comment['CommentContent'] = comment.pop('contents')\r\n comment['CreatedDate'] = time.comment_time(\r\n comment.pop('created_at'))\r\n comment['Liked'] = comment.pop('points')\r\n comment['Replies'] = []\r\n\r\n if 
comment.get('in_reply_to_comment') is not None:\r\n                for cmt in cmt_dict.get('comments').get('data'):\r\n                    if cmt.get('id') == comment.get('in_reply_to_comment'):\r\n                        cmt.get('Replies').append(comment)\r\n                        del cmt_dict.get('comments').get('data')[index]\r\n            Comments.append(comment)\r\n\r\n        article.update({'comments': Comments})\r\n\r\n        self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n                         article.get('url'))\r\n        self.articleCount += 1\r\n        return article\r\n" }, { "alpha_fraction": 0.5438406467437744, "alphanum_fraction": 0.5550985336303711, "avg_line_length": 44.69130325317383, "blob_id": "3f81af0b8623c7a22e4a0decd20add1215c90e54", "content_id": "cdf75636340c0484e283887362f78e0d04a19f59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9238, "license_type": "no_license", "max_line_length": 336, "num_lines": 197, "path": "/news/spiders/vietnamnet.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "import scrapy\r\nimport json\r\nimport modules.timeConverter as time\r\n\r\n\r\nclass VietnamnetSpider(scrapy.Spider):\r\n    name = 'vietnamnet'\r\n    allowed_domains = ['vietnamnet.vn/']\r\n\r\n    # start_urls = [\r\n    #     'https://vietnamnet.vn/vn/kinh-doanh/sasco-vao-top-5-cong-ty-uy-tin-nganh-ban-le-nam-2019-585891.html'\r\n    # ]\r\n\r\n    def __init__(self, crawlMode='', **kwargs):\r\n        super().__init__(**kwargs)\r\n        self.crawlMode = crawlMode\r\n        if crawlMode == 'update' or crawlMode == '':\r\n            self.crawlMode = 'Update'\r\n\r\n        self.articleCount = 0\r\n\r\n    def start_requests(self):\r\n        return [scrapy.Request(\"https://vietnamnet.vn/\", callback=self.logged_in)]\r\n        # return [scrapy.Request(\"https://vietnamnet.vn/vn/kinh-doanh/sasco-vao-top-5-cong-ty-uy-tin-nganh-ban-le-nam-2019-585891.html\", callback=self.parse_article)]\r\n        # return [scrapy.Request(\"https://vietnamnet.vn/vn/kinh-doanh/sasco-vao-top-5-cong-ty-uy-tin-nganh-ban-le-nam-2019-585891.html\", callback=self.parse_article)]\r\n\r\n    def logged_in(self, response):\r\n        urls = [\r\n            'https://vietnamnet.vn/vn/thoi-su/',\r\n            'https://vietnamnet.vn/vn/kinh-doanh/',\r\n            'https://vietnamnet.vn/vn/giai-tri/',\r\n            'https://vietnamnet.vn/vn/the-gioi/',\r\n            'https://vietnamnet.vn/vn/giao-duc/',\r\n            'https://vietnamnet.vn/vn/doi-song/',\r\n            'https://vietnamnet.vn/vn/phap-luat/',\r\n            'https://vietnamnet.vn/vn/the-thao/',\r\n            
'https://vietnamnet.vn/vn/cong-nghe/',\r\n            'https://vietnamnet.vn/vn/suc-khoe/',\r\n            'https://vietnamnet.vn/vn/bat-dong-san/',\r\n            'https://vietnamnet.vn/vn/ban-doc/',\r\n            'https://vietnamnet.vn/vn/oto-xe-may/',\r\n            'https://vietnamnet.vn/vn/goc-nhin-thang/',\r\n            'https://vietnamnet.vn/vn/ban-tron-truc-tuyen/',\r\n            'https://vietnamnet.vn/vn/hotface/',\r\n        ]\r\n        for url in urls:\r\n            segment = url + 'trang'\r\n            yield scrapy.Request(segment + '1', meta={'pageIndex': 1, 'segment': segment})\r\n\r\n    def parse(self, response):\r\n        pageIndex = response.meta['pageIndex']\r\n        segment = response.meta['segment']\r\n\r\n        if response.xpath('//div[@class = \"clearfix item\"]/a/@href').get() is None:\r\n            return\r\n        for href in response.xpath('//div[@class = \"clearfix item\"]/a/@href'):\r\n            try:\r\n                yield response.follow(href, self.parse_articles)\r\n            except Exception:\r\n                self.logger.error(\"ERROR: \", exc_info=True)\r\n                continue\r\n\r\n        next_page_request = segment + str(pageIndex + 1)\r\n        yield scrapy.Request(next_page_request, meta={'pageIndex': pageIndex + 1, 'segment': segment})\r\n\r\n    def parse_articles(self, response):\r\n        article = {}\r\n\r\n        # get ld_json\r\n        try:\r\n            ld_json = response.xpath(\r\n                \"//script[contains(text(),'NewsArticle')]/text()\").get()\r\n            ld_json_dict = json.loads(ld_json)\r\n            ld_json_dict = time.vietnamnet_timestamp(ld_json_dict)\r\n            article.update(ld_json_dict)\r\n        except:\r\n            pass\r\n\r\n        # get meta\r\n        elems = {\r\n            'meta-description': response.xpath(\"//meta[@name='description']/@content\").get(),\r\n            'meta-keywords': response.xpath(\"//meta[@name='keywords']/@content\").get(),\r\n            'meta-title': response.xpath(\"//meta[@name='title']/@content\").get(),\r\n            'meta-copyright': response.xpath(\"//meta[@name='copyright']/@content\").get(),\r\n            'meta-author': response.xpath(\"//meta[@name='author']/@content\").get(),\r\n            'language': response.xpath('//meta[@http-equiv = \"content-language\"]/@content').get(),\r\n            'geo.placename': response.xpath('//meta[@name = \"geo.placename\"]/@content').get(),\r\n            'geo.position': response.xpath('//meta[@name = \"geo.position\"]/@content').get(),\r\n            'geo.region': response.xpath('//meta[@name = \"geo.region\"]/@content').get(),\r\n            'meta-article:author': response.xpath(\"//meta[@property='article:author']/@content\").get(),\r\n            'meta-article:publisher': response.xpath(\"//meta[@property='article:publisher']/@content\").get(),\r\n            'category': response.xpath('//div[@class = \"top-cate-head-title\"]/a/text()').get(),\r\n            'organization': 'vietnamnet',\r\n            'url': response.url,\r\n            'related_urls': response.xpath('//div[@class = \"article-relate\"]//a/@href').getall()\r\n        }\r\n        article.update(elems)\r\n\r\n        # get content\r\n        content = ''\r\n        for text in response.xpath('//div[@id = \"ArticleContent\"]//p/text()').getall():\r\n            content += text.strip()\r\n        if content == '':\r\n            for text in response.xpath('//div[@class = \"Magazine-Acticle EMA2018\"]//p/text()').getall():\r\n                content += text.strip()\r\n\r\n        article.update({'content': content})\r\n\r\n        word_count = len(content.split())\r\n        article.update({'word_count': word_count})\r\n\r\n        # get image url\r\n        images = {}\r\n        for index, src in enumerate(response.xpath('//div[@id = \"ArticleContent\"]//*[contains(@class,\"image\") or contains(@class,\"Image\")]//@src').getall(), 1):\r\n            images.update({'image' + str(index): src})\r\n        article.update({'image-urls': images})\r\n\r\n        # get hashtags\r\n        hashtags = {}\r\n        for index, href in 
enumerate(response.xpath('//div[@class=\"tagBoxContent\"]//@href').getall(), 1):\r\n hashtags.update({'tag'+str(index): href})\r\n article.update({'hash-tags': hashtags})\r\n\r\n # get video url\r\n videos = {}\r\n for index, src in enumerate(response.xpath('//iframe[contains(@src,\"embed.vietnamnettv.vn/v\")]/@src').getall(), 1):\r\n videos.update({'video' + str(index): src})\r\n article.update(videos)\r\n\r\n article_id = response.xpath(\r\n '//div[@class = \"fmsidWidgetLike\"]/@data-id').get()\r\n\r\n # get likes\r\n like_request = \"https://www.facebook.com/plugins/like.php?action=like&app_id=&channel=https%3A%2F%2Fstaticxx.facebook.com%2Fconnect%2Fxd_arbiter.php%3Fversion%3D44%23cb%3Df1e420bc40a52c%26domain%3Dvietnamnet.vn%26origin%3Dhttps%253A%252F%252Fvietnamnet.vn%252Ff546fb88125%26relation%3Dparent.parent&container_width=83&href=\" + \\\r\n response.url + \"&layout=button_count&locale=vi_VN&sdk=joey&share=true&show_faces=false&size=small\"\r\n yield scrapy.Request(like_request, callback=self.parse_likes, meta={'article': article, 'article_id': article_id})\r\n\r\n def parse_likes(self, response):\r\n article = response.meta['article']\r\n article_id = response.meta['article_id']\r\n likes = response.xpath(\r\n '//button[@type=\"submit\"]/div/span[3]/text()').get()\r\n if likes is not None:\r\n strings = [s for s in likes.split() if s.isdigit()]\r\n if len(strings) != 0:\r\n likes = strings[0]\r\n else:\r\n likes = '0'\r\n else:\r\n likes = '0'\r\n\r\n article.update({'likes-counter': likes})\r\n if article_id is None:\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n article.get('url'))\r\n self.articleCount += 1\r\n return article\r\n\r\n cmt_request = \"https://i.vietnamnet.vn/jsx/interaction/getInteraction/data.jsx?objkey=vietnamnet.vn_\" + \"/vn/_\" + article_id\r\n yield scrapy.Request(cmt_request, callback=self.parse_comments, meta={'article': article})\r\n\r\n def parse_comments(self, response):\r\n article = response.meta['article']\r\n\r\n string = ''\r\n for a in response.xpath('//text()').getall():\r\n string += a\r\n\r\n string = string.replace('retvar=', '')\r\n\r\n comment_data = json.loads(string)\r\n comments_list = comment_data.get('comments')\r\n if comments_list is not None and len(comments_list) is not 0:\r\n for comment in comments_list:\r\n comment['Liked'] = comment.pop('like')\r\n comment['SenderFullName'] = comment.pop('fullname')\r\n comment['CommentContent'] = comment.pop('content')\r\n comment['CreatedDate'] = time.comment_time(\r\n comment.pop('created_at'))\r\n comment['Replies'] = comment.pop('replies')\r\n if comment['Replies'] is not None:\r\n for reply in comment['Replies']:\r\n reply['Liked'] = reply.pop('like')\r\n reply['SenderFullName'] = reply.pop('fullname')\r\n reply['CommentContent'] = reply.pop('content')\r\n reply['CreatedDate'] = time.comment_time(\r\n reply.pop('created_at'))\r\n reply['Replies'] = reply.pop('replies')\r\n\r\n cmt_count = comment_data.get('totalrecord')\r\n article.update({'comments-count': cmt_count,\r\n 'comments': comments_list})\r\n\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n article.get('url'))\r\n self.articleCount += 1\r\n return article\r\n" }, { "alpha_fraction": 0.6204819083213806, "alphanum_fraction": 0.6626505851745605, "avg_line_length": 16.66666603088379, "blob_id": "0bda3ff74ad927996f57d5169fbc00c974901e7a", "content_id": "f68ac35a9611eb69cf98e59f8ebb9dec547cb0c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 166, "license_type": "no_license", "max_line_length": 28, "num_lines": 9, "path": "/news/config_enviroment_example.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "\"\"\"\r\nAuthenticate Mongodb\r\n\"\"\"\r\nAUTH_USERNAME = 'admin'\r\nAUTH_PASSWORD = 'CIST#2o!7'\r\nPORT = '27017'\r\nHOST = 'mongo'\r\nDATABASE = 'articles'\r\nCONCURRENT_REQUESTS = 32" }, { "alpha_fraction": 0.6259351372718811, "alphanum_fraction": 0.6546134948730469, "avg_line_length": 40.21052551269531, "blob_id": "35cba792a2584a8e754d463a4411f6aeb29399a4", "content_id": "bd0c46c0668932ef3e8b125f93974828e836d607", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 802, "license_type": "no_license", "max_line_length": 175, "num_lines": 19, "path": "/test_folder/video-test.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "import scrapy\r\nimport json\r\nfrom scrapy_splash import SplashRequest\r\n\r\n\r\nclass DantriSpider(scrapy.Spider):\r\n name = 'video-test'\r\n allowed_domains = ['nguoiduatin.vn']\r\n\r\n def start_requests(self):\r\n # pass\r\n yield SplashRequest(\"https://www.nguoiduatin.vn/phan-no-chung-kien-canh-giao-vien-chu-nhiem-bat-tai-danh-dap-hang-loat-hoc-sinh-lop-2-a451647.html\", args={'wait': 15})\r\n # yield scrapy.Request(\"https://api.news.zing.vn/api/comment.aspx?action=get&id=998037\", callback=self.parse_cmt)\r\n # yield SplashRequest(\"https://api.news.zing.vn/api/comment.aspx?action=get&id=998037\", callback=self.parse_cmt,args={\"wait\" : 0.5})\r\n\r\n def parse(self, response):\r\n yield{\r\n 'video-src': response.xpath('//*[@class = \"vmp-tech\"]/@src').get()\r\n }\r\n" }, { "alpha_fraction": 0.5326380729675293, "alphanum_fraction": 0.5480026006698608, "avg_line_length": 44.69130325317383, "blob_id": "f7bb27d248d645f513ecaf73f15eb7f3767f0317", "content_id": "054a5ee4f86281ac1f94a8e99e447224e70350fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10740, "license_type": "no_license", "max_line_length": 210, "num_lines": 230, "path": "/news/spiders/kenh14.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "import scrapy\r\nimport json\r\nimport ast\r\n\r\nimport modules.timeConverter as time\r\n\r\n\r\nclass Kenh14Spider(scrapy.Spider):\r\n name = 'kenh14'\r\n allowed_domains = ['kenh14.vn']\r\n\r\n def __init__(self, crawlMode='', **kwargs):\r\n super().__init__(**kwargs)\r\n self.crawlMode = crawlMode\r\n if crawlMode is 'update' or crawlMode is '':\r\n self.crawlMode = 'Update'\r\n\r\n self.articleCount = 0\r\n\r\n def start_requests(self):\r\n # return [scrapy.Request(\"http://kenh14.vn/co-moi-chuyen-mua-meo-o-dau-cung-gay-bao-mxh-ua-met-khong-met-thi-coi-cam-nang-de-hoi-phat-an-luon-ne-20191021231634269.chn\", callback=self.parse_article)]\r\n # return [scrapy.Request(\"http://kenh14.vn/star.chn\", callback=self.parse_nav)]\r\n return [scrapy.Request(\"http://kenh14.vn/\", callback=self.logged_in)]\r\n\r\n def logged_in(self, response):\r\n urls = [\r\n 'http://kenh14.vn/star.chn',\r\n 'http://kenh14.vn/tv-show.chn',\r\n 'http://kenh14.vn/cine.chn',\r\n 'http://kenh14.vn/musik.chn',\r\n 'http://kenh14.vn/beauty-fashion.chn',\r\n 'http://kenh14.vn/doi-song.chn',\r\n 'http://kenh14.vn/an-quay-di.chn',\r\n 'http://kenh14.vn/xa-hoi.chn',\r\n 'http://kenh14.vn/the-gioi.chn',\r\n 'http://kenh14.vn/sport.chn',\r\n 'http://kenh14.vn/hoc-duong.chn',\r\n 'http://kenh14.vn/hoc-duong.chn',\r\n 
'http://kenh14.vn/suc-khoe-gioi-tinh.chn',\r\n            'http://kenh14.vn/2-tek.chn'\r\n        ]\r\n        for url in urls:\r\n            yield scrapy.Request(url, callback=self.parse_nav)\r\n\r\n    def parse_nav(self, response):\r\n        # get category ID\r\n        cate_id_finder = response.xpath(\r\n            '//script[contains(text(),\"CateId\")]/text()').get()\r\n        pv1 = cate_id_finder.find('CateId')\r\n        pv2 = cate_id_finder.find(\"'\", pv1)+1\r\n        pv3 = cate_id_finder.find(\"'\", pv2)\r\n        cate_id = cate_id_finder[pv2:pv3]\r\n        # call timeline request\r\n        timeline_request = \"http://kenh14.vn/timeline/laytinmoitronglist-1-0-0-0-0-0-\" + \\\r\n            cate_id+\"-0-0-0-0.chn\"\r\n        return scrapy.Request(timeline_request, callback=self.parse, meta={'page_index': 2, 'cate_id': cate_id})\r\n\r\n    def parse(self, response):\r\n        page_index = response.meta['page_index']\r\n        cate_id = response.meta['cate_id']\r\n        if response.xpath('//li[@class=\"knswli need-get-value-facebook clearfix \"]//h3[@class=\"knswli-title\"]/a/@href').get() is None:\r\n            return\r\n        for href in response.xpath('//li[@class=\"knswli need-get-value-facebook clearfix \"]//h3[@class=\"knswli-title\"]/a/@href'):\r\n            try:\r\n                yield response.follow(href, self.parse_article)\r\n            except Exception:\r\n                self.logger.error(\"ERROR: \", exc_info=True)\r\n                continue\r\n        # call timeline request\r\n        timeline_request = \"http://kenh14.vn/timeline/laytinmoitronglist-\"+str(page_index)+\"-0-0-0-0-0-\" + \\\r\n            cate_id+\"-0-0-0-0.chn\"\r\n        yield scrapy.Request(timeline_request, callback=self.parse, meta={'page_index': page_index+1, 'cate_id': cate_id})\r\n\r\n    def parse_article(self, response):\r\n        article = {}\r\n\r\n        try:\r\n            ld_json = response.xpath(\r\n                \"//script[contains(text(),'NewsArticle')]/text()\").get()\r\n            ld_json_dict = json.loads(ld_json)\r\n            ld_json_dict = time.timestamp_converter(ld_json_dict)\r\n            article.update(ld_json_dict)\r\n        except:\r\n            pass\r\n\r\n        # get meta\r\n        elems = {\r\n            'meta-description': response.xpath(\"//meta[@name='description']/@content\").get(),\r\n            'meta-keywords': response.xpath(\"//meta[@name='keywords']/@content\").get(),\r\n            'meta-title': response.xpath(\"//meta[@name='title']/@content\").get(),\r\n            'meta-copyright': response.xpath(\"//meta[@name='copyright']/@content\").get(),\r\n            'meta-author': response.xpath(\"//meta[@name='author']/@content\").get(),\r\n            'language': response.xpath('//meta[@http-equiv = \"content-language\"]/@content').get(),\r\n            'geo.placename': response.xpath('//meta[@name = \"geo.placename\"]/@content').get(),\r\n            'geo.position': response.xpath('//meta[@name = \"geo.position\"]/@content').get(),\r\n            'geo.region': response.xpath('//meta[@name = \"geo.region\"]/@content').get(),\r\n            'meta-article:author': response.xpath(\"//meta[@property='article:author']/@content\").get(),\r\n            'meta-article:publisher': response.xpath(\"//meta[@property='article:publisher']/@content\").get(),\r\n            'category': response.xpath('//li[@class = \"kmli active\"]/a/text()').get(),\r\n            'organization': 'kênh 14',\r\n            'related_urls': response.xpath('//div[@class = \"kds-same-category clearfix\"]//div[@class = \"rowccm\"]/li/a/@href').getall(),\r\n            'url': response.url\r\n        }\r\n        article.update(elems)\r\n\r\n        # get content\r\n        content = ''\r\n        for text in response.xpath('//div[@class = \"knc-content\"]//p//text()').getall():\r\n            content += text.strip()\r\n        article.update({'content': content})\r\n\r\n        word_count = len(content.split())\r\n        article.update({'word_count': word_count})\r\n\r\n        # get image url\r\n        images = {}\r\n        for index, src in 
enumerate(response.xpath('//div[@class = \"knc-content\"]//div[@type = \"Photo\"]//@src').getall(), 1):\r\n images.update({'image' + str(index): src})\r\n article.update({'image-urls': images})\r\n\r\n # get video url\r\n videos = {}\r\n for index, src in enumerate(response.xpath('//div[@type=\"VideoStream\"]/@data-src').getall(), 1):\r\n videos.update({'video'+str(index): src})\r\n article.update({'video-urls': videos})\r\n\r\n # get hashtags\r\n hashtags = {}\r\n for index, href in enumerate(response.xpath('//ul[@class=\"knt-list\"]/li//@href').getall(), 1):\r\n hashtags.update({'tag'+str(index): href})\r\n article.update({'hash-tags': hashtags})\r\n\r\n comments_paras = response.xpath(\r\n '//script[@type=\"text/javascript\"][contains(text(),\"comment\")]/text()').get()\r\n pv0 = comments_paras.find(\"MINGID_IFRAME_FUNC.mingidGenIfram\")\r\n pv1 = comments_paras.find(\"(\", pv0)\r\n pv2 = comments_paras.find(\")\", pv1)+1\r\n paras = comments_paras[pv1:pv2]\r\n # danh sach parameters de lay request comment\r\n para_list = ast.literal_eval(paras)\r\n para_list = list(para_list)\r\n\r\n # get interactions\r\n inter_request = \"https://sharefb.cnnd.vn/?urls=\" + response.url\r\n yield scrapy.Request(inter_request, callback=self.get_inter, meta={'article': article, 'paras': para_list}, headers={\r\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\r\n 'Origin': 'https://soha.vn',\r\n 'Referer': 'https://soha.vn/chiu-suc-ep-khong-lo-tu-my-tq-ngam-ngui-buong-tay-bo-roi-du-an-dau-mo-5-ti-usd-voi-doi-tac-lau-nam-20191007161429421.htm',\r\n 'Sec-Fetch-Mode': 'cors',\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'\r\n })\r\n\r\n # get interactions\r\n def get_inter(self, response):\r\n article = response.meta['article']\r\n paras = response.meta['paras']\r\n\r\n inter = response.xpath('//text()').get()\r\n inter_dict = json.loads(inter)[0]\r\n del inter_dict['url']\r\n\r\n article.update(inter_dict)\r\n\r\n content_url = paras[3]\r\n news_title = paras[4]\r\n\r\n comment_request = \"https://comment.vietid.net/comments?app_key=d9c694bd04eb35d96f1d71a84141d075&content_url=\" + \\\r\n content_url+\"&news_title=\"+news_title\r\n yield scrapy.Request(comment_request, callback=self.parse_comment, meta={'article': article})\r\n\r\n # get comments\r\n def get_comment(self, response, XPATH, comments_counter):\r\n comments = []\r\n for comment in response.xpath(XPATH):\r\n comment_dict = {}\r\n primary_comment = comment.xpath('./div[contains(@id,\"form\")]')\r\n primary_ava = primary_comment.xpath(\r\n './/div[@class=\"avatar\"]//img/@src').get()\r\n primary_user = primary_comment.xpath(\r\n './/a[@class=\"full-name\"]/text()').get()\r\n if primary_user is not None:\r\n primary_user = primary_user.strip()\r\n primary_time = primary_comment.xpath(\r\n './/span[@class=\"time-ago\"]/text()').get()\r\n if primary_time is not None:\r\n primary_time = primary_time.strip()\r\n primary_geo = primary_comment.xpath(\r\n './/span[@class=\"city\"]/text()').get()\r\n if primary_geo is not None:\r\n primary_geo = primary_geo.strip()\r\n primary_content = primary_comment.xpath(\r\n './/div[@class=\"cm-content\"]/span/text()').get()\r\n if primary_content is not None:\r\n primary_content = primary_content.strip()\r\n primary_likes = primary_comment.xpath(\r\n './/a[contains(@class,\"vote-count\")]/text()').get()\r\n if primary_likes is not None:\r\n primary_likes = primary_likes.strip()\r\n\r\n comment_dict.update({\r\n 
'SenderAvatar': primary_ava,\r\n                'SenderFullName': primary_user,\r\n                'CreatedDate': time.comment_time(primary_time),\r\n                'PublishedGeo': primary_geo,\r\n                'CommentContent': primary_content,\r\n                'Liked': primary_likes,\r\n            })\r\n            comments_counter += 1\r\n            if response.xpath('.//ul[@class=\"sub-cm \"]') is None:\r\n                comment_dict.update({'Replies-count': 0,\r\n                                     'Replies': None})\r\n                comments.append(comment_dict)\r\n            else:\r\n                [secondary_comments, secondary_count] = self.get_comment(\r\n                    comment, './/ul[@class=\"sub-cm \"]/li', 0)\r\n                comment_dict.update({'Replies-count': secondary_count,\r\n                                     'Replies': secondary_comments})\r\n                comments.append(comment_dict)\r\n        return [comments, comments_counter]\r\n\r\n    def parse_comment(self, response):\r\n        article = response.meta['article']\r\n        comments = self.get_comment(\r\n            response, '//ul[@class = \"cm-list\"]/li', 0)[0]\r\n        article.update({'comments': comments})\r\n\r\n        self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n                         article.get('url'))\r\n        self.articleCount += 1\r\n        return article\r\n" }, { "alpha_fraction": 0.5280701518058777, "alphanum_fraction": 0.5300944447517395, "avg_line_length": 29.790908813476562, "blob_id": "fe552e59c59611a631cb2fc056f8573d9e3722d5", "content_id": "7521f196f641e9b3ed0d0732cbf6852d2730fded", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8054, "license_type": "no_license", "max_line_length": 166, "num_lines": 256, "path": "/modules/customDriver.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "from datetime import datetime\r\nimport json\r\nimport uuid\r\n\r\n\r\ndef timestamp_converter(ld_dict):\r\n    date_published = ld_dict.get('datePublished')\r\n    date_modified = ld_dict.get('dateModified')\r\n    oldFormat = \"%m/%d/%Y %I:%M:%S %p\"\r\n    newFormat = \"%Y-%m-%d %H:%M:%S\"\r\n\r\n    date_published = datetime.strptime(date_published, oldFormat)\r\n    Date_published = date_published.strftime(newFormat)\r\n\r\n    date_modified = datetime.strptime(date_modified, oldFormat)\r\n    Date_modified = date_modified.strftime(newFormat)\r\n\r\n    ld_dict['datePublished'] = Date_published\r\n    ld_dict['dateModified'] = Date_modified\r\n\r\n    return ld_dict\r\n\r\n\r\ndef get_comments_list(comment, article_id, parent_cmt_id):\r\n    comment_list = []\r\n    comment_id = uuid.uuid1()\r\n    author = comment.get('SenderFullName')\r\n    content = comment.get('CommentContent')\r\n    created_at = comment.get('CreatedDate')\r\n    like = int(comment.get('Liked'))\r\n\r\n    db_comment = {\r\n        'comment_id': comment_id,\r\n        'article_id': article_id,\r\n        'author': author,\r\n        'content': content,\r\n        'created_at': created_at,\r\n        'like': like\r\n    }\r\n    if parent_cmt_id is not None:\r\n        db_comment.update({'commentreplyid': parent_cmt_id})\r\n    else:\r\n        db_comment.update({'commentreplyid': None})\r\n    comment_list.append(db_comment)\r\n\r\n    if comment['Replies'] is not None:\r\n        for reply in comment['Replies']:\r\n            comment_list.extend(get_comments_list(\r\n                reply, article_id, comment_id))\r\n\r\n    return comment_list\r\n\r\n\r\ndef insert_article(session, article):\r\n    headline = article.get('headline')\r\n    if headline is None:\r\n        headline = article.get('meta-title')\r\n        if headline is None:\r\n            headline = '-1'\r\n\r\n    thumbnail = json.dumps(article.get('image'))\r\n    if thumbnail is None:\r\n        thumbnail = '-1'\r\n\r\n    description = article.get('description')\r\n    if description is None:\r\n        description = article.get('meta-description')\r\n        if description is None:\r\n            description = '-1'\r\n\r\n    
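# normalise missing optional fields to the '-1' sentinel used throughout this driver\r\n    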
type = article.get('@type')\r\n if type is None:\r\n type = '-1'\r\n\r\n date_published = article.get('datePublished')\r\n if date_published is None:\r\n date_published = '-1'\r\n\r\n date_modified = article.get('dateModified')\r\n if date_modified is None:\r\n date_modified = '-1'\r\n\r\n author = json.dumps(article.get('author'))\r\n if author is None:\r\n author = article.get('meta-article:author')\r\n if author is None:\r\n author = '-1'\r\n\r\n publisher = json.dumps(article.get('publisher'))\r\n if publisher is None:\r\n publisher = article.get('meta-article:publisher')\r\n if publisher is None:\r\n publisher = '-1'\r\n\r\n content = article.get('content')\r\n if content is None:\r\n content = '-1'\r\n\r\n images = json.dumps(article.get('image-urls'))\r\n if images is None:\r\n images = '-1'\r\n\r\n keywords = article.get('meta-keywords')\r\n if keywords is None:\r\n keywords = '-1'\r\n\r\n category = article.get('category')\r\n if category is None:\r\n category = '-1'\r\n\r\n language = article.get('language')\r\n if language is None:\r\n language = '-1'\r\n\r\n geo_placename = article.get('geo.placename')\r\n if geo_placename is None:\r\n geo_placename = '-1'\r\n\r\n geo_region = article.get('geo.region')\r\n if geo_region is None:\r\n geo_region = '-1'\r\n\r\n geo_position = article.get('geo.position')\r\n if geo_position is None:\r\n geo_position = '-1'\r\n\r\n word_count = article.get('word_count')\r\n if word_count is None:\r\n word_count = -1\r\n\r\n url = article.get('url')\r\n\r\n interactions = article.get('interactions')\r\n like = article.get('likes-counter')\r\n if like is None:\r\n like = article.get('like_count')\r\n if like is None:\r\n if interactions is not None:\r\n like = interactions.get('like_count')\r\n if like is None:\r\n like = \"-1\"\r\n if \"k\" in like.lower():\r\n like = like.lower()\r\n like = like.replace(\",\", \".\")\r\n like = like.replace(\"k\", \"\")\r\n like = float(like) * 1000\r\n like = int(like)\r\n\r\n share = article.get('share_count')\r\n if share is None:\r\n if interactions is not None:\r\n share = interactions.get('share_count')\r\n if share is None:\r\n share = '-1'\r\n if \"k\" in share.lower():\r\n share = share.lower()\r\n share = share.replace(\",\", \".\")\r\n share = share.replace(\"k\", \"\")\r\n share = float(share) * 1000\r\n share = int(share)\r\n\r\n seen = article.get('view-count')\r\n if seen is None:\r\n seen = article.get('views')\r\n if seen is None:\r\n seen = '-1'\r\n\r\n if \"k\" in seen.lower():\r\n seen = seen.lower()\r\n seen = seen.replace(\",\", \".\")\r\n seen = seen.replace(\"k\", \"\")\r\n seen = float(seen) * 1000\r\n seen = int(seen)\r\n\r\n related_urls = json.dumps(article.get('related_urls'))\r\n if related_urls is None:\r\n related_urls = \"-1\"\r\n\r\n raw_data = '-1'\r\n\r\n db_article = {\r\n 'headline': headline,\r\n 'thumbnail': thumbnail,\r\n 'description': description,\r\n 'type': type,\r\n 'date_published': date_published,\r\n 'date_modified': date_modified,\r\n 'author': author,\r\n 'publisher': publisher,\r\n 'content': content,\r\n 'images': images,\r\n 'keywords': keywords,\r\n 'category': category,\r\n 'language': language,\r\n 'geo.placename': geo_placename,\r\n 'geo.region': geo_region,\r\n 'geo.position': geo_position,\r\n 'word_count': word_count,\r\n 'url': url,\r\n 'like': like,\r\n 'share': share,\r\n 'seen': seen,\r\n 'relate_urls': related_urls,\r\n 'raw_data': raw_data\r\n }\r\n\r\n article_id = uuid.uuid1()\r\n\r\n search_for_article = session.execute(\r\n \"\"\"\r\n SELECT * 
FROM articles WHERE url = '%s'\r\n        \"\"\" % url\r\n    )\r\n\r\n    if len(search_for_article.current_rows) == 0:\r\n        searched_article_id = article_id\r\n    else:\r\n        searched_article_id = search_for_article.current_rows[0].article_id\r\n\r\n    session.execute(\r\n        \"\"\"\r\n        UPDATE articles SET headline = %s, thumbnail = %s, description = %s, type = %s, date_published = %s, date_modified = %s,\r\n        author = %s, publisher = %s, content = %s, images = %s, keywords = %s, category = %s, language = %s, geo_place_name = %s, geo_region = %s,\r\n        geo_position = %s, word_count= %s, url = %s, like = %s, share = %s, seen = %s, related_urls = %s, raw_data = %s\r\n        WHERE article_id = %s \r\n        \"\"\",\r\n        tuple(db_article.values()) +\r\n        (searched_article_id,)\r\n    )\r\n\r\n    Comments = article.get('comments')\r\n    if Comments is not None and len(Comments) != 0:\r\n        for comment in Comments:\r\n            comment_and_replies = get_comments_list(\r\n                comment, searched_article_id, None)\r\n            for comment_insert in comment_and_replies:\r\n\r\n                search_for_comment = session.execute(\r\n                    \"\"\"\r\n                    SELECT * FROM article_comments WHERE article_id = %s AND content = %s \r\n                    \"\"\",\r\n                    (searched_article_id,\r\n                     comment_insert.get('content'))\r\n                )\r\n\r\n                if len(search_for_comment.current_rows) == 0:\r\n                    searched_comment_id = comment_insert['comment_id']\r\n                else:\r\n                    searched_comment_id = search_for_comment.current_rows[0].comment_id\r\n\r\n                del comment_insert['comment_id']\r\n                session.execute(\r\n                    \"\"\"\r\n                    UPDATE article_comments SET article_id = %s, author = %s, content = %s, created_at = %s, like = %s, commentreplyid = %s WHERE comment_id = %s \r\n                    \"\"\", tuple(comment_insert.values()) + (searched_comment_id, )\r\n                )\r\n" }, { "alpha_fraction": 0.5023696422576904, "alphanum_fraction": 0.6113743782043457, "avg_line_length": 17.18181800842285, "blob_id": "fa0bcc98ef1d0b5fc42a2bf218f637179a3662ca", "content_id": "a28af26510f63844983219f63f400df6fac2ed4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 211, "license_type": "no_license", "max_line_length": 28, "num_lines": 11, "path": "/news/config_enviroment.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "\"\"\"\r\nAuthenticate Mongodb\r\n\"\"\"\r\nAUTH_USERNAME = 'admin'\r\nAUTH_PASSWORD = 'CIST#2o!7'\r\nPORT = '27017'\r\nHOST = '127.0.0.1'\r\n# PORT = '7473'\r\n# HOST = '10.0.8.32'\r\nDATABASE = 'articles'\r\nCONCURRENT_REQUESTS = 32\r\n" }, { "alpha_fraction": 0.5892420411109924, "alphanum_fraction": 0.595980167388916, "avg_line_length": 46.35211181640625, "blob_id": "4dd6415b46db3bca300359e2df7807537b75189e", "content_id": "ad5051bad64937462e156635c99bcb1d21b43729", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3433, "license_type": "no_license", "max_line_length": 126, "num_lines": 71, "path": "/test_folder/vdtuoitre.py", "repo_name": 
"vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "import scrapy\r\nfrom ..items import News, QuoteItem\r\nimport json\r\nimport requests\r\nclass test(scrapy.Spider):\r\n name='vdtuoitre'\r\n #allowed_domains = ['vnexpress.net']\r\n start_urls=['https://tv.tuoitre.vn/the-gioi-muon-mau.htm']\r\n def parse(self,response):\r\n urls = ['https://tv.tuoitre.vn/tin-nong.htm',\r\n 'https://tv.tuoitre.vn/dac-sac.htm',\r\n 'https://tv.tuoitre.vn/the-gioi-muon-mau.htm',\r\n 'https://tv.tuoitre.vn/chuyen-doi-thuong.htm',\r\n 'https://tv.tuoitre.vn/ban-co-biet.htm',\r\n 'https://tv.tuoitre.vn/the-thao.htm',\r\n 'https://tv.tuoitre.vn/giai-tri.htm',\r\n 'https://tv.tuoitre.vn/hai-huoc.htm']\r\n for url in urls:\r\n yield scrapy.Request(url, callback=self.parse_start)\r\n\r\n def parse_start(self,response):\r\n alllink = response.xpath('//div[@id=\"autonextNoiBat01\"]/div/ul/li/h3/a/@href').getall()\r\n alllink2 = response.xpath('//ul[@class=\"list-program\"]/li/h3/a/@href').getall()\r\n for link in alllink:\r\n #print(link)https://tv.tuoitre.vn/len-ban-ma-tuy-qua-khe-cua-76286.htm\r\n yield scrapy.Request('https://tv.tuoitre.vn' + link, callback=self.parse_video)\r\n for link in alllink2:\r\n yield scrapy.Request('https://tv.tuoitre.vn' + link, callback=self.parse_video)\r\n\r\n\r\n def parse_video(self, response):\r\n video = {}\r\n\r\n link_video = response.xpath('//div[@class=\"fr description-video\"]/h2/a/@href').getall()\r\n video.update({'link_video': link_video})\r\n\r\n title_video = response.xpath('//div[@class=\"fr description-video\"]/h2/a/text()').getall()\r\n video.update({'title_video': title_video})\r\n content_video = response.xpath('//div[@class=\"fr description-video\"]/p[@class=\"sapo-video\"]/text()').getall()\r\n video.update({'content_video': content_video})\r\n author_video = response.xpath('//div[@class=\"fr description-video\"]/p[@class=\"authorvideo\"]/text()').getall()\r\n video.update({'author_video': author_video})\r\n\r\n #get meta\r\n video.update({'meta-description' : response.xpath(\"//head/meta[@name='description']/@content\").get()})\r\n video.update({'meta-keywords' : response.xpath(\"//head/meta[@name='keywords']/@content\").get()})\r\n video.update({'meta-title' : response.xpath(\"//head/meta[@name='title']/@content\").get()})\r\n video.update({'meta-copyright' : response.xpath(\"//head/meta[@name='copyright']/@content\").get()})\r\n video.update({'meta-author' : response.xpath(\"//head/meta[@name='author']/@content\").get()})\r\n video.update({'meta-article:publisher' : response.xpath(\"//head/meta[@property='article:publisher']/@content\").get()})\r\n\r\n\r\n\r\n objectid = response.xpath('//div[@class=\"aspNetHidden\"]/input[@id=\"hidVideoId\"]/@value').get()\r\n cmt_resquest = 'https://id.tuoitre.vn/api/getlist-comment.api?pageindex=1'+ '&objId='+ objectid\r\n\r\n yield scrapy.Request(cmt_resquest,callback=self.parse_comment_video,meta={'data': video})\r\n\r\n return video\r\n \r\n def parse_comment_video(self,response):\r\n str1 = ''\r\n for text in response.xpath('//text()').getall():\r\n str1 += text\r\n dict = json.loads(str1)\r\n totalcmt =len(dict)\r\n \r\n log1 = response.meta['data']\r\n log1.update({'comment': dict})\r\n\r\n yield log1\r\n" }, { "alpha_fraction": 0.5338057279586792, "alphanum_fraction": 0.5376439094543457, "avg_line_length": 28.790908813476562, "blob_id": "5e69c41fbdd71210f4d87bfa02d8fd38b7913772", "content_id": "a7c6356b468cbc03b193813267dd3ce31cf0dafe", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 6774, "license_type": "no_license", "max_line_length": 76, "num_lines": 220, "path": "/modules/mongoDriver.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "import json\r\nfrom bson.objectid import ObjectId\r\n\r\n\r\ndef checkExist(dataType, article):\r\n try:\r\n return article.get(dataType)\r\n except KeyError:\r\n return '-1'\r\n\r\n\r\ndef get_comments_list(comment, article_id, parent_cmt_id):\r\n comment_list = []\r\n author = comment.get('SenderFullName')\r\n content = comment.get('CommentContent')\r\n created_at = comment.get('CreatedDate')\r\n\r\n try:\r\n created_at = float(created_at)\r\n except:\r\n pass\r\n\r\n if comment.get('Liked') is not None:\r\n like = int(comment.get('Liked'))\r\n elif comment.get('Like') is not None:\r\n like = int(comment.get('Like'))\r\n else:\r\n like = -1\r\n\r\n comment_id = ObjectId()\r\n\r\n db_comment = {\r\n '_id': comment_id,\r\n 'article_id': article_id,\r\n 'author': author,\r\n 'content': content,\r\n 'created_at': created_at,\r\n 'like': like\r\n }\r\n if parent_cmt_id is not None:\r\n db_comment.update({'commentreplyid': parent_cmt_id})\r\n else:\r\n db_comment.update({'commentreplyid': None})\r\n comment_list.append(db_comment)\r\n\r\n if comment.get('Replies') is not None:\r\n for reply in comment['Replies']:\r\n comment_list.extend(get_comments_list(\r\n reply, article_id, comment_id))\r\n\r\n return comment_list\r\n\r\n\r\ndef insert_article(database, article, name):\r\n headline = article.get('headline')\r\n if headline is None:\r\n headline = article.get('meta-title')\r\n if headline is None:\r\n headline = '-1'\r\n\r\n thumbnail = json.dumps(article.get('image'))\r\n if thumbnail is None:\r\n thumbnail = '-1'\r\n\r\n description = article.get('description')\r\n if description is None:\r\n description = article.get('meta-description')\r\n if description is None:\r\n description = '-1'\r\n\r\n type = checkExist('@type', article)\r\n date_published = checkExist('datePublished', article)\r\n date_modified = checkExist('dateModified', article)\r\n author = json.dumps(article.get('author'))\r\n if author is None:\r\n author = article.get('meta-article:author')\r\n if author is None:\r\n author = '-1'\r\n publisher = json.dumps(article.get('publisher'))\r\n if publisher is None:\r\n publisher = article.get('meta-article:publisher')\r\n if publisher is None:\r\n publisher = '-1'\r\n content = checkExist('content', article)\r\n\r\n images = json.dumps(article.get('image-urls'))\r\n if images is None:\r\n images = '-1'\r\n\r\n keywords = checkExist('meta-keywords', article)\r\n category = checkExist('category', article)\r\n organization = checkExist('organization', article)\r\n\r\n language = checkExist('language', article)\r\n geo_placename = checkExist('geo.placename', article)\r\n geo_region = checkExist('geo.region', article)\r\n geo_position = checkExist('geo.position', article)\r\n word_count = checkExist('word_count', article)\r\n url = article.get('url')\r\n\r\n interactions = article.get('interactions')\r\n like = article.get('likes-counter')\r\n if like is None:\r\n like = article.get('like_count')\r\n if like is None:\r\n if interactions is not None:\r\n like = interactions.get('like_count')\r\n if like is None:\r\n like = \"-1\"\r\n if \"k\" in like.lower():\r\n like = like.lower()\r\n like = like.replace(\",\", \".\")\r\n like = like.replace(\"k\", \"\")\r\n like = float(like) * 1000\r\n like = int(like)\r\n\r\n share = article.get('share_count')\r\n if share is 
None:\r\n        if interactions is not None:\r\n            share = interactions.get('share_count')\r\n    if share is None:\r\n        share = '-1'\r\n    share = str(share)  # counters may arrive as ints; normalise before the \"k\" check\r\n    if \"k\" in share.lower():\r\n        share = share.lower()\r\n        share = share.replace(\",\", \".\")\r\n        share = share.replace(\"k\", \"\")\r\n        share = float(share) * 1000\r\n        share = int(share)\r\n\r\n    seen = article.get('view-count')\r\n    if seen is None:\r\n        seen = article.get('views')\r\n        if seen is None:\r\n            seen = '-1'\r\n\r\n    seen = str(seen)  # counters may arrive as ints; normalise before the \"k\" check\r\n    if \"k\" in seen.lower():\r\n        seen = seen.lower()\r\n        seen = seen.replace(\",\", \".\")\r\n        seen = seen.replace(\"k\", \"\")\r\n        seen = float(seen) * 1000\r\n        seen = int(seen)\r\n\r\n    # check for None before json.dumps (json.dumps(None) is the string 'null', never None)\r\n    related_urls = article.get('related_urls')\r\n    if related_urls is None:\r\n        related_urls = \"-1\"\r\n    else:\r\n        related_urls = json.dumps(related_urls)\r\n\r\n    raw_data = '-1'\r\n\r\n    db_article = {\r\n        'headline': headline,\r\n        'thumbnail': thumbnail,\r\n        'description': description,\r\n        'type': type,\r\n        'date_published': date_published,\r\n        'date_modified': date_modified,\r\n        'author': author,\r\n        'publisher': publisher,\r\n        'content': content,\r\n        'images': images,\r\n        'keywords': keywords,\r\n        'category': category,\r\n        'organization': organization,\r\n        'language': language,\r\n        'geo_placename': geo_placename,\r\n        'geo_region': geo_region,\r\n        'geo_position': geo_position,\r\n        'word_count': word_count,\r\n        'url': url,\r\n        'like': like,\r\n        'share': share,\r\n        'seen': seen,\r\n        'related_urls': related_urls,\r\n        'raw_data': raw_data\r\n    }\r\n\r\n    collection = database[name]\r\n\r\n    search_for_article = collection.find_one(\r\n        {\r\n            'url': url\r\n        }\r\n    )\r\n\r\n    if search_for_article is None:\r\n        searched_article_id = ObjectId()\r\n        Article = {'_id': searched_article_id}\r\n        Article.update(db_article)\r\n\r\n        collection.insert_one(Article)\r\n\r\n    else:\r\n        searched_article_id = search_for_article.get('_id')\r\n        collection.update_one(search_for_article,\r\n                              {'$set': db_article}, True)\r\n\r\n    Comments = article.get('comments')\r\n    if Comments is not None and len(Comments) != 0:\r\n        for comment in Comments:\r\n            comment_and_replies = get_comments_list(\r\n                comment, searched_article_id, None)\r\n            for comment_insert in comment_and_replies:\r\n                cmt_collection = database[name+'_comments']\r\n\r\n                search_for_comment = cmt_collection.find_one(\r\n                    {\r\n                        'article_id': searched_article_id,\r\n                        'content': comment_insert.get('content')\r\n                    }\r\n                )\r\n\r\n                if search_for_comment is None:\r\n                    cmt_collection.insert_one(\r\n                        comment_insert\r\n                    )\r\n                else:\r\n                    del comment_insert['_id']\r\n                    del comment_insert['commentreplyid']\r\n\r\n                    cmt_collection.update_one(\r\n                        search_for_comment, {'$set': comment_insert}, True)\r\n" }, { "alpha_fraction": 0.49688926339149475, "alphanum_fraction": 0.5043550133705139, "avg_line_length": 41.83636474609375, "blob_id": "51228cb61d4bec6ae9f4c8589ba6daf613d5c748", "content_id": "c89418410a545ac9af8808c8d03c2925e6c34a7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9644, "license_type": "no_license", "max_line_length": 292, "num_lines": 220, "path": "/news/spiders/soha.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "import scrapy\r\nimport json\r\n\r\nimport modules.timeConverter as time\r\n\r\n\r\nclass SohaSpider(scrapy.Spider):\r\n    name = 'soha'\r\n    allowed_domains = ['soha.vn']\r\n\r\n    def __init__(self, crawlMode='', **kwargs):\r\n        super().__init__(**kwargs)\r\n        self.crawlMode = crawlMode\r\n        if crawlMode == 'update' or crawlMode == '':\r\n            self.crawlMode = 'Update'\r\n\r\n        self.articleCount = 
0\r\n\r\n    def start_requests(self):\r\n        return [scrapy.Request(\"https://soha.vn/\", callback=self.logged_in)]\r\n\r\n    def logged_in(self, response):\r\n        urls = [\r\n            \"https://soha.vn/thoi-su.htm\",\r\n            \"https://soha.vn/kinh-doanh.htm\",\r\n            \"https://soha.vn/quoc-te.htm\",\r\n            \"https://soha.vn/quan-su.htm\",\r\n            \"https://soha.vn/cu-dan-mang.htm\",\r\n            \"https://soha.vn/giai-tri.htm\",\r\n            \"https://soha.vn/phap-luat.htm\",\r\n            \"https://soha.vn/song-khoe.htm\",\r\n            \"https://soha.vn/cong-nghe.htm\",\r\n            \"https://soha.vn/doi-song.htm\",\r\n            \"https://soha.vn/kham-pha.htm\",\r\n        ]\r\n        # scrape articles\r\n        for url in urls:\r\n            yield scrapy.Request(url, self.parse, meta={'index': 3})\r\n\r\n        # scrape sport articles\r\n        yield scrapy.Request(\"https://soha.vn/the-thao.htm\", callback=self.parse_sport_nav)\r\n\r\n        # scrape videos - skipped because the pages have no ld+json\r\n        # yield scrapy.Request(\"https://soha.vn/video.htm\", callback=self.parse_video_passer)\r\n\r\n    def parse(self, response):\r\n        if response.xpath('//@href').get() is None:\r\n            return\r\n        for href in response.xpath('//div[@class = \"info-new-cate elp-list\"]/h3/a/@href'):\r\n            try:\r\n                yield response.follow(href, callback=self.parse_article, meta={'atc_type': 'normal'})\r\n            except Exception:\r\n                self.logger.error(\"ERROR: \", exc_info=True)\r\n                continue\r\n\r\n        if 'timeline' not in response.url:\r\n            section_id = response.xpath('//*[@id=\"hdZoneId\"]/@value').get()\r\n        else:\r\n            section_id = response.meta['section_id']\r\n\r\n        index = response.meta['index']\r\n        next_page = \"https://soha.vn/timeline/\" + \\\r\n            section_id + \"/trang-\"+str(index)+\".htm\"\r\n        yield scrapy.Request(next_page, callback=self.parse, meta={'index': index+1, 'section_id': section_id})\r\n\r\n    def parse_article(self, response):\r\n        atc_type = response.meta['atc_type']\r\n\r\n        article = {}\r\n\r\n        # get ld_json\r\n        if atc_type == 'normal':\r\n            ld_json = response.xpath(\r\n                '//*[@id=\"Head1\"]//script[contains(text(),\"NewsArticle\")]/text()').get()\r\n            ld_json_dict = json.loads(ld_json)\r\n            ld_json_dict = time.timestamp_converter(ld_json_dict)\r\n            article.update(ld_json_dict)\r\n\r\n        try:\r\n            cate_json = response.xpath(\r\n                '//script[contains(text(), \"BreadcrumbList\")]/text()').get().strip()\r\n            cate_json = json.loads(cate_json)\r\n            category = cate_json.get('itemListElement')[\r\n                1].get('item').get('name')\r\n            article.update({'category': category})\r\n        except:\r\n            pass\r\n\r\n        # get meta elements\r\n        elems = {\r\n            'meta-description': response.xpath(\"//meta[@name='description']/@content\").get(),\r\n            'meta-keywords': response.xpath(\"//meta[@name='keywords']/@content\").get(),\r\n            'meta-title': response.xpath(\"//meta[@name='title']/@content\").get(),\r\n            'meta-copyright': response.xpath(\"//meta[@name='copyright']/@content\").get(),\r\n            'meta-author': response.xpath(\"//meta[@name='author']/@content\").get(),\r\n            'language': response.xpath('//meta[@http-equiv = \"content-language\"]/@content').get(),\r\n            'geo.placename': response.xpath('//meta[@name = \"geo.placename\"]/@content').get(),\r\n            'geo.position': response.xpath('//meta[@name = \"geo.position\"]/@content').get(),\r\n            'geo.region': response.xpath('//meta[@name = \"geo.region\"]/@content').get(),\r\n            'meta-article:author': response.xpath(\"//meta[@property='article:author']/@content\").get(),\r\n            'meta-article:publisher': response.xpath(\"//meta[@property='article:publisher']/@content\").get(),\r\n            'organization': 'soha',\r\n            'url': response.url,\r\n            # 'related_urls': response.xpath('//div[@class = 
\"article-oldnew\"]//div/div[@class = \"article-oldnew-img\"]/a/@href').getall()\r\n }\r\n article.update(elems)\r\n\r\n # get content\r\n content = ''\r\n for text in response.xpath('//div[@class = \"clearfix news-content\"]/p/text()').getall():\r\n content += text\r\n article.update({'content': content})\r\n\r\n word_count = len(content.split())\r\n article.update({'word_count': word_count})\r\n\r\n # get image url\r\n images = {}\r\n for index, src in enumerate(response.xpath('//div[@class = \"clearfix news-content\"]/div[@type = \"Photo\"]//@src').getall(), 1):\r\n images.update({'image' + str(index): src})\r\n article.update({'image-urls': images})\r\n\r\n # get likes,comments\r\n yield scrapy.Request(\"https://sharefb.cnnd.vn/?urls=\"+response.url, callback=self.parse_interations, headers={'Accept': 'application/json, text/javascript, */*; q=0.01',\r\n 'Origin': 'https://soha.vn',\r\n 'Referer': response.url,\r\n 'Sec-Fetch-Mode': 'cors',\r\n 'User-Agent': 'Mozilla/5.0 (Windows 10 Win64 x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'}, meta={'article': article, 'atc_type': atc_type})\r\n\r\n def parse_sport_nav(self, response):\r\n for href in response.xpath('//ul[@class = \"sub-menu clearfix fr\"]//li/a/@href').getall()[:5]:\r\n try:\r\n yield response.follow(href, self.parse_sport_passer)\r\n except Exception:\r\n self.logger.error(\"ERROR: \", exc_info=True)\r\n continue\r\n\r\n def parse_sport_passer(self, response):\r\n find_id = response.xpath(\r\n '//script[contains(text(),\"zoneId\")]/text()').get()\r\n pv1 = find_id.find('zoneId')\r\n pv2 = find_id[pv1:].find(\"'\") + pv1 + 1\r\n pv3 = find_id[pv2:].find(\"'\") + pv2\r\n section_id = find_id[pv2:pv3]\r\n page_index = 1\r\n page_url = \"https://soha.vn/timeline_sport/\" + \\\r\n section_id+\"/e0-trang-\"+str(page_index)+\".htm\"\r\n yield scrapy.Request(page_url, callback=self.parse_sport, meta={'section_id': section_id, 'page_index': page_index})\r\n\r\n def parse_sport(self, response):\r\n if response.xpath('//@href').get() is None:\r\n return\r\n for href in response.xpath('//li[@class=\"clearfix\"]/a/@href'):\r\n try:\r\n yield response.follow(href, callback=self.parse_article, meta={'atc_type': \"sport\"})\r\n except Exception:\r\n self.logger.error(\"ERROR: \", exc_info=True)\r\n continue\r\n\r\n section_id = response.meta['section_id']\r\n page_index = response.meta['page_index'] + 1\r\n\r\n page_url = \"https://soha.vn/timeline_sport/\" + \\\r\n section_id+\"/e0-trang-\"+str(page_index)+\".htm\"\r\n yield scrapy.Request(page_url, callback=self.parse_sport, meta={'section_id': section_id, 'page_index': page_index})\r\n\r\n def parse_video_passer(self, response):\r\n PAGE_CAP = 595\r\n for page in range(1, PAGE_CAP):\r\n video_getter = \"https://s1.soha.vn/video/latest/0-\" + \\\r\n str(page)+\"-1000-0.htm\"\r\n yield scrapy.Request(video_getter, callback=self.parse_video)\r\n\r\n def parse_video(self, response):\r\n videos = ''\r\n for a in response.xpath('//text()').getall():\r\n videos += a\r\n if videos == 'null':\r\n return\r\n\r\n video_dict = []\r\n check = 0\r\n string = ''\r\n for a in videos:\r\n if a is '{':\r\n check = 1\r\n if check is 1:\r\n string += a\r\n if a is '}':\r\n string += a\r\n check = 0\r\n try:\r\n dict = json.loads(string)\r\n dict['FileName'] = \"http://vcplayer.mediacdn.vn/1.1?_site=sohanews&vid=sohanews/\" + dict['FileName']\r\n video_dict.append(dict)\r\n except:\r\n pass\r\n string = ''\r\n\r\n for vid in video_dict:\r\n yield {'video-' + str(vid['Id']): 
vid}\r\n\r\n    def parse_interations(self, response):\r\n        article = response.meta['article']\r\n        atc_type = response.meta['atc_type']\r\n\r\n        string = ''\r\n        for a in response.xpath('//text()').getall():\r\n            string += a\r\n        if not string:  # empty response body\r\n            string = '[{ }]'\r\n        inter_dict = json.loads(string[1:len(string) - 1])\r\n        if atc_type == \"normal\":\r\n            del inter_dict[\"url\"]\r\n        article.update({'interactions': inter_dict})\r\n\r\n        self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n                         article.get('url'))\r\n        self.articleCount += 1\r\n        return article\r\n" }, { "alpha_fraction": 0.5325430035591125, "alphanum_fraction": 0.5337052345275879, "avg_line_length": 48.61176300048828, "blob_id": "c8b0256a1b60d49dc56c36a706ca3922c4189fb6", "content_id": "834f25f7e869586f2d35f1d31e50931d5a163bb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4305, "license_type": "no_license", "max_line_length": 131, "num_lines": 85, "path": "/news/spiders/baomoi.py", "repo_name": "vuminhph/news-scraping", "src_encoding": "UTF-8", "text": "from scrapy.spiders import CrawlSpider, Rule\r\nfrom scrapy.linkextractors import LinkExtractor\r\nimport json\r\nimport modules.timeConverter as time\r\n\r\n\r\nclass BaoMoiSpider(CrawlSpider):\r\n    name = \"baomoi\"\r\n    # allowed_domains entries must be bare domains, not URLs\r\n    allowed_domains = ['baomoi.com', 'sharefb.cnnd.vn']\r\n    start_urls = ['https://baomoi.com/']\r\n\r\n    rules = (\r\n        Rule(LinkExtractor(allow_domains=['baomoi.com']), callback='parse_item', follow=True),\r\n    )\r\n\r\n    def __init__(self, crawlMode='', **kwargs):\r\n        super().__init__(**kwargs)\r\n        self.crawlMode = crawlMode\r\n        if crawlMode == 'update' or crawlMode == '':\r\n            self.crawlMode = 'Update'\r\n        print(self.crawlMode)\r\n\r\n        self.articleCount = 0\r\n\r\n    def parse_item(self, response):\r\n        article = dict()\r\n        # get title, link\r\n        title = response.xpath('//div[@class=\"article\"]/h1[@class=\"article__header\"]/text()').extract_first()\r\n        if title is not None:\r\n            # get ld_json\r\n            try:\r\n                ld_json = response.xpath(\"//script[@type='application/ld+json'][1]/text()\").get()\r\n                ld_json = json.loads(ld_json)\r\n                ld_json = time.timestamp_converter(ld_json)\r\n                article.update(ld_json)\r\n            except:\r\n                pass\r\n            # get meta\r\n            article.update({'type': response.xpath(\"//head/meta[@property='og:type']/@content\").get()})\r\n            article.update({'description': response.xpath(\"//head/meta[@name='description']/@content\").get()})\r\n            article.update({'keywords': response.xpath(\"//head/meta[@name='keywords']/@content\").get()})\r\n            article.update({'category': response.xpath(\"//head/meta[@property='article:section']/@content\").get()})\r\n            article.update({'copyright': response.xpath(\"//head/meta[@name='copyright']/@content\").get()})\r\n            article.update({'Language': response.xpath(\"//head/meta[@name='Language']/@content\").get()})\r\n            article.update({'geo_place_name': response.xpath(\"//meta[@name = 'geo.placename']/@content\").get()})\r\n            article.update({'geo_region': response.xpath(\"//meta[@name = 'geo.region']/@content\").get()})\r\n            article.update({'geo_position': response.xpath(\"//meta[@name = 'geo.position']/@content\").get()})\r\n            article.update({'organization': 'Báo mới'})\r\n            link = response.url\r\n            article.update({'title': title, 'link': link})\r\n            # author, content, word_count\r\n            content = ''\r\n            author = ''\r\n            for text in response.xpath(\r\n                    '(//div[@id=\"ArticleContent\"]/p[@class=\"t-j\"]/span/text())|(//div[@class=\"article__body\"]/p['\r\n                    
'@class=\"body-text body-author\"]/strong/text())|(//p[@class=\"body-text body-author\"]/strong/text())').getall():\r\n author += text.strip()\r\n article.update({'author': author})\r\n for text in response.xpath(\r\n '(//div[@id=\"ArticleContent\"]/p[@class=\"t-j\"]/text())|(//div[@class=\"article__body\"]/p['\r\n '@class=\"body-text\"]/text())|(//div[@class=\"article__sapo\"]/text())').getall():\r\n content += text.strip()\r\n article.update({'content_article': content})\r\n word_count = len(content.split())\r\n article.update({'word_count': word_count})\r\n # get image\r\n thumbnail = response.xpath('//p[@class=\"body-image\"]/img/@src').getall()\r\n article.update({'thumbnail': thumbnail})\r\n # get related_url\r\n relate_url = []\r\n htags = response.xpath('//div[@data-track=\"detail|related\"]/div/h4')\r\n for tag in htags:\r\n relate_urls = {}\r\n headline = tag.xpath('a/@title').get()\r\n url = str(tag.xpath('a/@href').extract_first())\r\n relate_urls.update({'headline': headline, 'url': url})\r\n relate_url.append(relate_urls)\r\n article.update({\"related_url\": relate_url})\r\n self.logger.info(\"#%d: Scraping %s\", self.articleCount,\r\n article.get('link'))\r\n self.articleCount += 1\r\n yield article\r\n else:\r\n pass\r\n" } ]
35
TrendingTechnology/use-proper-hosting
https://github.com/TrendingTechnology/use-proper-hosting
2bcbf02a71d800f73780bcde4b7e0352c6723dcf
375a808302e579a584e2c9bbc04a0a60f45130b4
be141b9612c3c6a22c21331d6917d070125ac4ac
refs/heads/master
2023-06-30T23:07:58.519478
2021-08-08T10:55:44
2021-08-08T10:55:44
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6834951639175415, "alphanum_fraction": 0.7067961096763611, "avg_line_length": 31.25, "blob_id": "225c812df4a38163aa89b19a600cf94e28026519", "content_id": "078a1e5f76a97711f7f95cb75cb2ca922f9de66e", "detected_licenses": [ "WTFPL" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 515, "license_type": "permissive", "max_line_length": 92, "num_lines": 16, "path": "/setup.py", "repo_name": "TrendingTechnology/use-proper-hosting", "src_encoding": "UTF-8", "text": "import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"UTF-8\") as f:\n long_description = f.read()\n\nsetuptools.setup(\n name=\"use-proper-hosting\",\n version=\"1.0.1\",\n author=\"eunwoo1104\",\n author_email=\"sions04@naver.com\",\n description=\"No more support server flooding with questions about unsupported hosting.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/playground1104/use-proper-hosting\",\n packages=setuptools.find_packages()\n)" }, { "alpha_fraction": 0.743842363357544, "alphanum_fraction": 0.7536945939064026, "avg_line_length": 22.882352828979492, "blob_id": "d210b8e86031490b276eed3de61fec414a368ee1", "content_id": "82631becf3219cffd54a3c0b04314a520f1b5927", "detected_licenses": [ "WTFPL" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 406, "license_type": "permissive", "max_line_length": 98, "num_lines": 17, "path": "/README.md", "repo_name": "TrendingTechnology/use-proper-hosting", "src_encoding": "UTF-8", "text": "# use-proper-hosting\nNo more support server flooding with questions about unsupported hosting. \nIf you want to block other hosting platform, feel free to contribute.\n\n## Installation\n```\npip install -U use-proper-hosting\n```\n\n## Example\n\n```py\nfrom use_proper_hosting import detect_repl\n\ndetect_repl()\n```\n![](https://github.com/playground1104/use-proper-hosting/blob/master/.github/example.png?raw=true)\n" }, { "alpha_fraction": 0.6033269166946411, "alphanum_fraction": 0.6058861017227173, "avg_line_length": 44.97058868408203, "blob_id": "1384261b51d5e9f1613c549184ecbae6cfbb4145", "content_id": "c16606247fb8ab53b4abb2c64bff6ff79ce3bfef", "detected_licenses": [ "WTFPL" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1563, "license_type": "permissive", "max_line_length": 161, "num_lines": 34, "path": "/use_proper_hosting/__init__.py", "repo_name": "TrendingTechnology/use-proper-hosting", "src_encoding": "UTF-8", "text": "import os\nimport sys\n\n__version__ = \"1.0.1\"\n\n\nclass PleaseUseProperHosting(Exception):\n def __init__(self, platform: str, username: str = \"there\", extra_words: str = \"\"):\n super().__init__(f\"Invalid Platform; see below for more information.\\n\"\n f\"Hey {username}, it seems that you are using {platform} to run this. Unfortunately, this doesn't support {platform}.\\n\"\n f\"If you can't afford paid hosting platforms and don't know any other hosting platforms, there actually is other great free hostings.\\n\"\n f\"For example, Oracle Cloud offers always free cloud services, and most services offer at least 1 year free tier services.\\n\"\n f\"Why not trying to find those?\\n{extra_words}\")\n\n\ndef detect_repl(safe: bool = False):\n \"\"\"\n Detects if this platform is Replit.\n\n :param safe: Whether to just display text. 
Default False.\n\n :raises PleaseUseProperHosting: Yes, you are using Replit.\n \"\"\"\n if [x for x in os.environ if x in ['REPL_SLUG', 'REPL_IMAGE', 'REPL_ID', 'REPL_OWNER', 'REPLIT_DB_URL', 'REPL_LANGUAGE', 'REPL_PUBKEYS']]:\n ex = PleaseUseProperHosting(\"Replit\",\n os.environ.get(\"REPL_OWNER\", \"there\"),\n \"Oh, and since you are using Replit, if your plan is not hacker or above, using Replit as hosting is against their ToS.\")\n if safe:\n return print(str(ex), file=sys.stderr)\n raise ex\n\n\nif __name__ == \"__main__\":\n detect_repl()\n" } ]
3
maximino-dev/python_tabs_4_guitar
https://github.com/maximino-dev/python_tabs_4_guitar
d1fb84a1b2967f6d5004bf6c06cba99b63abc76d
e51f73d8f0ec008f1f9220f593277e26db1b7321
04195979a25c2351dce9bffcb65f535e12b7ceb2
refs/heads/main
2023-02-27T20:18:25.526808
2021-01-26T10:27:50
2021-01-26T10:27:50
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6153846383094788, "alphanum_fraction": 0.6538461446762085, "avg_line_length": 24, "blob_id": "56437ea42ee86af850089753ba01b06dbdd44755", "content_id": "256218912eae402bb7d4845aeecce8e16d96d241", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 26, "license_type": "no_license", "max_line_length": 24, "num_lines": 1, "path": "/README.md", "repo_name": "maximino-dev/python_tabs_4_guitar", "src_encoding": "UTF-8", "text": "\"# python_tabs_4_guitar\" \n" }, { "alpha_fraction": 0.6260934472084045, "alphanum_fraction": 0.6508468389511108, "avg_line_length": 29.528409957885742, "blob_id": "59460e80a8fe39253c34aa1171b01097c9134d7b", "content_id": "67df98f9bd31f0e8968145f435e89171ac21e15f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5376, "license_type": "no_license", "max_line_length": 90, "num_lines": 176, "path": "/main.py", "repo_name": "maximino-dev/python_tabs_4_guitar", "src_encoding": "UTF-8", "text": "import tkinter as tk\nimport partition\nfrom tkinter import filedialog, messagebox\n\nclass App:\n\n\tCURRENT_POS = [30,20]\n\n\tDICO = {} # will contain all positions and values of notes\n\tPOS_LIST = [] # will contain all positions of notes\n\n\tdef __init__(self):\n\t\tself.root = tk.Tk()\n\n\t\tself.scene()\n\n\t\tself.root.mainloop()\n\n\tdef cancel(self,event=None):\n\t\tif(len(self.POS_LIST) != 0):\n\t\t\tcoords = self.POS_LIST.pop()\n\t\t\tif(self.DICO[(coords[0],coords[1])] == 0):\n\t\t\t\tself.canv.delete(\"nb_\"+str(coords[0])+str(coords[1]))\n\t\t\t\tself.canv.delete(\"rect_\"+str(coords[0])+str(coords[1]))\n\t\t\t\tself.DICO.pop((coords[0],coords[1]))\n\t\t\telse:\n\t\t\t\tself.canv.delete(\"nb_\"+str(coords[0])+str(coords[1]))\n\t\t\t\tself.canv.delete(\"rect_\"+str(coords[0])+str(coords[1]))\n\t\t\t\tself.DICO[(coords[0],coords[1])] -= 1\n\t\t\t\tself.canv.create_rectangle(coords[0]-5, \n\t\t\t\t\tcoords[1]-5, coords[0]+5, coords[1]+5,\n\t\t\t\t\tfill='white', outline='white',tag=\"rect_\"+str(coords[0])+str(coords[1]))\n\t\t\t\tself.canv.create_text(coords[0],coords[1], text=str(self.DICO[(coords[0],coords[1])]),\n\t\t\t\t\ttag=\"nb_\"+str(coords[0])+str(coords[1]))\n\n\tdef newPartition(self):\n\t\tself.canv.delete(\"all\")\n\t\tself.Partition = partition.Partition(self.canv)\n\t\tself.DICO.clear()\n\t\tself.POS_LIST = []\n\t\tself.canv.create_oval(28,18,33,23,fill='red',outline='red', tag=\"visu\")\n\n\tdef openPartition(self):\n\t\tfile = filedialog.askopenfile(\n\t\t\tmode=\"r\", defaultextension=\".tab\",\n\t\t\tfiletypes=((\"tab Files\", \".tab\"),(\"All files\", \".*\")))\n\t\tif file:\n\t\t\tself.newPartition()\n\t\t\tfor lines in file.readlines():\n\t\t\t\tparse = lines.rstrip().split(',')\n\t\t\t\tcoords = list(map(int,parse[0].split(\" \")))\n\t\t\t\tfor i in range(int(parse[1])+1):\n\t\t\t\t\tself.placeNote(coords)\n\t\tfile.close()\n\n\tdef savePartition(self):\n\t\tfile = filedialog.asksaveasfilename(\n\t\t\tdefaultextension=\".tab\",\n\t\t\tfiletypes=((\"Tab Files\", \".tab\"),(\"All files\", \".*\"))\n\t\t)\n\t\tif not file:\n\t\t\treturn\n\t\ttry:\n\t\t\tf = open(file, \"w\", encoding = \"utf-8\")\n\t\texcept FileNotFoundError:\n\t\t\tmessagebox.showerror(\n\t\t\t\ttitle=\"Error\",\n\t\t\t\tmessage=\"Erreur fichier non trouvé\"\n\t\t\t)\n\t\texcept IOError:\n\t\t\tmessagebox.showerror(\n\t\t\t\ttitle=\"Error\",\n\t\t\t\tmessage=\"Le fichier n'existe pas\"\n\t\t\t)\n\t\telse:\n\t\t\tfor key in 
self.DICO:\r\n\t\t\t\tf.write(str(key[0])+\" \"+str(key[1])+\",\"+str(self.DICO[key])+\"\\n\")\r\n\t\t\tf.close()\r\n\r\n\tdef scene(self):\r\n\t\tself.menu_frame = tk.Frame(self.root)\r\n\t\tself.menu_frame.pack(side=tk.TOP, expand=True, fill=tk.X, anchor=\"n\")\r\n\t\tself.menu_fichier = tk.Menubutton(\r\n\t\t\tself.menu_frame,\r\n\t\t\ttext=\"File\",\r\n\t\t\tunderline=0,\r\n\t\t\trelief=\"raised\")\r\n\r\n\t\tself.deroul_fichier = tk.Menu(self.menu_fichier, tearoff=False)\r\n\t\tself.deroul_fichier.add_command(label=\"New (Ctrl + N)\",\r\n\t\t\t\t\t\t\t\t\t\tcommand=self.newPartition)\r\n\t\tself.deroul_fichier.add_command(label=\"Open (Ctrl + O)\",\r\n\t\t\t\t\t\t\t\t\t\tcommand=self.openPartition)\r\n\t\tself.deroul_fichier.add_command(label=\"Save (Ctrl + S)\",\r\n\t\t\t\t\t\t\t\t\t\tcommand=self.savePartition)\r\n\t\tself.deroul_fichier.add_separator()\r\n\t\tself.deroul_fichier.add_command(label=\"Quit (Alt + F4)\",\r\n\t\t\t\t\t\t\t\t\t\tcommand=self.quit)\r\n\t\tcframe = tk.Frame(self.root)\r\n\t\tcframe.rowconfigure(0, weight=1)\r\n\t\tcframe.columnconfigure(0, weight=1)\r\n\t\tcframe.pack(expand=1, fill=\"both\", padx=5, pady=5)\r\n\r\n\t\tself.canv = tk.Canvas(\r\n\t\t\tcframe,\r\n\t\t\twidth=1000,\r\n\t\t\theight=200,\r\n\t\t\tbg=\"white\")\r\n\r\n\t\tself.canv.create_oval(28,18,33,23,fill='red',\r\n\t\t\toutline='red', tag=\"visu\")\r\n\r\n\t\tself.Partition = partition.Partition(self.canv)\r\n\r\n\t\thbar = tk.Scrollbar(cframe, orient=\"horizontal\")\r\n\r\n\t\tself.canv.configure(\r\n\t\t\txscrollcommand=hbar.set,\r\n\t\t\tscrollregion=(0, 0, 20000, 600),\r\n\t\t)\r\n\r\n\t\thbar.configure(command=self.canv.xview)\r\n\t\thbar.pack(side=\"bottom\")\r\n\r\n\t\tself.canv.bind(\"<Motion>\", self.visualisation)\r\n\t\tself.canv.bind(\"<Button-1>\", self.addNote)\r\n\t\tself.root.protocol(\"WM_DELETE_WINDOW\", self.quit)\r\n\t\tself.root.bind(\"<Control-n>\", self.newPartition)\r\n\t\tself.root.bind(\"<Control-o>\", self.openPartition)\r\n\t\tself.root.bind(\"<Control-s>\", self.savePartition)\r\n\t\tself.root.bind(\"<Control-z>\", self.cancel)\r\n\r\n\t\tself.menu_fichier.config(menu=self.deroul_fichier)\r\n\t\tself.menu_fichier.pack(side=tk.LEFT)\r\n\r\n\t\tself.canv.pack()\r\n\r\n\tdef visualisation(self,event):\r\n\t\tcoords = self.Partition.convertCanv([event.x,event.y])\r\n\t\tnew_coords = self.Partition.convertPartition(coords)\r\n\t\tdelta_x = new_coords[0] - self.CURRENT_POS[0]\r\n\t\tdelta_y = new_coords[1] - self.CURRENT_POS[1]\r\n\t\tself.CURRENT_POS = new_coords\r\n\t\tself.canv.move(\"visu\", delta_x, delta_y)\r\n\r\n\tdef addNote(self, event):\r\n\t\tself.placeNote(self.CURRENT_POS)\r\n\r\n\tdef placeNote(self, coords):\r\n\t\tself.canv.create_rectangle(coords[0]-5, \r\n\t\t\tcoords[1]-5, coords[0]+5, coords[1]+5,\r\n\t\t\tfill='white', outline='white',tag=\"rect_\"+str(coords[0])+str(coords[1]))\r\n\t\tif(self.DICO.get((coords[0],coords[1])) is not None):\r\n\t\t\tif(self.DICO[(coords[0],coords[1])] == 21):\r\n\t\t\t\tself.canv.create_text(coords[0],coords[1], text=\"0\",\r\n\t\t\t\t\ttag=\"nb_\"+str(coords[0])+str(coords[1]))\r\n\t\t\t\tself.DICO[(coords[0],coords[1])] = 0\r\n\t\t\telse:\r\n\t\t\t\tself.canv.create_text(coords[0],coords[1],\r\n\t\t\t\t\ttext=str(self.DICO[(coords[0],coords[1])]+1),\r\n\t\t\t\t\ttag=\"nb_\"+str(coords[0])+str(coords[1]))\r\n\t\t\t\tself.DICO[(coords[0],coords[1])] += 1\r\n\t\telse:\r\n\t\t\tself.canv.create_text(coords[0],coords[1], text=\"0\",\r\n\t\t\t\ttag=\"nb_\"+str(coords[0])+str(coords[1]))\r\n\t\t\tself.DICO[(coords[0],coords[1])] = 0\r\n\t\tself.POS_LIST.append(coords)\r\n\r\n\r\n\tdef quit(self):\r\n\t\tif messagebox.askyesno('Quit', 'Are you sure you want to quit?'):\r\n\t\t\tself.root.quit()\r\n\r\nif 
__name__=='__main__':\r\n\tApp()\r\n\texit(0)\r\n" }, { "alpha_fraction": 0.43089431524276733, "alphanum_fraction": 0.5069344639778137, "avg_line_length": 28.04166603088379, "blob_id": "d5341d993ed7a4d1e8cf2ec628cfda320da9be1d", "content_id": "c7dd2acbd9086041a928d6ed337109b72c15baf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2091, "license_type": "no_license", "max_line_length": 89, "num_lines": 72, "path": "/partition.py", "repo_name": "maximino-dev/python_tabs_4_guitar", "src_encoding": "UTF-8", "text": "class Partition:\r\n\r\n    def __init__(self, pcanvas):\r\n        self.draw_partition(pcanvas)\r\n\r\n    def draw_partition(self, pcanvas):\r\n        \"\"\"Draws the tab staff\"\"\"\r\n        pcanvas.create_line(\r\n            30,20,20000,20,\r\n            fill=\"black\")\r\n        pcanvas.create_line(\r\n            30,50,20000,50,\r\n            fill=\"black\")\r\n        pcanvas.create_line(\r\n            30,80,20000,80,\r\n            fill=\"black\")\r\n        pcanvas.create_line(\r\n            30,110,20000,110,\r\n            fill=\"black\")\r\n        pcanvas.create_line(\r\n            30,140,20000,140,\r\n            fill=\"black\")\r\n        pcanvas.create_line(\r\n            30,170,20000,170,\r\n            fill=\"black\")\r\n\r\n        pcanvas.create_text(\r\n            10, 20, text=\"e\", font=\"Times 16 italic bold\")\r\n\r\n        pcanvas.create_text(\r\n            10, 50, text=\"B\", font=\"Times 16 italic bold\")\r\n\r\n        pcanvas.create_text(\r\n            10, 80, text=\"G\", font=\"Times 16 italic bold\")\r\n\r\n        pcanvas.create_text(\r\n            10, 110, text=\"D\", font=\"Times 16 italic bold\")\r\n\r\n        pcanvas.create_text(\r\n            10, 140, text=\"A\", font=\"Times 16 italic bold\")\r\n\r\n        pcanvas.create_text(\r\n            10, 170, text=\"E\", font=\"Times 16 italic bold\")\r\n\r\n        for i in range(30,20000,30):\r\n            pcanvas.create_line(\r\n                i,20,i,170,\r\n                fill='grey')\r\n\r\n    def convertCanv(self, pcoords):\r\n        # Returns the partition coords with pcoords as canvas coords (from event)\r\n        x = (pcoords[0]//30)-1\r\n        y = (pcoords[1]-20)//30\r\n        if(x < 0):\r\n            x = 0\r\n        if (y < 0):\r\n            y = 0\r\n        elif(y > 5):\r\n            y = 5\r\n        return([x,y])\r\n\r\n    def convertPartition(self, pcoords):\r\n        # Returns the canvas coords with pcoords as partition coords (from convertCanv())\r\n        return([(pcoords[0]+1)*30,(pcoords[1]*30+20)])\r\n\r\n    def isIn(self, p_converted_coords):\r\n        if (p_converted_coords[0] < 650 and\r\n            p_converted_coords[1] < 5 and\r\n            p_converted_coords[0] > 0 and\r\n            p_converted_coords[1] > 0):\r\n            return True\r\n        return False\r\n" } ]
3
skywalker610/Autonomous-Driving-Control
https://github.com/skywalker610/Autonomous-Driving-Control
db2c8dbb0024307580d88403f22a6a236e2ec229
ee23a0babceb31268470e0964a95b0105007ef7f
19d0f48565d96533f013fa2a4a510bc2f1cd37cb
refs/heads/main
2023-05-06T10:56:26.269036
2021-03-06T03:50:54
2021-03-06T03:50:54
341,632,208
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7598314881324768, "alphanum_fraction": 0.7724719047546387, "avg_line_length": 34.599998474121094, "blob_id": "61e95d410b161e5c3ccf7335276a244d2c59da42", "content_id": "c1d1cf78e309769b39084c25722eba9e9c3da887", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 712, "license_type": "no_license", "max_line_length": 265, "num_lines": 20, "path": "/README.md", "repo_name": "skywalker610/Autonomous-Driving-Control", "src_encoding": "UTF-8", "text": "# Control for VL22\n\nThis code is for drive by wire control of VL22787, includes drive by wire and control parts. All locate in control directory.\n\nCompile \n\nunder Control-AD \n\n\t$ catkin_make\n\nRun\n\nUnder Control-AD \n\n\t$ source devel/setup.bash\n\t$ roslaunch dbw dbw_control.launch\n\t\n\t\t \nNote: 1 dbw_control launch the gnss node, sbw_send node and truck_node node, receiving the gps data, control the steering wheel and support control algorithm\n 2 gps_common_msgs and hellocm_msgs are dependence files for truck.cpp, path_calib is for calibration of control, path_draw_Google can support drawing path on Google with given longitude and latitude data, path_recording is for recording gps data of waypoints.\n" }, { "alpha_fraction": 0.5791245698928833, "alphanum_fraction": 0.6077440977096558, "avg_line_length": 15.44444465637207, "blob_id": "0a0805c4ff6fa6c08a0516d0bb7943893fd87b07", "content_id": "02977b2d14a6b7346d13a677710583b9e6628728", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 594, "license_type": "no_license", "max_line_length": 80, "num_lines": 36, "path": "/src/control/src/targetspeed.cpp", "repo_name": "skywalker610/Autonomous-Driving-Control", "src_encoding": "UTF-8", "text": "#include <ros/ros.h>\n#include <iostream>\n#include \"std_msgs/Float64.h\"\n\nint main(int argc, char **argv)\n{\n std_msgs::Float64 StrAng;\n\n float input;\n\n ros::init(argc, argv, \"angle\");\n\n ros::NodeHandle n;\n\n ros::Publisher pub=n.advertise<std_msgs::Int16MultiArray>(\"targetspeed\",10);\n\n ros::Rate loop_rate(100); //Hz, 10Hz = 100ms\n\n while(ros::ok()){\n\n\tStrAng.data.clear();\n\t\n\tstd::cout<<\"Enter the target speed: \";\t\n\t\n\tstd::cin>>input;\n\n StrAng.data.push_back(input);\n\n\t//std::cout<<StrAng<<\"\\n\";\n\n\tpub.publish(StrAng);\n\n \tloop_rate.sleep();\n }\n return 0;\n}\n\t\n" }, { "alpha_fraction": 0.5647484660148621, "alphanum_fraction": 0.6258467435836792, "avg_line_length": 23.967159271240234, "blob_id": "f3542949c3a27cb5ca1afb50427bec01709516fd", "content_id": "21342eb3d856e8b3394216cfcb197621b0f71f35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 15205, "license_type": "no_license", "max_line_length": 216, "num_lines": 609, "path": "/src/control/src/bbw_send.cpp", "repo_name": "skywalker610/Autonomous-Driving-Control", "src_encoding": "UTF-8", "text": "// Isuzu Technical Center of America\n\n\n#include \"ros/ros.h\"\n#include \"std_msgs/String.h\"\n\n#include \"std_msgs/MultiArrayLayout.h\"\n#include \"std_msgs/MultiArrayDimension.h\"\n#include \"std_msgs/Float64.h\"\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <unistd.h>\n\n#include <net/if.h>\n#include <sys/ioctl.h>\n#include <sys/socket.h>\n\n#include <linux/can.h>\n#include <linux/can/raw.h>\n\n#include <iostream> // std::cout\n#include <thread> // std::thread\n\n\n/**\n * This tutorial demonstrates simple receipt of messages over the 
ROS system.\n */\n\n\nint16_t StrAng = 0; // not in use currently .....\nint8_t StrAng_byte1 = 0; // not in use currently .....\nint8_t StrAng_byte2 = 0; // not in use currently .....\n\nfloat target_speed = 0;\nfloat vehicle_speed = 0;\n\n\nbool brake_enable = 0;\nbool torque_enable = 0;\n\nfloat brake_demand_raw = 0; // range -15.687 ~ 15.687\nuint16_t brake_demand = ( brake_demand_raw + 15.687 ) * 2048; \nuint8_t brake_demand_byte1 = ( brake_demand & 0xff00 ) >> 8 ;\nuint8_t brake_demand_byte2 = brake_demand & 0xff;\n\n\n\nint nbytes; // for read can bus\n\n\n\nvoid chatterCallback(const std_msgs::Float64::ConstPtr& msg) \n{\n\n\ttarget_speed = msg->data;\n\n}\n\nvoid speed_control(){\n\n\tfloat speed_error = target_speed - vehicle_speed;\n\t\n\tif( speed_error < 0){\n\n\t\ttorque_enable = 0;\n\n\t\tbrake_enable = 1;\n\n\t\tint P_term = 2;\n\t\tbrake_demand_raw = speed_error*P_term;\n\t\tif( brake_demand_raw < -10){\n\t\t\tbrake_demand_raw = -10;\n\t\t}\n\t\tbrake_demand = ( brake_demand_raw + 15.687 ) * 2048; \n\t\tbrake_demand_byte1 = ( brake_demand & 0xff00 ) >> 8 ;\n\t\tbrake_demand_byte2 = brake_demand & 0xff;\n\t}else if (speed_error >= 0) {\n\t\t// this section for torque control in future\n\t\tbrake_enable = 0;\t\t\n\t\n\t}\n\n}\n\nint speed_read(can_frame &frame, int &s){\n\n\tnbytes = read(s, &frame, sizeof(struct can_frame));\n\n\tif (nbytes < 0) {\n\t perror(\"CAN Read Error ......\");\n\t return 1;\n\t}\n\n\tif (frame.can_id == (0x18fef100 | 0x80000000) ){ //read vehicle speed id\n\t\t\n\t\tuint16_t vehicle_speed_bytes = frame.data[2] | frame.data[1] << 8;\n\t\tvehicle_speed = vehicle_speed_bytes / 256.0;\n\n\t}\n\t\n\treturn 0;\n}\n\n\nvoid thread_EBR(can_frame &frame, int &s, int &counter, int &checksum){\n\n\t// External Brake Request\n\t//+++++hard code++++++//\n\t//int a[16] = {0,1,2,3,4,5,7,8,9,10,11,12,13,14,15,0};\n\n\tframe.can_id = 0x0c040b2a | 0x80000000;\n\tframe.can_dlc = 8;\n\tframe.data[0] = brake_demand_byte1;//0x19;\n\tframe.data[1] = brake_demand_byte2;//0x2f;\n\n\n\n\tif (brake_enable){\n\t\tframe.data[2] = 0xe0;//XBR_control_mode = 2 maximum\t\t\n\t}else{\n\t\tframe.data[2] = 0xc0;//XBR_control_mode = 0 override disable\n\t}\n\n\n\n\tframe.data[3] = 0x00;\n\tframe.data[4] = 0xff;\n\tframe.data[5] = 0xff;\n\tframe.data[6] = 0xff;\n\n\tchecksum=frame.data[0]+frame.data[1]+frame.data[2]+frame.data[3]+frame.data[4]+frame.data[5]+frame.data[6]+(counter&0x0f)+0x2a+0x0b+0x04+0x0c;\n\tchecksum=((checksum>>4)+checksum)&0x0f;\n\n\t//std::cout << \"checksum = \"<< checksum <<\"\\n\";\n\n\t//checksum= a[counter];\n\n\t//std::cout << \"checksum =\"<< checksum <<\"\\n\";\n\n\t/*\n\t//debug\n\tif(counter == 0){\n\t\tint A = frame.data[0]+frame.data[1]+frame.data[2]+frame.data[3]+frame.data[4]+frame.data[5]+frame.data[6] ;\n\t\tint B = counter&0x0f;\n\t\tint C = 0x2a+0x0b+0x04+0x0c;\n\t\tstd::cout << \"checksum A =\"<< A <<\"\\n\";\n\t\tstd::cout << \"checksum B =\"<< B <<\"\\n\";\n\t\tstd::cout << \"checksum C =\"<< C <<\"\\n\";\n\t\tstd::cout << \"A+B+C= =\"<< A+B+C <<\"\\n\";\n\t\tint D = A+B+C;\t\t\n\t\tint E = (A+B+C)>>4;\n\t\tint F = (D+E)&0x0f;\n\t\tstd::cout << \"checksum = \"<< D <<\"\\n\";\n\t\n\t}\t\n\t*/\n\n\tframe.data[7] = (checksum<<4) | counter;\n\t\n\tif (write(s, &frame, sizeof(struct can_frame)) != sizeof(struct can_frame)) {\n\t\tperror(\"EBR Write error1\");\n\t}\n\n\n\tROS_INFO(\"CAN ID: 0x%08X [%d] %02x %02x %02x %02x %02x %02x %02x %02x\",frame.can_id, frame.can_dlc, 
frame.data[0],frame.data[1],frame.data[2],frame.data[3],frame.data[4],frame.data[5],frame.data[6],frame.data[7]);\n\n}\n\nvoid thread_ADR(can_frame &frame, int &s){\n\n\t// ACC Display Request\n\tframe.can_id = 0x18ff1211 | 0x80000000;\n\tframe.can_dlc = 8;\n\tframe.data[0] = 0xfd;\n\tframe.data[1] = 0xff;\n\tframe.data[2] = 0x01;\n\tframe.data[3] = 0xff; // signal: set_vehicle_speed_indication: 0:disable; 1~FA:enable with speed\n\tframe.data[4] = 0x00;\n\n\tif(brake_enable){\n\t\tframe.data[5] = 0xbf; // alarm on\n\t}else{\n\t\tframe.data[5] = 0x3f; // alarm off\n\t}\n\n\tframe.data[6] = 0x1f;// signal: (bit 6-8) acc_main_set\n\tframe.data[7] = 0x00;\n\n\n\tif (write(s, &frame, sizeof(struct can_frame)) != sizeof(struct can_frame)) {\n\t\tperror(\"Write error\");\n\t}\n\t//std::cout << \"CAN ID ADR is transmitting ...........................................\\n\";\n\t//ROS_INFO(\"CAN ID: 0x%08X [%d] %02x %02x %02x %02x %02x %02x %02x %02x\",frame.can_id, frame.can_dlc, frame.data[0],frame.data[1],frame.data[2],frame.data[3],frame.data[4],frame.data[5],frame.data[6],frame.data[7]);\n\n}\n\nvoid thread_AD(can_frame &frame, int &s){\n\n\t// Transmit ACC Data\n\tframe.can_id = 0x18ff1311 | 0x80000000;\n\tframe.can_dlc = 8;\n\tframe.data[0] = 0xc8;\n\tframe.data[1] = 0x00;\n\tframe.data[2] = 0x7d;\n\tframe.data[3] = 0x7d;\n\tframe.data[4] = 0xfa;\n\tframe.data[5] = 0x4c; \n\tframe.data[6] = 0x00;\n\tframe.data[7] = 0x00;\n\n\n\tif (write(s, &frame, sizeof(struct can_frame)) != sizeof(struct can_frame)) {\n\t\tperror(\"Write error\");\n\t}\n\n\t//std::cout << \"CAN ID AD is transmitting ...........................................\\n\";\n\t//ROS_INFO(\"CAN ID: 0x%08X [%d] %02x %02x %02x %02x %02x %02x %02x %02x\",frame.can_id, frame.can_dlc, frame.data[0],frame.data[1],frame.data[2],frame.data[3],frame.data[4],frame.data[5],frame.data[6],frame.data[7]);\n\n}\n\nvoid thread_ACR2(can_frame &frame, int &s, int &counter){\n\n\n\t// Transmit ACR2 (acc control request)\n\tframe.can_id = 0x0cff5211 | 0x80000000;\n\tframe.can_dlc = 8;\n\tframe.data[0] = 0x14;//14:speed ctrl; 24: torque ctrl\n\tframe.data[1] = 0x00;//ff\n\tframe.data[2] = 0x00;//ff\n\tframe.data[3] = 0xfa;//ff //signal: targe_cruise_set_speed\n\tframe.data[4] = 0xff;//ff //signal: targe_cruise_set_speed\n\tframe.data[5] = 0x00;\n\tframe.data[6] = 0x10;//80\n\tframe.data[7] = counter; //this is a counter or not ??????????????????\n\n\n\tif (write(s, &frame, sizeof(struct can_frame)) != sizeof(struct can_frame)) {\n\t\tperror(\"Write error\");\n\t}\n\n\t//std::cout << \"CAN ID ACR2 is transmitting ................................... \\n\";\n\t//ROS_INFO(\"CAN ID: 0x%08X [%d] %02x %02x %02x %02x %02x %02x %02x %02x\",frame.can_id, frame.can_dlc, frame.data[0],frame.data[1],frame.data[2],frame.data[3],frame.data[4],frame.data[5],frame.data[6],frame.data[7]);\n}\n\nvoid thread_LDWSD(can_frame &frame, int &s){\n\n\t// Transmit LDWS Data\n\tframe.can_id = 0x10ff5311 | 0x80000000;\n\tframe.can_dlc = 8;\n\tframe.data[0] = 0x00;\n\tframe.data[1] = 0x06;\n\tframe.data[2] = 0x0f;\n\tframe.data[3] = 0x0f;\n\tframe.data[4] = 0xfa; \n\tframe.data[5] = 0xfa; \n\tframe.data[6] = 0xff; //a0 not 20, see kenji-san reply\n\tframe.data[7] = 0xff;\n\n\n\tif (write(s, &frame, sizeof(struct can_frame)) != sizeof(struct can_frame)) {\n\t\tperror(\"Write error\");\n\t}\n\n\n\t//std::cout << \"CAN ID LDWSD is transmitting ................................... 
\\n\";\n\t//ROS_INFO(\"CAN ID: 0x%08X [%d] %02x %02x %02x %02x %02x %02x %02x %02x\",frame.can_id, frame.can_dlc, frame.data[0],frame.data[1],frame.data[2],frame.data[3],frame.data[4],frame.data[5],frame.data[6],frame.data[7]);\n\n\n}\n\nvoid thread_CAMINFO(can_frame &frame, int &s){\n\n\n\t// Transmit \n\tframe.can_id = 0x10FF6311 | 0x80000000;\n\tframe.can_dlc = 8;\n\tframe.data[0] = 0x00;\n\tframe.data[1] = 0x0c;//0c\n\tframe.data[2] = 0xff;//ff\n\tframe.data[3] = 0xff;//ff\n\tframe.data[4] = 0x7f;//7f \n\tframe.data[5] = 0x3f;//07 \n\tframe.data[6] = 0xff;//ff\n\tframe.data[7] = 0xff;//ff\n\n\n\tif (write(s, &frame, sizeof(struct can_frame)) != sizeof(struct can_frame)) {\n\t\tperror(\"Write error\");\n\t}\n\n\n\t//std::cout << \"CAN ID CAMINFO is transmitting ................................... \\n\";\n\t//ROS_INFO(\"CAN ID: 0x%08X [%d] %02x %02x %02x %02x %02x %02x %02x %02x\",frame.can_id, frame.can_dlc, frame.data[0],frame.data[1],frame.data[2],frame.data[3],frame.data[4],frame.data[5],frame.data[6],frame.data[7]);\n\n\n\n}\n\nvoid thread_TSC1_FACC(can_frame &frame, int &s, int &counter, int &checksum){\n\n\t// Transmit torque/speed control #1 (FACC)\n\tframe.can_id = 0x0C000011 | 0x80000000;\n\tframe.can_dlc = 8;\n\tif(torque_enable){\n\t\tframe.data[0] = 0xfe; // torque ctrl: enable:fe; disable:fc?\n\t}else{\n\t\tframe.data[0] = 0xfc; // torque ctrl: enable:fe; disable:fc?\n\t}\n\tframe.data[1] = 0x70; // 70 request_speed_limit_speed: 0-faff: 0-8031.875 rpm\n\tframe.data[2] = 0xff; // ff request_speed_limit_speed: 0-faff: 0-8031.875 rpm\n\tframe.data[3] = 0xa0; // a0 request_torque_limit: 0-fa: -125% - 125%\n\tframe.data[4] = 0xff;\n\tframe.data[5] = 0xf0;\n\tframe.data[6] = 0xff;\n\tchecksum=frame.data[0]+frame.data[1]+frame.data[2]+frame.data[3]+frame.data[4]+frame.data[5]+frame.data[6]+(counter&0x0f)+0x11+0x00+0x00+0x0c;\n\tchecksum=(((checksum>>6)&0x03)+(checksum>>3)+checksum)&0x07;\n\tframe.data[7] = (checksum<<4) | counter;\n\n\n\tif (write(s, &frame, sizeof(struct can_frame)) != sizeof(struct can_frame)) {\n\t\tperror(\"Write error\");\n\t}\n\n\n\t//std::cout << \"CAN ID TSC1_FACC is transmitting ................................... \\n\";\n\t//ROS_INFO(\"CAN ID: 0x%08X [%d] %02x %02x %02x %02x %02x %02x %02x %02x\",frame.can_id, frame.can_dlc, frame.data[0],frame.data[1],frame.data[2],frame.data[3],frame.data[4],frame.data[5],frame.data[6],frame.data[7]);\n\n\n\n}\n\nvoid thread_CAMINFO2(can_frame &frame, int &s){\n\n\t// Transmit \n\tframe.can_id = 0x18ffa111 | 0x80000000;\n\tframe.can_dlc = 8;\n\tframe.data[0] = 0x0f;\n\tframe.data[1] = 0x0f;\n\tframe.data[2] = 0xff;\n\tframe.data[3] = 0xa0; // what's the logic? need to test various set speed? ...\n\tframe.data[4] = 0x3f; \n\tframe.data[5] = 0xff; \n\tframe.data[6] = 0xff;\n\tframe.data[7] = 0xff;\n\n\n\tif (write(s, &frame, sizeof(struct can_frame)) != sizeof(struct can_frame)) {\n\t\tperror(\"Write error\");\n\t}\n\n\t//std::cout << \"CAN ID CAMINFO2 is transmitting ................................... 
\\n\";\n\t//ROS_INFO(\"CAN ID: 0x%08X [%d] %02x %02x %02x %02x %02x %02x %02x %02x\",frame.can_id, frame.can_dlc, frame.data[0],frame.data[1],frame.data[2],frame.data[3],frame.data[4],frame.data[5],frame.data[6],frame.data[7]);\n\n\n}\n\nvoid thread_AD2(can_frame &frame, int &s){\n\n\n\t// Transmit ACC Data2 \n\tframe.can_id = 0x18ff3511 | 0x80000000;\n\tframe.can_dlc = 8;\n\tframe.data[0] = 0x00;\n\tframe.data[1] = 0x00;\n\tframe.data[2] = 0x00;\n\tframe.data[3] = 0xff;\n\tframe.data[4] = 0xff; \n\tframe.data[5] = 0xff; \n\tframe.data[6] = 0xff;\n\tframe.data[7] = 0xff;\n\n\n\tif (write(s, &frame, sizeof(struct can_frame)) != sizeof(struct can_frame)) {\n\t\tperror(\"Write error\");\n\t}\n\n\t//std::cout << \"CAN ID AD2 is transmitting ................................... \\n\";\n\t//ROS_INFO(\"CAN ID: 0x%08X [%d] %02x %02x %02x %02x %02x %02x %02x %02x\",frame.can_id, frame.can_dlc, frame.data[0],frame.data[1],frame.data[2],frame.data[3],frame.data[4],frame.data[5],frame.data[6],frame.data[7]);\n\n\n}\n\n\n\n\nint main(int argc, char **argv)\n{\n\t/**\n\t* The ros::init() function needs to see argc and argv so that it can perform\n\t* any ROS arguments and name remapping that were provided at the command line.\n\t* For programmatic remappings you can use a different version of init() which takes\n\t* remappings directly, but for most command-line programs, passing argc and argv is\n\t* the easiest way to do it. The third argument to init() is the name of the node.\n\t*\n\t* You must call one of the versions of ros::init() before using any other\n\t* part of the ROS system.\n\t*/\n\tros::init(argc, argv, \"sbw_send\");\n\n\t/**\n\t* NodeHandle is the main access point to communications with the ROS system.\n\t* The first NodeHandle constructed will fully initialize this node, and the last\n\t* NodeHandle destructed will close down the node.\n\t*/\n\tros::NodeHandle n;\n\n\n\t/*if (close(s) < 0) {\n\tperror(\"Close\");\n\t}*/\n\t/**\n\t* The subscribe() call is how you tell ROS that you want to receive messages\n\t* on a given topic. This invokes a call to the ROS\n\t* master node, which keeps a registry of who is publishing and who\n\t* is subscribing. Messages are passed to a callback function, here\n\t* called chatterCallback. subscribe() returns a Subscriber object that you\n\t* must hold on to until you want to unsubscribe. When all copies of the Subscriber\n\t* object go out of scope, this callback will automatically be unsubscribed from\n\t* this topic.\n\t*\n\t* The second parameter to the subscribe() function is the size of the message\n\t* queue. If messages are arriving faster than they are being processed, this\n\t* is the number of messages that will be buffered up before beginning to throw\n\t* away the oldest ones.\n\t*/\n\tros::Subscriber sub = n.subscribe(\"target_speed\", 1000, chatterCallback); \n\n\t/**\n\t* ros::spin() will enter a loop, pumping callbacks. With this version, all\n\t* callbacks will be called from within this thread (the main one). 
ros::spin()\n\t* will exit when Ctrl-C is pressed, or the node is shutdown by the master.\n\t*/\n\t//ros::spin();\n\n\tros::Rate loop_rate(100); //Hz, 50Hz = 10ms\n\n\n\t// add threads\n\n\t//std::thread first (thread_EBR); \n\t/*\t\n\tstd::thread second (thread_ADR); \n\tstd::thread third (thread_AD); \n\tstd::thread fouth (thread_ACR2); \n\tstd::thread fifth (thread_LDWSD); \n\tstd::thread sixth (thread_CAMINFO); \n\tstd::thread seventh (thread_TSC1_FACC); \n\tstd::thread eighth (thread_CAMINFO2); \n\tstd::thread ninth (thread_AD2); \n\t*/\n\n\n\n\n\tstruct sockaddr_can addr; // socketCAN\n\tstruct ifreq ifr;\n\tstruct can_frame frame;\n\tint s;\n\n\n\n\tif ((s = socket(PF_CAN, SOCK_RAW, CAN_RAW)) < 0) {\n\tperror(\"Socket error\");\n\t}\n\tstrcpy(ifr.ifr_name, \"can0\" );\n\tioctl(s, SIOCGIFINDEX, &ifr);\n\tmemset(&addr, 0, sizeof(addr));\n\taddr.can_family = AF_CAN;\n\taddr.can_ifindex = ifr.ifr_ifindex;\n\tif (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {\n\tperror(\"Bind error\");\n\t}\n\n\n\n\tint i = 1;\n\tint counter_EBR = 0;\n\tint counter_ACR2 = 0;\n\tint counter_TSC1 = 0;\n\tint checksum_EBR;\n\tint checksum_TSC1;\n\n\twhile (ros::ok())\n\t{\n\n\t\tros::spinOnce(); // if there is not data transmitted from the topic, spinOnce() will take a long time! and the actual execution time will be longer than loop_rate()!\n\n\t\tstd::cout << \"Main loop number = \" << i << \" \\n\";\n\n\n\t\t// for 10ms messages\n\n\t\tif(i%1 == 0){\n\n\t\t\t// transmit ACR2\n\n\t\t\tthread_ACR2(frame, s, counter_ACR2);\n\n\t\t\tif(counter_ACR2 >= 255){\n\t\t\t\tcounter_ACR2 = 0;\t\t\n\t\t\t}else{\n\t\t\t\tcounter_ACR2++;\n\t\t\t}\n\n\n\t\t\t// transmit CAMINFO\n\n\t\t\tthread_CAMINFO(frame, s);\n\n\n\t\t\t// transmit TSC1_FACC torque/speed control #1\n\n\t\t\tthread_TSC1_FACC(frame, s, counter_TSC1, checksum_TSC1);\n\n\t\t\tif(counter_TSC1 >= 7){\n\t\t\t\tcounter_TSC1 = 0;\t\t\n\t\t\t}else{\n\t\t\t\tcounter_TSC1++;\n\t\t\t}\n\n\n\n\n\t\t\t// read vehicle speed\n\t\t\tspeed_read(frame, s);\n\n\n\t\t\t// execute speed controller\n\t\t\tspeed_control();\n\n\n\n\n\n\n\n\t\t}\t\t\n\n\n\n\n\t\t// for 100 ms messages\n\n\t\tif(i%10 == 0){\n\t\t\n\t\t\t// transmit ADR\n\n\t\t\tthread_ADR(frame, s);\n\n\n\t\t\t// transmit AD\n\n\t\t\tthread_AD(frame, s);\n\n\n\t\t\t// transmit LDWSD\n\n\t\t\tthread_LDWSD(frame, s);\n\n\n\t\t\t\n\t\t\t// transmit CAMINFO2\n\t\t\tthread_CAMINFO2(frame, s);\n\n\n\t\t\t// transmit AD2\n\n\t\t\tthread_AD2(frame, s);\n\t\t\t\n\n\t\t}\n\n\n\t\t// for 20 ms messages\n\n\t\tif(i%2 == 0){\n\n\n\t\t\t// transmit EBR\n\t\t\n\t\t\tthread_EBR(frame, s, counter_EBR, checksum_EBR);\n\n\t\t\tif(counter_EBR >= 15){\n\t\t\t\tcounter_EBR = 0;\t\t\n\t\t\t}else{\n\t\t\t\tcounter_EBR++;\n\t\t\t}\n\n\t\t}\n\n\n\n\t\ti++;\n\n\n\n\n\t\t\n\t\tloop_rate.sleep();\n\n\n\n\n\t}\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.5679985880851746, "alphanum_fraction": 0.6062832474708557, "avg_line_length": 24.679447174072266, "blob_id": "04abe433741e6cf9e4ec13834e632dae444e0e1e", "content_id": "b00adfd14ff30536d66bd9902b33818c1649114a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 16743, "license_type": "no_license", "max_line_length": 263, "num_lines": 652, "path": "/src/path_recording/main.cpp", "repo_name": "skywalker610/Autonomous-Driving-Control", "src_encoding": "UTF-8", "text": "//1023 mx\n\n#include <ros/ros.h>\n#include <ros/package.h> // for ros::package::getPath()\n\n#include \"std_msgs/String.h\" // for sending msg type: 
std_msgs/String\n#include \"std_msgs/Int16.h\" // for sending msg type: std_msgs/String\n#include \"std_msgs/Float32MultiArray.h\"\n#include \"std_msgs/Float64MultiArray.h\"\n\n#include <iostream>\n#include <fstream>\n#include <sstream>\n#include <iomanip> // for cout double with precision(6)\n#include <cmath>\n\n\n#include <stdio.h>\n#include <errno.h>\n#include <stdlib.h>\n#include <sys/types.h>\n#include <netinet/in.h>\n\n\n\n#include \"boost/asio.hpp\"\n#include <boost/array.hpp>\n#include <boost/bind.hpp>\n\n#include <thread>\n\n#include <math.h>\n#include <chrono>\n#include <vector>\n\n\n\nusing namespace std;\n\n\n#include \"std_msgs/Float64.h\" //ROS\n\n#include <visualization_msgs/Marker.h>//for publishing markers to rviz\n\n#include <tf/transform_broadcaster.h> // for broadcasting a frame\n\n#include <tf/transform_datatypes.h> // for publishing vehicle yaw using quaternion\n\n\n\ndouble gnss_arr[2];\nfloat hedg_arr[1];\n\nbool pos_up = false;\nbool ang_up = false;\n\n\n\nfloat hedg2ENU (float hedg) // for simulation, range from -Pi to Pi\n{\n\thedg = 3.1415926f*2.0f - hedg/180.0f*3.1415926f;\n\thedg = hedg - 3.1415926f*3.0f/2.0f;\n\tif ( hedg < -3.1415926f ){\n\t\thedg = hedg + 2.0f*3.1415926f;\n\t}\n\treturn hedg;\n}\n\n\n\n\nvoid gnssCallback(const std_msgs::Float64MultiArray::ConstPtr& msg)\n{\n\n\tint i = 0;\n\n\n\tfor(std::vector<double>::const_iterator it = msg->data.begin(); it != msg->data.end(); ++it)\n\t{\n\t gnss_arr[i] = *it;\n\t i++;\n\t}\n\n\tint j = 0;\n\tfor(j = 0; j < 2; j++)\n\t{\n\t\tprintf(\"%0.7f, \", gnss_arr[j]);\n\t}\n\tprintf(\"\\n\");\n\n\tROS_INFO(\"I heard gnss: [%0.7f, %0.7f]\", gnss_arr[0],gnss_arr[1]);\n\n\tpos_up = true;\n\n\treturn;\n}\n\nvoid hedgCallback(const std_msgs::Float32MultiArray::ConstPtr& msg)\n{\n\n\tint i = 0;\n\n\n\tfor(std::vector<float>::const_iterator it = msg->data.begin(); it != msg->data.end(); ++it)\n\t{\n\t hedg_arr[i] = *it;\n\t i++;\n\t}\n\n\tint j = 0;\n\tfor(j = 0; j < 1; j++)\n\t{\n\t\tprintf(\"%f, \", hedg_arr[j]);\n\t}\n\tprintf(\"\\n\");\n\n\tROS_INFO(\"I heard heading: [%f]\", hedg_arr[0]);\n\n\tang_up = true;\n\n\treturn;\n}\n\nvoid GNSS_read()\n{\n\twhile(ros::ok())\n\t{\n\t\tros::spinOnce();\n\t}\n}\n\n\n\nint main ( int argc, char**argv){\n\n\t//Initialize the ROS system.\n\tros::init(argc, argv, \"gps_recording\");\n\n\t// Establish this program as a ROS node.\n\tros::NodeHandle n;\n\n\t// Create publisher object\n\tros::Publisher marker_pub = n.advertise<visualization_msgs::Marker>(\"visualization_markers\", 1000);\n\n\tros::Publisher float_pub = n.advertise<std_msgs::Float64>(\"float\", 1000);\n\n\tros::Publisher str_pub = n.advertise<std_msgs::Int16>(\"steering_angle\", 1000);\n\n\n\tros::Subscriber gnss_sub = n.subscribe(\"gnss_msg\", 1, gnssCallback);\n\tros::Subscriber hedg_sub = n.subscribe(\"ang_msg\", 1, hedgCallback);\n\n\n\tros::spinOnce();\n\n\t// thread GNSSread(GNSS_read); //????????????????????\n\n \tros::Rate rate(30);\n\n\t//Send some output as a log message.\n\tROS_INFO_STREAM(\"Hello, ROS!\");\n\n\t// Broadcast a frame\n\ttf::TransformBroadcaster br;\n\ttf::Transform transform;\n\n\n\t//Parameters\n\n\tint i = 1;\n\n\tdouble lon, lat;\n\tfloat x0, y0, hedg, x0_pre, y0_pre; // vehicle ENU coordinate\n\tfloat x0_frt, y0_frt;// vehicle front head\n\tfloat d_frt = 3.0 ; // vehicle front head distance (m)\n\n\tdouble wps_lon[3000], wps_lat[3000];\n\tfloat wps_x_raw[3000], wps_y_raw[3000];\n\n\tdouble in_lat, in_lon;\n\tint lat_num;\n\tint lon_num;\n\n\tfloat wps_x[3000], wps_y[3000]; \n\tint 
wps_num = 0;\n\n\tfloat wps_step;\n\n\n\tdouble lat_ref, lon_ref;\n\tint idx_glb, idx;\n\t//float x1, x2, y1, y2, x3, y3;\n\tdouble x1, x2, y1, y2, x3, y3, x4, y4;\n\tfloat ang1, ang2;\n\tfloat d_v2seg_min = 1000;\n\tfloat d_v2seg;\n\n\tint wps_idx; //global 0, 1, 2, ...\n\n\n\tfloat d_o2v; // circle center to vechile\n\tint Kp, Kd;\n\tfloat offset_ctrl;\n\tfloat offset_ctrl_pre = 0;\n\tfloat d_offset;\n\tfloat str_step_ref;\n\tfloat str_step;\n\tint str_cmd; // must be int\n\tfloat d_p2line;\n\tint side2line;\n\n\tfloat theta1, theta3, d_theta;// for plotting trajectory plan\n\tfloat traj_x[100], traj_y[100];\n\n\n\tfloat d_travel = 0.1;//simulation\n\n\t//double add_x0, add_y0, add_hedg; // simulation\n\tfloat add_x0, add_y0, add_hedg; // simulation\n\tfloat add_cx0, add_cy0, add_r;\n\n\n\n\n\t// print to user\n\tcout << \"Hello World!\" << endl;\n\n\n\n\t// Geodetic to ENU\n\tlon_ref = -83.5005484;\n\tlat_ref = 42.3842014;\n\n\n\n\n\n\t// Get GPS =================================================================================\n\n\n\twhile (pos_up != true && ang_up != true) // ensure callback function is receiving data\n\t{\n\t\tros::spinOnce();\n\t\t\n\t}\n\n\n\t// while (lat == 0 || lon == 0) // ensure correct data received \n\t// {\n\t// \tros::spinOnce();\n\t// \tprintf(\"lat = %0.7f\\n\", lat);\n\t// }\n\t// exit(0);\n\n\tros::spinOnce(); // receiving updated data\n\n\tlat = gnss_arr[0];\t\t\t\t\t\t\n\tlon = gnss_arr[1];\t\t\t\t\t\t\n\thedg = hedg_arr[0];\t\n\n\n\tx0 = (lon - lon_ref)*82230.670304; //\n\ty0 = (lat - lat_ref)*111132.944444; // lon_lat difference transfer into distance difference\n\thedg = hedg2ENU(hedg);// convert gnss orientation from 0, 360 to ENU orientation from -pi, pi\n\tx0_frt = x0 + d_frt*cos(hedg); \n\ty0_frt = y0 + d_frt*sin(hedg);\n\n\t\n\tx0_pre = x0;\n\ty0_pre = y0;\n\t\n\t/*\n\tcout << \"x0\" << \":\" << x0 << \"\\n\";\n\tcout << \"y0\" << \":\" << y0 << \"\\n\";\n\t*/\n\t\n\n\n\n\t// While loop\n\twhile(ros::ok()){\n\n\t\tcout<<\"while loop: Value of variable i is: \"<<i << \"-------------------------------------------------------------------\" <<endl;\n\t\ti++;\n\t\t// exit(0);\n\n\t\t// Publish waypoints and lables for rviz visualization==========================================================================\n\n\n\n\t\t// Get GPS\n\n\n\t\tpos_up = false;\n\t\tang_up = false;\n\n\n\t\twhile (pos_up != true && ang_up != true) // ensure callback function is receiving data\n\t\t{\n\t\t\tros::spinOnce();\n\t\t}\n\n\n\t\twhile (lat == 0 || lon == 0) // ensure correct data received \n\t\t{\n\t\t\tros::spinOnce();\n\t\t}\n\n\t\tros::spinOnce(); // receiving updated data\n\n\t\tlat = gnss_arr[0];\t\t\t\t\t\t\n\t\tlon = gnss_arr[1];\t\t\t\t\t\t\n\t\thedg = hedg_arr[0];\t\n\t\t\n\t\tprintf(\"lat = %0.7f\\n\", lat);\n\t\tprintf(\"lon = %0.7f\\n\", lon);\n\t\tprintf(\"heading = %f\\n\", hedg);\n\n\n\t\tx0 = (lon - lon_ref)*82230.670304;\n\t\ty0 = (lat - lat_ref)*111132.944444;\n\t\thedg = hedg2ENU(hedg);// convert gnss orientation from 0, 360 to ENU orientation from -pi, pi\n\n\t\tprintf(\"x0: %f, y0: %f\\n\",x0, y0);\n\n\n\t\tx0_frt = x0 + d_frt*cos(hedg); \n\t\ty0_frt = y0 + d_frt*sin(hedg);\n\n\t\twps_step = (x0 - x0_pre)*(x0 - x0_pre) + (y0 - y0_pre)*(y0 - y0_pre);\n\n\t\t//exit(0); // debug -------------------------------------------------\n\n\n\n\t\tif(i==1){\n\t\t\tofstream write_lon;\n\t\t\twrite_lon.open(\"lon_raw.txt\"); // to clear data in lon_raw.txt\t\t\t\n\t\t\twrite_lon.close();\n\n\t\t\tofstream 
write_lat;\n\t\t\twrite_lat.open(\"lat_raw.txt\"); // to clear data in lat_raw.txt\t\n\t\t\twrite_lat.close();\n\t\t}\n\n\n\n\t\tif(wps_step >= 3){\n\t\t\tofstream write_lon;\n\t\t\twrite_lon.open(\"lon_raw.txt\", ios::app); // adding ios:app will write data below the last line\n\t\t\twrite_lon << std::fixed << std::setprecision(7) << lon << endl;\n\t\t\twrite_lon.close();\n\n\t\t\tofstream write_lat;\n\t\t\twrite_lat.open(\"lat_raw.txt\", ios::app); // adding ios:app will write data below the last line\n\t\t\twrite_lat << std::fixed << std::setprecision(7) << lat << endl;\n\t\t\twrite_lat.close();\n\n\t\t\tx0_pre = x0;\n\t\t\ty0_pre = y0;\n\n\n\t\t\t// publish vehicle's coordinates using visualization_msgs::Marker::POINTS\n\t\t\tvisualization_msgs::Marker points;\n\t\t\tpoints.header.frame_id = \"frame\";\n\t\t\tpoints.header.stamp = ros::Time::now();\n\t\t\tpoints.ns = \"waypoints\";\n\t\t\tpoints.action = visualization_msgs::Marker::ADD;\n\t\t\tpoints.pose.orientation.w = 1.0;\n\t\t\tpoints.id = i; // %Tag(ID)%\n\t\t\tpoints.type = visualization_msgs::Marker::POINTS; \n\t\t\tpoints.scale.x = 2;// %Tag(SCALE)% POINTS markers use x and y scale for width/height respectively\n\t\t\tpoints.scale.y = 2;\n\t\t\tpoints.color.g = 1.0f;// %Tag(COLOR)% // Points are green\n\t\t\tpoints.color.a = 1.0;\n\n\t\t\tgeometry_msgs::Point p;\n\t\t\tp.x = x0;\n\t\t\tp.y = y0;\n\t\t\tpoints.points.push_back(p);\n\n\t\t\tmarker_pub.publish(points);\n\t\t}\n\n\n\n\t\t//rate.sleep();\n\n/*\n\n\n\n\t\tif(i<50){ // need to publish for 10+ times and then received by rviz, why?\n\t\t\t// Publish waypints using visualization_msgs::Marker::POINTS================================\n\t\t\tvisualization_msgs::Marker points;// %Tag(MARKER_INIT)%\n\t\t\tpoints.header.frame_id = \"frame\";\n\t\t\tpoints.header.stamp = ros::Time::now();\n\t\t\tpoints.ns = \"waypoints\";\n\t\t\tpoints.action = visualization_msgs::Marker::ADD;\n\t\t\tpoints.pose.orientation.w = 1.0;\n\t\t\tpoints.id = 0; // %Tag(ID)%\n\t\t\tpoints.type = visualization_msgs::Marker::POINTS; // %Tag(TYPE)%\n\t\t\tpoints.scale.x = 0.2;// %Tag(SCALE)% POINTS markers use x and y scale for width/height respectively\n\t\t\tpoints.scale.y = 0.2;\n\t\t\tpoints.color.g = 1.0f;// %Tag(COLOR)% // Points are green\n\t\t\tpoints.color.a = 1.0;\n\n\t\t\tfor(int j = 0; j < wps_num; j++){\n\t\t\t\tgeometry_msgs::Point p;\n\t\t\t\tp.x = wps_x[j];\n\t\t\t\tp.y = wps_y[j];\n\t\t\t\tpoints.points.push_back(p);\n\t\t\t}\n\t\t\tmarker_pub.publish(points);\n\n\t\t\t// Publish raw waypints using visualization_msgs::Marker::POINTS=============================\n\t\t\tvisualization_msgs::Marker points1;// %Tag(MARKER_INIT)%\n\t\t\tpoints1.header.frame_id = \"frame\";\n\t\t\tpoints1.header.stamp = ros::Time::now();\n\t\t\tpoints1.ns = \"waypoints_raw\";\n\t\t\tpoints1.action = visualization_msgs::Marker::ADD;\n\t\t\tpoints1.pose.orientation.w = 1.0;\n\t\t\tpoints1.id = 0; // %Tag(ID)%\n\t\t\tpoints1.type = visualization_msgs::Marker::POINTS; // %Tag(TYPE)%\n\t\t\tpoints1.scale.x = 0.5;// %Tag(SCALE)% POINTS markers use x and y scale for width/height respectively\n\t\t\tpoints1.scale.y = 0.5;\n\t\t\tpoints1.color.g = 1.0f;// %Tag(COLOR)% // Points are green\n\t\t\tpoints1.color.r = 0.0f;// red\n\t\t\tpoints1.color.a = 1.0;\n\n\t\t\tfor(int j = 0; j < wps_num_raw; j++){\n\t\t\t\tgeometry_msgs::Point p1;\n\t\t\t\tp1.x = wps_x_raw[j];\n\t\t\t\tp1.y = wps_y_raw[j];\n\t\t\t\tpoints1.points.push_back(p1);\n\t\t\t}\n\t\t\tmarker_pub.publish(points1);\n\n\n\t\t\t// Publish waypoint's label using 
Marker\n\t\t\tfor(int j = 0; j < wps_num; j++){\n\t\t\t\tvisualization_msgs::Marker label;\n\t\t\t\tlabel.header.frame_id=\"frame\";\n\t\t\t\tlabel.header.stamp = ros::Time::now();\n\t\t\t\tlabel.ns = \"waypoint_label\";\n\t\t\t\tlabel.action = visualization_msgs::Marker::ADD;\n\t\t\t\tlabel.pose.orientation.w = 1.0;\n\t\t\t\tlabel.id =j; // Marker id should be unique. Any marker sent with the same namespace and id will overwrite the old one\n\t\t\t\tlabel.type = visualization_msgs::Marker::TEXT_VIEW_FACING;\n\n\t\t\t\tlabel.scale.x = 0.5;\n\t\t\t\tlabel.scale.y = 0.5;\n\t\t\t\tlabel.scale.z = 0.5;\n\n\t\t\t\tlabel.color.b = 1.0f;\n\t\t\t\tlabel.color.g = 1.0f;\n\t\t\t\tlabel.color.r = 1.0f;\n\t\t\t\tlabel.color.a = 1.0;\n\n\t\t\t\tlabel.pose.position.x = wps_x[j]; //???????????????????????????????????????????\n\t\t\t\tlabel.pose.position.y = wps_y[j];\n\t\t\t\tlabel.pose.position.z = -2.0;\n\t\t\t\tlabel.pose.orientation.x = 0.0;\n\t\t\t\tlabel.pose.orientation.y = 0.0;\n\t\t\t\tlabel.pose.orientation.z = 0.0;\n\t\t\t\tlabel.pose.orientation.w = 1.0;\n\t\t\t\tostringstream str;\n\t\t\t\tstr<<j;\n\t\t\t\tlabel.text=str.str();\n\t\t\t\tmarker_pub.publish(label);\n\t\t\t}\n\n\t\t}\n\n\n\t\t// publish vehicle's coordinates: Marker - line_list ====================================================\n\t\tvisualization_msgs::Marker line_list;// %Tag(MARKER_INIT)%\n\t\tline_list.header.frame_id = \"frame\";\n\t\tline_list.header.stamp = ros::Time::now();\n\t\tline_list.ns = \"mkz_line_list\";\n\t\tline_list.action = visualization_msgs::Marker::ADD;\n\t\tline_list.pose.orientation.w = 1.0;\n\t\tline_list.id = i; // Marker id should be unique\n\t\tline_list.type = visualization_msgs::Marker::LINE_LIST;\n\t\tline_list.scale.x = 0.2;// LINE_STRIP/LINE_LIST markers use only the x component of scale, for the line width\n\t\tline_list.color.r = 1.0; //red\n\t\tline_list.color.a = 1.0;\n\t\tgeometry_msgs::Point p;\n\t\tp.x = x0;\n\t\tp.y = y0;\n\t\tline_list.points.push_back(p);\n\t\tp.x = x0_frt;\n\t\tp.y = y0_frt;\n\t\tline_list.points.push_back(p);\n\t\tmarker_pub.publish(line_list);\n\n\t\t// publish 3d vehicle frame: Marker - cube =================================================================\n\t\tvisualization_msgs::Marker box1;\n\t\tvisualization_msgs::Marker box2;\n\t\tbox1.header.frame_id = box2.header.frame_id = \"frame\";\n\t\tbox1.header.stamp = box2.header.stamp = ros::Time::now();\n\t\tbox1.ns = \"3d_vehicle_cubic1\";\n\t\tbox2.ns = \"3d_vehicle_cubic2\";\n\t\tbox1.id = 0;\n\t\tbox2.id = 0;\n\t\tuint32_t shape = visualization_msgs::Marker::CUBE;\n\t\tbox1.type = box2.type = shape;\n\t\tbox1.action = box2.action = visualization_msgs::Marker::ADD;\n\t\tbox1.pose.position.x = x0;\n\t\tbox1.pose.position.y = y0;\n\t\tbox1.pose.position.z = 2;\n\t\tbox2.pose.position.x = x0_frt;\n\t\tbox2.pose.position.y = y0_frt;\n\t\tbox2.pose.position.z = 1.6;\n\t\t//box.pose.orientation.x = 1.0;\n\t\t//box.pose.orientation.y = 1.0;\n\t\t//box.pose.orientation.z = 1.0;\n\t\t//box.pose.orientation.w = 1.0;\n\t\tbox1.pose.orientation = box2.pose.orientation = tf::createQuaternionMsgFromYaw(hedg); //convert euler (yaw) to quaternion for rviz visualization; header file: <tf/transform_datatypes.h>\n\n\t\tbox1.scale.x = 4.0;\n\t\tbox1.scale.y = 2.3;\n\t\tbox1.scale.z = 2.5;\n\n\t\tbox2.scale.x = 1.2;\n\t\tbox2.scale.y = 2.0;\n\t\tbox2.scale.z = 1.8;\n\n\t\tbox1.color.r = box2.color.r = 1.0f;\n\t\tbox1.color.g = box2.color.g = 1.0f;\n\t\tbox1.color.b = box2.color.b = 1.0f;\n\t\tbox1.color.a = 0.8;\n\t\tbox2.color.a 
= 1.0;\n\n\t\tbox1.lifetime = box2.lifetime = ros::Duration();\n\t\tmarker_pub.publish(box1);\n\t\tmarker_pub.publish(box2);\n\n\n\t\t//publish 4 wheels: Marker - cylinder ======================================================================\n\n\t\tfloat frx, fry, flx, fly, rrx, rry, rlx, rly;\n\t\tfloat d1 = 2.5; // wheelbase\n\t\tfloat d2 = 1.0; // half track distance\n\n\t\tfrx = x0 + cos(hedg)*d1 - sin(hedg)*(-d2);\n\t\tfry = y0 + sin(hedg)*d1 + cos(hedg)*(-d2);\n\t\tflx = x0 + cos(hedg)*d1 - sin(hedg)*(d2);\n\t\tfly = y0 + sin(hedg)*d1 + cos(hedg)*(d2);\n\t\trrx = x0 - sin(hedg)*(-d2);\n\t\trry = y0 + cos(hedg)*(-d2);\n\t\trlx = x0 - sin(hedg)*(d2);\n\t\trly = y0 + cos(hedg)*(d2);\n\n\t\tvisualization_msgs::Marker frw, flw, rrw, rlw; //front left wheel\n\t\tfrw.header.frame_id = flw.header.frame_id = rrw.header.frame_id = rlw.header.frame_id = \"frame\";\n\t\tfrw.header.stamp = flw.header.stamp = rrw.header.stamp = rlw.header.stamp = ros::Time::now();\n\t\tfrw.ns = flw.ns = rrw.ns = rlw.ns = \"wheel\";\n\t\tfrw.id = 0;\n\t\tflw.id = 1;\n\t\trrw.id = 2;\n\t\trlw.id = 3;\n\n\t\tuint32_t shape1 = visualization_msgs::Marker::CYLINDER;\n\n\t\tfrw.type = flw.type = rrw.type = rlw.type = shape1;\n\t\tfrw.action = flw.action = rlw.action = rrw.action = visualization_msgs::Marker::ADD;\n\t\tfrw.pose.position.x = frx;\n\t\tfrw.pose.position.y = fry;\n\t\tflw.pose.position.x = flx;\n\t\tflw.pose.position.y = fly;\n\t\trrw.pose.position.x = rrx;\n\t\trrw.pose.position.y = rry;\n\t\trlw.pose.position.x = rlx;\n\t\trlw.pose.position.y = rly;\n\n\t\tfrw.pose.orientation = flw.pose.orientation = rrw.pose.orientation = rlw.pose.orientation = tf::createQuaternionMsgFromRollPitchYaw( 0.0f, 1.5707f,hedg+1.5707f); //convert euler (yaw) to quaternion for rviz visualization; header file: <tf/transform_datatypes.h>\n\n\t\tfrw.scale.x = flw.scale.x = rrw.scale.x = rlw.scale.x = 1.0;\n\t\tfrw.scale.y = flw.scale.y = rrw.scale.y = rlw.scale.y = 1.0;\n\t\tfrw.scale.z = flw.scale.z = 0.25;\n\t\trrw.scale.z = rlw.scale.z = 0.5;\n\n\t\tfrw.color.r = flw.color.r = rrw.color.r = rlw.color.r = 0.0f;\n\t\tfrw.color.g = flw.color.g = rrw.color.g = rlw.color.g = 0.0f;\n\t\tfrw.color.b = flw.color.b = rrw.color.b = rlw.color.b = 0.0f;\n\t\tfrw.color.a = flw.color.a = rrw.color.a = rlw.color.a = 1.0;\n\n\t\tfrw.lifetime = flw.lifetime = rrw.lifetime = rlw.lifetime = ros::Duration();\n\n\t\tmarker_pub.publish(frw);\n\t\tmarker_pub.publish(flw);\n\t\tmarker_pub.publish(rrw);\n\t\tmarker_pub.publish(rlw);\n\n\n\t\t//publish ISUZU logos using visualization_msgs::Marker::TEXT_VIEW_FACING ======================================================================\n\n\t\tfloat logo1x, logo1y;\n\t\td1 = 0.0; // wheelbase\n\t\td2 = 0.0; // half track distance\n\n\t\tlogo1x = x0 + cos(hedg)*d1 - sin(hedg)*(-d2);\n\t\tlogo1y = y0 + sin(hedg)*d1 + cos(hedg)*(-d2);\n\n\n\t\tvisualization_msgs::Marker logo1;\n\t\tlogo1.header.frame_id = \"frame\";\n\t\tlogo1.header.stamp = ros::Time::now();\n\t\tlogo1.ns = \"logo\";\n\t\tlogo1.id = 0;\n\n\n\t\tuint32_t shape2 = visualization_msgs::Marker::TEXT_VIEW_FACING;\n\n\t\tlogo1.type = shape2;\n\t\tlogo1.action = visualization_msgs::Marker::ADD;\n\t\tlogo1.pose.position.x = logo1x;\n\t\tlogo1.pose.position.y = logo1y;\n\t\tlogo1.pose.position.z = 4;\n\n\n\t\tlogo1.pose.orientation = tf::createQuaternionMsgFromRollPitchYaw( 0.0f, 0.0f,hedg+1.5707f);\n\n\t\tlogo1.scale.z = 0.8;\n\n\t\tlogo1.color.r = 1.0f;\n\t\tlogo1.color.g = 0.0f;\n\t\tlogo1.color.b = 0.0f;\n\t\tlogo1.color.a = 
1.0;\n\n\t\tlogo1.lifetime = ros::Duration();\n\n\t\tlogo1.text=\"ISUZU\";\n\n\t\tmarker_pub.publish(logo1);\n\n\n\n\t\t// broadcast another frame: parent: \"frame\" -> child \"car\" (to add a target view on the vehicle)=======================================\n\t\ttransform.setOrigin( tf::Vector3(x0, y0, 0.0) );\n\t\ttransform.setRotation( tf::Quaternion(0, 0, 0, 1) );\n\t\tbr.sendTransform(tf::StampedTransform(transform, ros::Time::now(), \"frame\", \"car\"));\n\n\n*/\n\n\t}\n\treturn 0;\n\n\n}\n" }, { "alpha_fraction": 0.5951653122901917, "alphanum_fraction": 0.6506479978561401, "avg_line_length": 27.493776321411133, "blob_id": "c29251b0375425b6e3bb2748080f5bc4dab69e22", "content_id": "2daf9dae14d2e337e2a3ce061a73b4b226e7d16e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6867, "license_type": "no_license", "max_line_length": 241, "num_lines": 241, "path": "/src/control/src/sbw_send.cpp", "repo_name": "skywalker610/Autonomous-Driving-Control", "src_encoding": "UTF-8", "text": "// Isuzu Technical Center of America\n\n\n#include \"ros/ros.h\"\n#include \"std_msgs/String.h\"\n\n#include \"std_msgs/MultiArrayLayout.h\"\n#include \"std_msgs/MultiArrayDimension.h\"\n#include \"std_msgs/Int16MultiArray.h\"\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <unistd.h>\n\n#include <net/if.h>\n#include <sys/ioctl.h>\n#include <sys/socket.h>\n\n#include <linux/can.h>\n#include <linux/can/raw.h>\n\n\n/**\n * This tutorial demonstrates simple receipt of messages over the ROS system.\n */\n\n\nint16_t StrAng = 0;\nint8_t StrAng_byte1 = 0;\nint8_t StrAng_byte2 = 0;\n\nvoid chatterCallback(const std_msgs::Int16MultiArray::ConstPtr& msg)\n{\n\n\tStrAng = msg->data[0]*10;\n\tStrAng_byte1 = (StrAng & 0xff00) >> 8;\n\tStrAng_byte2 = StrAng & 0xff;\n}\n\n\n\n\n\n\nint main(int argc, char **argv)\n{\n\t/**\n\t* The ros::init() function needs to see argc and argv so that it can perform\n\t* any ROS arguments and name remapping that were provided at the command line.\n\t* For programmatic remappings you can use a different version of init() which takes\n\t* remappings directly, but for most command-line programs, passing argc and argv is\n\t* the easiest way to do it. The third argument to init() is the name of the node.\n\t*\n\t* You must call one of the versions of ros::init() before using any other\n\t* part of the ROS system.\n\t*/\n\tros::init(argc, argv, \"sbw_send\");\n\n\t/**\n\t* NodeHandle is the main access point to communications with the ROS system.\n\t* The first NodeHandle constructed will fully initialize this node, and the last\n\t* NodeHandle destructed will close down the node.\n\t*/\n\tros::NodeHandle n;\n\n\n\t/*if (close(s) < 0) {\n\tperror(\"Close\");\n\t}*/\n\t/**\n\t* The subscribe() call is how you tell ROS that you want to receive messages\n\t* on a given topic. This invokes a call to the ROS\n\t* master node, which keeps a registry of who is publishing and who\n\t* is subscribing. Messages are passed to a callback function, here\n\t* called chatterCallback. subscribe() returns a Subscriber object that you\n\t* must hold on to until you want to unsubscribe. When all copies of the Subscriber\n\t* object go out of scope, this callback will automatically be unsubscribed from\n\t* this topic.\n\t*\n\t* The second parameter to the subscribe() function is the size of the message\n\t* queue. 
If messages are arriving faster than they are being processed, this\n\t* is the number of messages that will be buffered up before beginning to throw\n\t* away the oldest ones.\n\t*/\n\tros::Subscriber sub = n.subscribe(\"str_wheel_ang_cmd\", 1000, chatterCallback); \n\n\t/**\n\t* ros::spin() will enter a loop, pumping callbacks. With this version, all\n\t* callbacks will be called from within this thread (the main one). ros::spin()\n\t* will exit when Ctrl-C is pressed, or the node is shutdown by the master.\n\t*/\n\t//ros::spin();\n\n\n\tros::Rate loop_rate(50); //Hz, 50Hz = 20ms\n\tint counter = 0;\n\twhile (ros::ok())\n\t{\n\n\n\t\tstruct sockaddr_can addr;\n\t\tstruct ifreq ifr;\n\t\tstruct can_frame frame;\n\n\n\n\t\t//Initialization\n\t\tint s;\n\t\tif ((s = socket(PF_CAN, SOCK_RAW, CAN_RAW)) < 0) {\n\t\tperror(\"Socket\");\n\t\t}\n\n\t\tstrcpy(ifr.ifr_name, \"can0\" );\n\t\tioctl(s, SIOCGIFINDEX, &ifr);\n\n\t\tmemset(&addr, 0, sizeof(addr));\n\t\taddr.can_family = AF_CAN;\n\t\taddr.can_ifindex = ifr.ifr_ifindex;\n\n\t\tif (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {\n\t\tperror(\"Bind\");\n\t\t}\n\n\n\t\tif ( counter < 300 ){\n\n\t\t\t// 1st step: initial setting\n\t\t\tframe.can_id = 0x600;\n\t\t\tframe.can_dlc = 8;\n\t\t\tframe.data[0] = 0x00;//fa;\n\t\t\tframe.data[1] = 0x00;//ff;\n\t\t\tframe.data[2] = 0x00;\n\t\t\tframe.data[3] = 0x00;\n\t\t\tframe.data[4] = 0x00;\n\t\t\tframe.data[5] = 0x00;\n\t\t\tframe.data[6] = 0x00;\n\t\t\tframe.data[7] = 0x00;\n\n\t\t\tif (write(s, &frame, sizeof(struct can_frame)) != sizeof(struct can_frame)) {\n\t\t\tperror(\"Write\");\n\t\t\t}\n\n\t\t\tROS_INFO(\"Initial setting: 0x%08X [%d] %02x %02x %02x %02x %02x %02x %02x %02x\",frame.can_id, frame.can_dlc, frame.data[0],frame.data[1],frame.data[2],frame.data[3],frame.data[4],frame.data[5],frame.data[6],frame.data[7]);\n\n\t\t\tframe.can_id = 0x603;\n\t\t\tframe.can_dlc = 8;\n\t\t\tframe.data[0] = 0x00;//fa;\n\t\t\tframe.data[1] = 0x00;//ff;\n\t\t\tframe.data[2] = 0x00;\n\t\t\tframe.data[3] = 0x00;\n\t\t\tframe.data[4] = 0x00;\n\t\t\tframe.data[5] = 0x00;\n\t\t\tframe.data[6] = 0x00;\n\t\t\tframe.data[7] = 0x00;\n\n\t\t\tif (write(s, &frame, sizeof(struct can_frame)) != sizeof(struct can_frame)) {\n\t\t\tperror(\"Write\");\n\t\t\t}\n\n\t\t\tROS_INFO(\"Initial setting: 0x%08X [%d] %02x %02x %02x %02x %02x %02x %02x %02x\",frame.can_id, frame.can_dlc, frame.data[0],frame.data[1],frame.data[2],frame.data[3],frame.data[4],frame.data[5],frame.data[6],frame.data[7]); \n\n\t\t} else if (counter >= 300 && counter < 600){\n\t\t\t// 3rd step: zero steer angle\n\n\t\t\tframe.can_id = 0x600;\n\t\t\tframe.can_dlc = 8;\n\t\t\tframe.data[0] = 0x00;\n\t\t\tframe.data[1] = 0x00;\n\t\t\tframe.data[2] = 0x00;\n\t\t\tframe.data[3] = 0x00;\n\t\t\tframe.data[4] = 0x00;\n\t\t\tframe.data[5] = 0x00;\n\t\t\tframe.data[6] = 0x01; // ANG0 = 1 (zero point enable)\n\t\t\tframe.data[7] = 0x00;\n\n\n\t\t\tif (write(s, &frame, sizeof(struct can_frame)) != sizeof(struct can_frame)) {\n\t\t\tperror(\"Write\");\n\t\t\t}\n\n\t\t\tROS_INFO(\"Please zero steer angle now ...: 0x%08X [%d] %02x %02x %02x %02x %02x %02x %02x %02x\",frame.can_id, frame.can_dlc, frame.data[0],frame.data[1],frame.data[2],frame.data[3],frame.data[4],frame.data[5],frame.data[6],frame.data[7]);\n\n\n\t\t} else {\n\n\t\t\t// Transmit 0X603\n\n\t\t\tframe.can_id = 0x603;\n\t\t\tframe.can_dlc = 8;\n\t\t\tframe.data[0] = 0x00;\n\t\t\tframe.data[1] = 0x00;\n\t\t\tframe.data[2] = 0x00; // VSPD (vehicle speed)\n\t\t\tframe.data[3] = 0x00; // VSPD (vehicle 
speed)\n\t\t\tframe.data[4] = 0x01; // VSPDEN (vehicle speed enable)\n\t\t\tframe.data[5] = 0x00;\n\t\t\tframe.data[6] = 0x00; // ANG0 = 0 (zero point disable)\n\t\t\tframe.data[7] = 0x00;\n\n\t\t\tif (write(s, &frame, sizeof(struct can_frame)) != sizeof(struct can_frame)) {\n\t\t\tperror(\"Write\");\n\t\t\t}\n\n\t\t\tROS_INFO(\"Transmit speed: 0x%08X [%d] %02x %02x %02x %02x %02x %02x %02x %02x\",frame.can_id, frame.can_dlc, frame.data[0],frame.data[1],frame.data[2],frame.data[3],frame.data[4],frame.data[5],frame.data[6],frame.data[7]);\n\n\n\n\t\t\t// Transmit 0x600: CTRLMODE, CTRLREQ, and ANGREQ / TRQREQ \n\t\t\tframe.can_id = 0x600;\n\t\t\tframe.can_dlc = 8;\n\t\t\tframe.data[1] = 0x00;\n\t\t\tframe.data[0] = 0x00;\n\t\t\tframe.data[2] = StrAng_byte1; // Steer angle\n\t\t\tframe.data[3] = StrAng_byte2; // Steer angle\n\t\t\tframe.data[4] = 0x01; // CTRLREQ\n\t\t\tframe.data[5] = 0x01; // CTRLMODE = 1 (angle control mode)\n\t\t\tframe.data[6] = 0x00;\n\t\t\tframe.data[7] = 0x00;\n\n\n\t\t\tif (write(s, &frame, sizeof(struct can_frame)) != sizeof(struct can_frame)) {\n\t\t\t\tperror(\"Write\");\n\t\t\t}\n\n\t\t\tROS_INFO(\"Enable CTRLMODE and CTRLRQE: 0x%08X [%d] %02x %02x %02x %02x %02x %02x %02x %02x\",frame.can_id, frame.can_dlc, frame.data[0],frame.data[1],frame.data[2],frame.data[3],frame.data[4],frame.data[5],frame.data[6],frame.data[7]);\n\n\n\n\n\t\t\tif (close(s) < 0) {\n\t\t\t\tperror(\"Close\");\n\t\t\t}\n\t\t}\n\n\t\tcounter++;\n\n\t\tros::spinOnce();\n\t\tloop_rate.sleep();\n\t}\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6818181872367859, "alphanum_fraction": 0.7226107120513916, "avg_line_length": 34.70833206176758, "blob_id": "8e8d8e2b503916bf1cc44a48f2646fde6cf8d5b4", "content_id": "89771b592ecb4c8423c83e869b28368836477ccb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 858, "license_type": "no_license", "max_line_length": 127, "num_lines": 24, "path": "/src/path_draw_GOOGLE/gmplot_gps_path.py", "repo_name": "skywalker610/Autonomous-Driving-Control", "src_encoding": "UTF-8", "text": "# import gmplot package \n\nimport gmplot \n\nwith open('/home/qiyang-isuzu/Desktop/ISUZU/ros_ws_DBW_CAN/lat_raw.txt') as f_lat:\n latitude_list = f_lat.read().splitlines()\n\nlatitude_list = list(map(float,latitude_list))\n\nwith open('/home/qiyang-isuzu/Desktop/ISUZU/ros_ws_DBW_CAN/lon_raw.txt') as f_lon:\n longitude_list = f_lon.read().splitlines()\n\nlongitude_list = list(map(float,longitude_list))\n \ngmap3 = gmplot.GoogleMapPlotter(42.3842014,-83.5005484, 14, apikey='AIzaSyAXFZ4CBWewIGa1-NEiZ9gbM_uSal4Ot9M') #apikey='AIzaSyBIvup4Q9gQGeOMvYnH-XdXadiAd-1KeYE') \n# scatter method of map object \n# scatter points on the google map \n#gmap3.scatter( latitude_list, longitude_list, '#FF0000', size = 1, marker = False ) \n \n# Plot method Draw a line in \n# between given coordinates \ngmap3.plot(latitude_list, longitude_list,'cornflowerblue', edge_width = 2) \n\ngmap3.draw( \"my_gm_plot.html\" )\n\n" }, { "alpha_fraction": 0.5156274437904358, "alphanum_fraction": 0.5642825961112976, "avg_line_length": 28.362621307373047, "blob_id": "8a731f365c1df75e2d655b4c558522c31db5b88d", "content_id": "6185b863bd9aa57618987e7cd9cca11e0a47b66b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 57833, "license_type": "no_license", "max_line_length": 397, "num_lines": 1969, "path": "/src/control/src/frozen_code/truck0216.cpp", "repo_name": "skywalker610/Autonomous-Driving-Control", "src_encoding": 
"UTF-8", "text": "//0918 mx 0924 pz 0928 mx 1116 mx\n\n#include <ros/ros.h>\n#include <ros/package.h> // for ros::package::getPath()\n\n#include \"std_msgs/String.h\" // for sending msg type: std_msgs/String\n#include \"std_msgs/Int16.h\" // for sending msg type: std_msgs/String\n#include \"std_msgs/Float32MultiArray.h\"\n#include \"std_msgs/Float64MultiArray.h\" // for lat lon, 32bit not accurate\n#include \"std_msgs/Int16MultiArray.h\"\n\n#include <iostream>\n#include <fstream>\n#include <iomanip> // for cout double with precision(6)\n#include <cmath>\n\n#include <string> // for udp socket programming - client\n#include <sys/socket.h> // for udp socket programming - client\n#include <arpa/inet.h> // for udp socket programming - client\n#include <unistd.h> // for udp socket programming - client\n#include <stdio.h>\n#include <errno.h>\n#include <stdlib.h>\n#include <sys/types.h>\n#include <netinet/in.h>\n\n//#define PORT 8080 // for udp socket programming - client\n//#define PORT 80 // for udp socket programming - client\n//#define DEST_PORT 80\n//#define DSET_IP_ADDRESS \"192.168.0.2\"\n\n#include \"boost/asio.hpp\"\n#include <boost/array.hpp>\n#include <boost/bind.hpp>\n\n#include <thread>\n\n#include <math.h>\n#include <chrono>\n#include <vector>\n#include \"Eigen-3.3/Eigen/Core\"\n#include \"Eigen-3.3/Eigen/QR\"\n#include \"MPC.h\"\n\nusing boost::asio::ip::udp;\nusing namespace std;\n#define IPADDRESS \"192.168.0.2\" // Mbed\n#define UDP_PORT 80\n\n\n\n#include \"std_msgs/Float64.h\" //ROS\n\n#include <visualization_msgs/Marker.h>//for publishing markers to rviz\n\n#include <tf/transform_broadcaster.h> // for broadcasting a frame\n\n#include <tf/transform_datatypes.h> // for publishing vehicle yaw using quaternion\n\n\nusing namespace Eigen;\n// MPC is initialized here!\nMPC mpc;\n\n\nint simulation_mode = 0; //--------------------------------------------------> choose mode here! 
0:experiments; 1:simulation - 0918 mx\n\n\n\ndouble gnss_arr[2];\nfloat hedg_arr[1];\n\nbool pos_up = false;\nbool ang_up = false;\n\nfloat get_ang (float x1, float y1, float x2, float y2, float x3, float y3, float x4, float y4) // angle between 2 vectors\n{\n\tfloat mag1 = sqrt ((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1));\n\tfloat mag2 = sqrt ((x4-x3)*(x4-x3)+(y4-y3)*(y4-y3));\n\tfloat dot_product = (x2-x1)*(x4-x3)+(y2-y1)*(y4-y3);\n\tfloat angle = acos(dot_product/mag1/mag2); // in radians\n\treturn angle;\n}\n\n\nfloat det( float matrix[3][3], int n) {\n\n float d = 0;\n float submatrix[3][3];\n if (n == 2)\n return ((matrix[0][0] * matrix[1][1]) - (matrix[1][0] * matrix[0][1]));\n else {\n for (int x = 0; x < n; x++) {\n int subi = 0;\n for (int i = 1; i < n; i++) {\n int subj = 0;\n for (int j = 0; j < n; j++) {\n if (j == x)\n continue;\n submatrix[subi][subj] = matrix[i][j];\n subj++;\n }\n subi++;\n }\n d = d + (pow(-1, x) * matrix[0][x] * det( submatrix, n - 1 ));\n }\n }\n return d;\n}\n\n\nvoid get_r_center (float x1, float y1, float x2, float y2, float x3, float y3, float &add_r, float &add_cx0, float &add_cy0) // r/curvature/center\n{\n\n\tx1 = x1*1000;\n\ty1 = y1*1000;\n\tx2 = x2*1000;\n\ty2 = y2*1000;\n\tx3 = x3*1000;\n\ty3 = y3*1000;\n\n\n\tfloat M1[3][3] = { {x1*x1+y1*y1,y1,1.0f}, {x2*x2+y2*y2,y2,1.0f}, {x3*x3+y3*y3,y3,1.0f} };\n\tfloat M2[3][3] = { {x1,y1,1.0f},{x2,y2,1.0f},{x3,y3,1.0f} };\n\tfloat M3[3][3] = { {x1,x1*x1+y1*y1,1.0f},{x2,x2*x2+y2*y2,1.0f},{x3,x3*x3+y3*y3,1.0f} };\n\n\t// argument center cx0 xy0 are passed by giving addresses of memory locations\n\tadd_cx0 = det(M1,3.0f)/det(M2,3.0f)*0.5f/1000;\n\tadd_cy0 = det(M3,3.0f)/det(M2,3.0f)*0.5f/1000;\n\n\t//cout << \"det1:\" << det(M1,3) <<\"\\n\"<< \"det2:\" << det(M2,3)<<\"\\n\" << \"det3:\" << det(M3,3) <<\"\\n\";\n\t// get area\n\tfloat vx1 = x2-x1;\n\tfloat vy1 = y2-y1;\n\tfloat vx2 = x3-x2;\n\tfloat vy2 = y3-y2;\n\tfloat vx3 = x1-x3;\n\tfloat vy3 = y1-y3;\n\tfloat A = (vx1*vy2 - vy1*vx2)/2.0f;\n\t// get length\n\tfloat L1 = sqrt( vx1*vx1 + vy1*vy1 );\n\tfloat L2 = sqrt( vx2*vx2 + vy2*vy2 );\n\tfloat L3 = sqrt( vx3*vx3 + vy3*vy3 );\n\t// get r\n\tadd_r = L1*L2*L3/4.0f/A/1000;\n\tif(A == 0){ add_r = 100000;}\n}\n\n\nfloat VL22_model_r2str (float r)\n{\n\tfloat str_ang; // tire angle\n\tfloat str_wheel_ang; // steering wheel angle position\n\tfloat str_ratio = 40.0/900.0; // dont forget using float data type\n\tfloat Lf = 2.76; //in meter\n\tif (r >= 0){\n\t\tstr_ang = - atan( Lf / r ); // when r > 0, left turn, str_ang assumed to be < 0\n\t\tstr_wheel_ang = str_ang / str_ratio;\n\t\tstr_wheel_ang = str_wheel_ang * 180.0 / 3.1415926;\n\t}else{\n\n\t\tstr_ang = - atan( Lf / r ); // when r < 0, right turn, str_ang assumed to be > 0\n\t\tstr_wheel_ang = str_ang / str_ratio;\n\t\tstr_wheel_ang = str_wheel_ang * 180.0 / 3.1415926;\n\t}\n\tif (str_wheel_ang > 800){\n\t\tstr_wheel_ang = 800;\n\t\tcout << \"curvature too high!\" << \"\\n\";\n\t}\n\tif (str_wheel_ang < -800){\n\t\tstr_wheel_ang = -800;\n\t\tcout << \"curvature too high!\" << \"\\n\";\n\t}\n\treturn str_wheel_ang;\n}\n\nfloat MKZ_model_r2str (float r)\n{\n\tint str_step;\n\tif (r >= 0){\n\t\tstr_step = atan(-4.8f/(r-0.802464f)) * 5000.0f ; // left turn with r > 0, str_step < 0\n\t}else{\n\t\tstr_step = atan(-4.8f/(r+0.802464f)) * 5000.0f ; // right turn with r < 0, str_step >0\n\t}\n\tif (str_step > 5000){\n\t\tstr_step = 5000;\n\t\tcout << \"curvature too high!\" << \"\\n\";\n\t}\n\tif (str_step < -5000){\n\t\tstr_step = -5000;\n\t\tcout << \"curvature too 
high!\" << \"\\n\";\n\t}\n\treturn str_step;\n}\n\nint find_side_2_line (float x0, float y0, float x1, float y1, float x2, float y2)\n{\n\tint side;\n\tfloat A = y2 - y1;\n\tfloat B = x1 - x2;\n\tfloat C = x2*y1 - x1*y2;\n\tfloat D = A*x0 + B*y0 +C;\n\tif ( D > 0 ){\n\t\tside = -1; // on the left\n\t}else if ( D < 0 ){\n\t\tside = 1; // on the right\n\t}else{\n\t\tside = 0; // on the line\n\t}\n\treturn side;\n}\n\nfloat find_d_point_2_line ( float x0, float y0, float x1, float y1, float x2, float y2 ) // distance btw point to line defined by 2 points\n{\n\tfloat A = y2 -y1;\n\tfloat B = x1 - x2;\n\tfloat C = x2*y1 - x1*y2;\n\tfloat d = abs( A*x0 + B*y0 + C) / sqrt( A*A + B*B );\n\treturn d;\n}\n\n\nfloat hedg2ENU (float hedg) // for simulation, range from -Pi to Pi\n{\n\thedg = 3.1415926f*2.0f - hedg/180.0f*3.1415926f;\n\thedg = hedg - 3.1415926f*3.0f/2.0f;\n\tif ( hedg < -3.1415926f ){\n\t\thedg = hedg + 2.0f*3.1415926f;\n\t}\n\treturn hedg;\n}\n\nvoid MKZ_model ( float x0, float y0, float hedg, float str_step, float d_travel, float &add_x0, float &add_y0, float &add_hedg )//for simulation, hedg in radian\n{\n\tif (str_step > 5000){\n\t\tstr_step = 5000;\n\t}\n\tif (str_step < -5000){\n\t\tstr_step = -5000;\n\t}\n\n\tstr_step = -str_step; // due to an issue in matlab code\n\tfloat d1 = 0.88; // width/2\n\tfloat d2 = 1.082; // rear axial to rear end\n\tfloat d3 = 3.843; // rear axial to front end\n\tfloat dhedg;\n\tfloat r;\n\tfloat str_step_max = 5000.0;\n\tif (str_step == 0){\n\t\tadd_x0 = x0 + cos(hedg)*d_travel;\n\t\tadd_y0 = y0 + sin(hedg)*d_travel;\n\t\tadd_hedg =\thedg;\n\t}else{\n\t\tif (str_step >= 0){\n\t\t\tr = -4.8f/tan( str_step/str_step_max ) - 0.802464f;\n\t\t}else{\n\t\t\tr = -4.8f/tan( str_step/str_step_max ) + 0.802464f;\n\t\t}\n\t\tdhedg = - d_travel/r;\n\t\tfloat dd1 = sin(dhedg)*r;\n\t\tfloat dd2 = r - cos(dhedg)*r;\n\t\tadd_x0 = x0 + cos(3.1415926f + hedg)*dd1 - sin(3.1415926f + hedg)*dd2;\n\t\tadd_y0 = y0 + sin(3.1415926f + hedg)*dd1 + cos(3.1415926f + hedg)*dd2;\n\t\tadd_hedg = hedg + dhedg;\n\t\tif( add_hedg > 3.1415926f ){\n\t\t\tadd_hedg = add_hedg - 2.0f*3.1415926f;\n\t\t}else if( add_hedg < - 3.1415926f ){\n\t\t\tadd_hedg = add_hedg + 2.0f*3.1415926f;\n\t\t}\n\n\t}\n}\n\nvoid dynamic_model ( float x0, float y0, float hedg, float str_step, float d_travel, float &add_x0, float &add_y0, float &add_hedg )//for simulation, hedg in radian\n{\n\tfloat theta; //str angle\n\tfloat Vx = 0.1; // m/s vehicle speed\n\tfloat dt = 0.05; // seconds\n\tfloat lf = 3; //meter\n\tfloat lr = 1; //meter\n\tfloat Caf = 1; // conering stiffness??\n\tfloat Car = 1;\n\tfloat m = 1000; // Kg vechile weight\n\tfloat Iz = 500; // moment of inertia for z axis\n\tfloat ax = 0; //accleration in local x, assumming zero;\n\tfloat y, y_new, x, x_new, yaw, yaw_new, X, X_new, Y, Y_new;\n\tfloat dy, dy_new, dx, dx_new, dyaw, dyaw_new;\n\n\ty = 0; // local coordinate\n\tx = 0;\n\tyaw = hedg;\n\tX = x0; //global coordinate\n\tY = y0;\n\n\tdy = 0;\n\tdx = Vx;\n\tdyaw = 0;\n\n\n\tif (str_step > 5000){\n\t\tstr_step = 5000;\n\t}\n\tif (str_step < -5000){\n\t\tstr_step = -5000;\n\t}\n\tstr_step = -str_step; // due to an issue in matlab code\n\ttheta = str_step/5000.0f*540.0f*3.14f/180.0f; // str_angle\n\n\tfor(int i = 0; i < 20; i++)\n\t{\n\t\ty_new = y + dy*dt;\n\t\tdy_new = dy - 2*(Caf*lf - Car*lr)*dy*dt/m/Vx - Vx*dyaw*dt - 2*(Caf*lf-Car*lr)*dyaw*dt/m/Vx + 2*Caf*theta*dt/m;\n\t\tyaw_new = yaw + dyaw*dt;\n\t\tdyaw_new = dyaw - 2*(Caf*lf-Car*lr)*dy*dt/Iz/Vx - 2*(Caf*lf*lf+Car*lr*lr)*dyaw*dt/Iz/Vx + 
2*lf*Caf*theta*dt/Iz;\n\t\tdx_new = dx + dyaw_new*dy_new*dt + ax*dt;\n\t\tx_new = x + dx_new*dt;\n\n\t\tX_new = X + (dx*cos(yaw) - dy*sin(yaw))*dt;\n\t\tY_new = Y + (dx*sin(yaw) + dy*cos(yaw))*dt;\n\n\t\ty = y_new;\n\t\tdy = dy_new;\n\t\tyaw = yaw_new;\n\t\tdyaw = dyaw_new;\n\t\tx = x_new;\n\t\tdx = dx_new;\n\n\t\tX = X_new;\n\t\tY = Y_new;\n\t}\n\n\tadd_x0 = X_new;\n\tadd_y0 = Y_new;\n\tadd_hedg = yaw_new;\n}\n\n\nvoid gnssCallback(const std_msgs::Float64MultiArray::ConstPtr& msg)\n{\n\n\tint i = 0;\n\n\n\tfor(std::vector<double>::const_iterator it = msg->data.begin(); it != msg->data.end(); ++it) // \"double\" matches \"Float64\" \n\t{\n\t gnss_arr[i] = *it;\n\t i++;\n\t}\n\n\tint j = 0;\n\tfor(j = 0; j < 2; j++)\n\t{\n\t\tprintf(\"%0.7f, \", gnss_arr[j]);\n\t}\n\tprintf(\"\\n\");\n\n\tROS_INFO(\"I heard gnss: [%0.7f, %0.7f]\", gnss_arr[0],gnss_arr[1]);\n\n\tpos_up = true;\n\n\treturn;\n}\n\nvoid hedgCallback(const std_msgs::Float32MultiArray::ConstPtr& msg)\n{\n\n\tint i = 0;\n\n\n\tfor(std::vector<float>::const_iterator it = msg->data.begin(); it != msg->data.end(); ++it)\n\t{\n\t hedg_arr[i] = *it;\n\t i++;\n\t}\n\n\tint j = 0;\n\tfor(j = 0; j < 1; j++)\n\t{\n\tprintf(\"%f, \", hedg_arr[j]);\n\t}\n\tprintf(\"\\n\");\n\n\tROS_INFO(\"I heard heading: [%f]\", hedg_arr[0]);\n\n\tang_up = true;\n\n\treturn;\n}\n\nvoid GNSS_read()\n{\n\twhile(ros::ok())\n\t{\n\t\tros::spinOnce();\n\t}\n}\n\n\n// MPC - Evaluate a polynomial.\ndouble polyeval(Eigen::VectorXd coeffs, double x) {\n double result = 0.0;\n for (int i = 0; i < coeffs.size(); i++) {\n result += coeffs[i] * pow(x, i);\n }\n return result;\n}\n\n// Fit a polynomial.\n// Adapted from\n// https://github.com/JuliaMath/Polynomials.jl/blob/master/src/Polynomials.jl#L676-L716\nEigen::VectorXd polyfit(Eigen::VectorXd xvals, Eigen::VectorXd yvals,\n int order) {\n assert(xvals.size() == yvals.size());\n assert(order >= 1 && order <= xvals.size() - 1);\n Eigen::MatrixXd A(xvals.size(), order + 1);\n\n for (int i = 0; i < xvals.size(); i++) {\n A(i, 0) = 1.0;\n }\n\n for (int j = 0; j < xvals.size(); j++) {\n for (int i = 0; i < order; i++) {\n A(j, i + 1) = A(j, i) * xvals(j);\n }\n }\n\n auto Q = A.householderQr();\n auto result = Q.solve(yvals);\n return result;\n}\n\nvoid mpc_fun(vector<double> ptsx, vector<double> ptsy, double px, double py, double psi, double v, double &steer_value, double &throttle_value, vector<double> &mpc_x_vals, vector<double> &mpc_y_vals, vector<double> &next_x_vals, vector<double> &next_y_vals, vector<double> &mpc_x_ref, vector<double> &mpc_y_ref) {\n /*\n * Calculate steering angle and throttle using MPC.\n *\n * Both are in between [-1, 1].\n *\n */\n VectorXd waypoint_x_vcs = VectorXd(ptsx.size());\n VectorXd waypoint_y_vcs = VectorXd(ptsy.size());\n\n // transform from map coordinate system to vehicle coordinate system (VCS)\n for (size_t i = 0; i < ptsx.size(); ++i) {\n double dx = ptsx[i] - px;\n double dy = ptsy[i] - py;\n waypoint_x_vcs[i] = dx * cos(psi) + dy * sin(psi);\n waypoint_y_vcs[i] = -dx * sin(psi) + dy * cos(psi);\n }\n\n auto coeffs = polyfit(waypoint_x_vcs, waypoint_y_vcs, 3); // 3rd order polynomial fitting\n // since we use the vehicle coordinates, the x is 0\n auto cte = polyeval(coeffs, 0);\n auto epsi = -atan(coeffs[1]);\n\n Eigen::VectorXd state(6);\n state << 0, 0, 0, v, cte, epsi;\n\n std::vector<double> x_vals = {state[0]};\n std::vector<double> y_vals = {state[1]};\n std::vector<double> psi_vals = {state[2]};\n std::vector<double> v_vals = {state[3]};\n
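 // [Editor's note] The rotation above is R(-psi) applied to each waypoint offset (dx, dy): x_v = dx*cos(psi) + dy*sin(psi), y_v = -dx*sin(psi) + dy*cos(psi), which places the reference path in the vehicle frame so cte and epsi can be read off at x_v = 0.\n std::vector<double> 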
cte_vals = {state[4]};\n std::vector<double> epsi_vals = {state[5]};\n std::vector<double> delta_vals = {};\n std::vector<double> a_vals = {};\n\n // compute the optimal trajectory\n auto result = mpc.Solve(state, coeffs);\n\n // update the previous steering and acceleration\n mpc.setPrevDelta(result[0]);\n mpc.setPrevA(result[1]);\n\n steer_value = -result[0];\n throttle_value = result[1];\n\n // NOTE: Remember to divide by deg2rad(25) before you send the steering value back.\n // Otherwise the values will be in between [-deg2rad(25), deg2rad(25] instead of [-1, 1].\n // msgJson[\"steering_angle\"] = steer_value / deg2rad(25);\n // msgJson[\"throttle\"] = throttle_value;\n\n //Display the MPC predicted trajectory\n mpc_x_vals = mpc.getMpcX();\n mpc_y_vals = mpc.getMpcY();\n\n //.. add (x,y) points to list here, points are in reference to the vehicle's coordinate system\n // the points in the simulator are connected by a Green line\n // msgJson[\"mpc_x\"] = mpc_x_vals;\n // msgJson[\"mpc_y\"] = mpc_y_vals;\n\n //Display the waypoints/reference line\n //vector<double> next_x_vals(ptsx.size()); \n //vector<double> next_y_vals(ptsx.size());\n\n for (size_t i = 0; i < ptsx.size(); ++i) {\n next_x_vals[i] = waypoint_x_vcs[i];\n next_y_vals[i] = waypoint_y_vcs[i];\n }\n\t\t\t\t\t\t\t\t\n\n\n unsigned long long num_ptsx = ptsx.size();\n double dptsx = ( next_x_vals[num_ptsx-1] - next_x_vals[0] ) /10;\t \n //cout << \"dptsx =\" << dptsx << endl; \n\n\n for (size_t i = 0; i < 11; ++i) { \n\tmpc_x_ref[i] = next_x_vals[0] + i*dptsx;\n\tmpc_y_ref[i] = polyeval(coeffs, mpc_x_ref[i]);\n }\n\n\n\n //.. add (x,y) points to list here, points are in reference to the vehicle's coordinate system\n // the points in the simulator are connected by a Yellow line\n // msgJson[\"next_x\"] = next_x_vals;\n // msgJson[\"next_y\"] = next_y_vals;\n}\n\n\nfloat trj_plan_pp ( float x0, float y0, float hedg, float x1, float y1, float x2, float y2 ) // pp - pure pursuit\n{\n\tfloat str_ang_max = 540; // ------------> double check truck's parameters\n\tfloat str_step_arr[23] = {-22500, -20000, -17500, -15000, -12500, -10000, -7500, -5000, -2500, -1000, -500, 0, 500, 1000, 2500, 5000, 7500, 10000, 12500, 15000, 17500, 20000, 22500};\n\tfloat str_step, str_step_opt;\t\n\tfloat str_step_max = 25000;\n\tfloat str_ang, str_ang_opt;\t\n\tfloat d_travel = 3.0; //meter\n\tfloat str_ang_cmd;\n\tfloat d1 = 0.84; // width/2 for mkz -----------> double check turck's parameters\n\tfloat d2 = 1.082; // rear axial to rear end for mkz -----------> double check turck's parameters\n\tfloat d3 = 3.843; // rear axial to front end for mkz -----------> double check turck's parameters\n\tfloat dhedg;\n\tfloat r;\n\tfloat x0_out;\n\tfloat y0_out;\n\tfloat hedg_out;\n\tfloat x0_frt, y0_frt;// vehicle front head \n\tfloat d_frt = 3; // vehicle front head distance (m)\n\tfloat ang1, ang2;\n\tfloat cost;\n\tfloat cost1;\n\tfloat cost2;\t\n\tfloat cost_min = 10000;\n\tfloat dd1, dd2;\t\n\n\tfor (int i = 0; i < 23; i++){\n\t\tstr_step = str_step_arr[i];\n\n\t\tstr_step = -str_step; // due to an issue in matlab code\n\n\t\t// truck kinematic model\n\t\tif (str_step == 0){\n\t\t\tx0_out = x0 + cos(hedg)*d_travel; \n\t\t\ty0_out = y0 + sin(hedg)*d_travel;\n\t\t\thedg_out = hedg;\n\t\t}else{\n\t\t\tr = 4.8/tan( abs(str_step)/str_step_max ) - 1.9; // for truck\n\t\t\tif (str_step >= 0){\n\t\t\t\tr = -r;\n\t\t\t}\n\t\t\tdhedg = -d_travel/r; \n\t\t\tfloat dd1 = sin(dhedg)*r;\n\t\t\tfloat dd2 = r - cos(dhedg)*r;\n\t\t\tx0_out = x0 + cos(3.1415926 + hedg)*dd1 - 
sin(3.1415926 + hedg)*dd2;\n\t\t\ty0_out = y0 + sin(3.1415926 + hedg)*dd1 + cos(3.1415926 + hedg)*dd2;\n\t\t\thedg_out = hedg + dhedg;\n\t\t}\n\n\n\t\t// evaluate cost functions\n\t\t\n\t\tx0_frt = x0_out + d_frt*cos(hedg); \n\t\ty0_frt = y0_out + d_frt*sin(hedg);\t\n\t\tang1 = get_ang(x0_out, y0_out, x2, y2, x0_out, y0_out, x0_frt, y0_frt); //vehicle to p2 vector and segment vector angle\n\t\tang2 = get_ang(x0_out, y0_out, x0_frt, y0_frt, x1, y1, x2, y2); // vehicle heading vector and segment vector angle\n\t\tcost1 = ang1;\n\t\tcost2 = ang2;\n\t\tcost = 0.9*ang1 + 0.1*ang2;\n\t\tif ( cost < cost_min ){\n\t\t\tstr_step_opt = str_step_arr[i];\n\t\t\t//str_ang_opt = str_step_opt/str_step_max*810; // relation ????????????????????????????????????????????????????????????????????????\n\t\t\tcost_min = cost;\n\t\t}\n\t\tcout <<\"0928 debug ----> str_angle =\" << str_step;\n\t\tcout <<\"; cost =\" << cost << \"\\n\";\n\t\tcout << \"x0, y0, x1,y1,x2,y2: \" << x0 << y0<<x1<<y1<<x2<<y2<<\"\\n\";\n\n\n\t}\n\n\n\t//cout << \"optimized steering angle = \" << str_ang << \"\\n\";\n\treturn str_step_opt;\n}\n\n\nvoid truck_kinematics_model ( float x0, float y0, float hedg, float str_step, float d_travel, float &add_x0, float &add_y0, float &add_hedg )//for simulation, hedg in radian\n{\n\tif (str_step > 25000){\n\t\tstr_step = 25000;\n\t}\n\tif (str_step < -25000){\n\t\tstr_step = -25000;\n\t}\n\n\tstr_step = -str_step; // due to an issue in matlab code\n\tfloat d1 = 0.88; // width/2\n\tfloat d2 = 1.082; // rear axial to rear end\n\tfloat d3 = 3.843; // rear axial to front end\n\tfloat dhedg;\n\tfloat r;\n\tfloat str_step_max = 25000.0;\n\n\n\t// truck kinematic model\n\tif (str_step == 0){\n\t\tadd_x0 = x0 + cos(hedg)*d_travel; \n\t\tadd_y0 = y0 + sin(hedg)*d_travel;\n\t\tadd_hedg = hedg;\n\t}else{\n\t\tr = 4.8/tan( abs(str_step)/str_step_max ) - 1.9; // for truck\n\t\tif (str_step >= 0){\n\t\t\tr = -r;\n\t\t}\n\t\tdhedg = -d_travel/r; \n\t\tfloat dd1 = sin(dhedg)*r;\n\t\tfloat dd2 = r - cos(dhedg)*r;\n\t\tadd_x0 = x0 + cos(3.1415926 + hedg)*dd1 - sin(3.1415926 + hedg)*dd2;\n\t\tadd_y0 = y0 + sin(3.1415926 + hedg)*dd1 + cos(3.1415926 + hedg)*dd2;\n\t\tadd_hedg = hedg + dhedg;\n\t}\n\n}\n\n\n\n\nint main ( int argc, char**argv){\n\n\t//Initialize the ROS system.\n\tros::init(argc, argv, \"mkz\");\n\n\t// Establish this program as a ROS node.\n\tros::NodeHandle n;\n\n\t// Create publisher object\n\tros::Publisher marker_pub = n.advertise<visualization_msgs::Marker>(\"visualization_markers\", 1000);\n\n\tros::Publisher float_pub = n.advertise<std_msgs::Float64>(\"float\", 1000);\n\n\n\tros::Publisher str_pub = n.advertise<std_msgs::Int16MultiArray>(\"str_wheel_ang_cmd\", 1000);\n\n\n\n\n\n\tros::Subscriber gnss_sub = n.subscribe(\"gnss_msg\", 1, gnssCallback);\n\tros::Subscriber hedg_sub = n.subscribe(\"ang_msg\", 1, hedgCallback);\n\n\n\tros::spinOnce();\n\tthread GNSSread(GNSS_read);\n\n \tros::Rate rate(30);\n\n\t//Send some output as a log message.\n\tROS_INFO_STREAM(\"Hello, ROS!\");\n\n\t// Broadcast a frame\n\ttf::TransformBroadcaster br;\n\ttf::Transform transform;\n\n\n\t//Parameters\n\n\tint i = 1;\n\n\tdouble lon, lat;\n\tfloat x0, y0, hedg; // vehicle ENU coordinate\n\tfloat x0_frt, y0_frt;// vehicle front head\n\tfloat d_frt = 3.0 ; // vehicle front head distance (m)\n\t// float d_frt = 5.0 ;\n\tdouble wps_lon[3000], wps_lat[3000];\n\tfloat wps_x_raw[3000], wps_y_raw[3000];\n\tfloat wps_x[3000], wps_y[3000]; // after removing sharp corners\n\tdouble in_lat, in_lon;\n\tint lat_num;\n\tint 
lon_num;\n\tint wps_num_raw;\n\tint wps_num = 0;\n\tint wps_num_0 = 10000;\n\tdouble lat_ref, lon_ref;\n\tint idx_glb, idx;\n\t//float x1, x2, y1, y2, x3, y3;\n\tdouble x1, x2, y1, y2, x3, y3, x4, y4, x5, y5, x6, y6;\n\tfloat ang1, ang2, ang3;\n\tfloat d_v2seg_min, d_v2wps_min;\n\tfloat d_v2seg, d_v2wps;\n\tint wps_idx; //global 0, 1, 2, ...\n\tint wps_wdw_num = 14; // window ahead with wps_idx\n\tint wps_wdw[wps_wdw_num];\n\tint wps_wdw_idx;\n\tfloat r, cx0, cy0;\n\tint str_ctrl_mode;\n\tfloat d_o2v; // circle center to vechile\n\tint Kp, Kd;\n\tfloat offset_ctrl;\n\tfloat offset_ctrl_pre = 0;\n\tfloat d_offset;\n\tfloat str_step_ref;\n\tfloat str_wheel_ang_ref;\n\tfloat str_step;\n\tfloat str_wheel_ang;\n\tint str_cmd; // must be int\n\tfloat d_p2line;\n\tint side2line;\n\n\tfloat theta1, theta3, d_theta;// for plotting trajectory plan\n\tfloat traj_x[100], traj_y[100];\n\n\n\tfloat d_travel = 0.1;//simulation\n\n\t//double add_x0, add_y0, add_hedg; // simulation\n\tfloat add_x0, add_y0, add_hedg; // simulation\n\tfloat add_cx0, add_cy0, add_r;\n\n\n\tvector<double> mpc_x_vals;\n\tvector<double> mpc_y_vals;\n\n\tvector<double> next_x_vals;\n\tvector<double> next_y_vals;\n\n\tvector<double> mpc_x_ref; // reference trajectory by polyfit\n\tvector<double> mpc_y_ref;\n\n\n\t// print to user\n\tcout << \"Hello World!\" << endl;\n\n\n\t// Load map\n\n\tstd::string pkg_loc = ros::package::getPath(\"truck\");\t// add header file <ros/package.h>; add std_msgs roslib to cmake and xml files--------------------??????????????????????????\n\tcout << \"The path is :\" << pkg_loc << endl;\n\n\tifstream inFile_lat(pkg_loc+ \"/lat_raw.txt\");\n\tif (inFile_lat.is_open())\n\t{\n\t\tlat_num = 0;\n\t\twhile ( inFile_lat >> in_lat )\n\t\t{\n\n\t\t\t//cout << fixed << setprecision(8) << in_lat << '\\n';\n\t\t\twps_lat[lat_num] = in_lat;\n\t\t\t//cout << lat_raw[lat_num] << '\\n';\n\t\t\tlat_num = lat_num + 1;\n\t\t}\n\t\tinFile_lat.close();\n\t\tcout << \"lat_num is equal to \" << lat_num << '\\n';\n\t}else{\n\t\tcout << \"Unable to open file lat_raw.txt\";\n\t\texit(1); // call system to stop\n\t}\n\n\n\tifstream inFile_lon(pkg_loc + \"/lon_raw.txt\");\n\tif (inFile_lon.is_open())\n\t{\n\t\tlon_num = 0;\n\t\twhile ( inFile_lon >> in_lon )\n\t\t{\n\n\t\t\t//cout << fixed << setprecision(8) << in_lon << '\\n';\n\t\t\twps_lon[lon_num] = in_lon;\n\t\t\t//cout << lon_raw[lon_num] << '\\n';\n\t\t\tlon_num = lon_num + 1;\n\t\t}\n\t\tinFile_lon.close();\n\t\tcout << \"lon_num is equal to \" << lon_num << '\\n';\n\t}else{\n\t\tcout << \"Unable to open file lon_raw.txt\";\n\t\texit(1); // call system to stop\n\t}\n\tif (lat_num == lon_num)\n\t{\n\t\twps_num_raw = lat_num;\n\t}else{\n\t\tcout << \"lon_num not equal to lat_num, check lon_raw.txt lat_raw.txt\";\n\t\texit(1); // call system to stop\n\t}\n\tcout << \"wps_num_raw =\" << wps_num_raw << \"\\n\";\n\n\t// Geodetic to ENU\n\tlon_ref = wps_lon[0];\n\tlat_ref = wps_lat[0];\n\tfor (int j = 0; j < wps_num_raw; j++) {\n\t\twps_x_raw[j] = (wps_lon[j] - lon_ref)*82230.670304;\n\t\twps_y_raw[j] = (wps_lat[j] - lat_ref)*111132.944444;\n\t\t//cout << \"wps_x\" << j << \":\" << wps_x[j] << \"\\n\";\n\t\t//cout << \"wps_y\" << j << \":\" << wps_y[j] << \"\\n\";\n\t}\n\n\t\n // debug =======================\n\tfor(int j=0; j<wps_num_raw; j++){\n\t\twps_x[j] = wps_x_raw[j];\n\t\twps_y[j] = wps_y_raw[j];\n\t}\n\twps_num = wps_num_raw;\n\t\n\n\n\t// Remove sharp corners from the raw waypoints===========================================\n\twps_x[0] = wps_x_raw[0];\n\twps_y[0] = 
wps_y_raw[0];\n\n\t/*\n\twhile( wps_num != wps_num_0 ){\n\t\twps_num_0 = wps_num;\n\t\tint k = 0;\n\t\tfor (int j = 0; j < wps_num_raw-2; j++){\n\t\t\tx1 = wps_x_raw[j];\n\t\t\ty1 = wps_y_raw[j];\n\t\t\tx2 = wps_x_raw[j+1];\n\t\t\ty2 = wps_y_raw[j+1];\n\t\t\tx3 = wps_x_raw[j+2];\n\t\t\ty3 = wps_y_raw[j+2];\n\t\t\tget_r_center (x1, y1, x2, y2, x3, y3, r, cx0, cy0);\n\n\t\t\tif(r < -6 || r > 6 ){\n\t\t\t\tk = k + 1;\n\t\t\t\twps_x[k] = x2;\n\t\t\t\twps_y[k] = y2;\n\t\t\t}\n\t\t}\n\t\twps_num = k+2;\n\t\twps_x[wps_num-1] = wps_x_raw[wps_num_raw]; // keep the last waypoint the same\n\t\twps_y[wps_num-1] = wps_y_raw[wps_num_raw];\n\t}\n\t*/\n\tcout << \"wps_num =\" << wps_num << \"\\n\";\n\n\n\n\t// Get GPS =================================================================================\n\n\t//uint8_t init_msg[5] = {103,110, 115, 115, 49}; // ascii \"gnss1\"\n\t//uint8_t *hello = init_msg;\n\t//uint8_t buffer[10] = {0};\n\n\t/*\n\tint sock = 0, valread;\n\tstruct sockaddr_in serv_addr;\n\n\tif ((sock = socket(AF_INET, SOCK_DGRAM, 0)) < 0)\n\t{\n\t\tprintf(\"\\n Socket creation error \\n\");\n\t\treturn -1;\n\t}\n\tserv_addr.sin_family = AF_INET;\n\tserv_addr.sin_port = htons(PORT);\n\t//if(inet_pton(AF_INET, \"127.0.0.1\", &serv_addr.sin_addr)<=0) // Convert IPv4 and IPv6 addresses from text to binary form\n\tif(inet_pton(AF_INET, \"192.168.0.2\", &serv_addr.sin_addr)<=0) // Convert IPv4 and IPv6 addresses from text to binary form\n\t{\n\t\tprintf(\"\\nInvalid address/ Address not supported \\n\");\n\t\treturn -1;\n\t}\n\n\n\tif (connect(sock, (struct sockaddr*)&serv_addr, sizeof(serv_addr)) < 0) // ?????????????????????????????????????????????????????????????????????????\n\t{\n\t\tprintf(\"\\nConnection Failed!! \\n\");\n\t\treturn -1;\n\t}\n\n\tsend(sock , hello , 10, 0 ); //send(sock , hello , strlen(hello) , 0 );\n\t//printf(\"Hello message sent\\n\");\n\n\tvalread = read( sock , buffer, 10);\n\n\t//printf(\"%s\\n\",buffer );\n\t//printf(\"%d\\n\",buffer[0] ); \t// if printing only one element, use %d\n\t*/\n\n\n\t//---------------------------------------------------------------------------------------------------------------\n\n\t/*weiyang version*/\n\n\t/*\n\n\tuint8_t init_msg[5] = {115,109, 100, 0, 255}; // initialization steering control\n\t//uint8_t call_gnss[5] = {103,110, 115, 115, 49}; // ascii \"gnss1\"\n\n\tuint8_t steer_cg_msg[5] = {115, 98, 119, 168, 97};\n\n\n\tuint8_t reset_msg[5] = {115, 98, 119, 0, 0}; // steering angle go to zero\n\tchar recv_buf[25];\n\n\tboost::asio::io_service io_service;\n\n\tudp::socket socket(io_service);\n\n\tudp::endpoint receiver_endpoint(boost::asio::ip::address::from_string(IPADDRESS), UDP_PORT); //Mbed\n\n\tsocket.open(udp::v4());\n\n\n\tboost::system::error_code ignored_error;\n\t//boost::system::error_code error;\n\n\tsocket.send_to(boost::asio::buffer(init_msg), receiver_endpoint, 0, ignored_error);\n\tcout << \"steering initilized!\" << endl;\n\n\tsleep(2);\n\n\tsocket.send_to(boost::asio::buffer(steer_cg_msg), receiver_endpoint, 0, ignored_error);\n\tcout << \"send steering command!\" << endl;\n\n\tsleep(2);\n\n\t//socket.send_to(boost::asio::buffer(reset_msg), receiver_endpoint, 0, ignored_error);\n\t//cout << \"send steering command!\" << endl;\n\n\tcout<< \" test!!!!!!!!!!!!!!!! 
\"<<endl;\n\n\n\tsocket.bind(udp::endpoint(boost::asio::ip::address::from_string(IPADDRESS), UDP_PORT));\t//must bind if receive data over udp\n\n\n\t// size_t len = socket.receive_from(boost::asio::buffer(recv_buf), sender_endpoint);\n\tsocket.receive_from(boost::asio::buffer(recv_buf), receiver_endpoint);\n\t// cout << \"len: \" << len << endl;\n\n\tsocket.close();\n\n\n\tsleep(2);\n\n\n\tsocket.send_to(boost::asio::buffer(reset_msg),receiver_endpoint, 0, ignored_error);\n\tcout <<\"reset the steering!\" << endl;\n\n\n\t//lat = double(buffer[0] + buffer[1]*256 + buffer[2]*256*256 + buffer[3]*256*256*256)/10000000-90;\n\t//lon = double(buffer[4] + buffer[5]*256 + buffer[6]*256*256 + buffer[7]*256*256*256)/10000000-180;\n\t//hedg = double(buffer[8] + buffer[9]*256)/100;\n\n\t//cout << \"lat =\" << lat << \"\\n\";\n\t//cout << \"lon =\" << lon << \"\\n\";\n\t//cout << \"hedg =\" << hedg << \"\\n\";\n\n\n\t*/\n\t//---------------------------------------------------------------------------------------------------------------\n\n\n\t/*\n\t// socket文件描述符\n\tint sock_fd;\n\n\t// 建立udp socket\n\tsock_fd = socket(AF_INET, SOCK_DGRAM, 0);\n\tif(sock_fd < 0)\n\t{\n\t\tperror(\"socket\");\n\t\texit(1);\n\t}\n\n\t// 设置address\n\tstruct sockaddr_in addr_serv;\n\tint len;\n\tmemset(&addr_serv, 0, sizeof(addr_serv));\n\taddr_serv.sin_family = AF_INET;\n\taddr_serv.sin_addr.s_addr = inet_addr(DSET_IP_ADDRESS);\n\taddr_serv.sin_port = htons(DEST_PORT);\n\tlen = sizeof(addr_serv);\n\n\n\tint send_num;\n\tint recv_num;\n\t//char send_buf[20] = \"hey, who are you?\";\n\tchar send_buf[20] = {103,110, 115, 115, 49}; // ascii \"gnss1\"\n\tuint8_t recv_buf[10];\n\n\n\tsend_num = sendto(sock_fd, send_buf, strlen(send_buf), 0, (struct sockaddr *)&addr_serv, len);\n\n\tif(send_num < 0)\n\t{\n\tperror(\"sendto error:\");\n\texit(1);\n\t}\n\tprintf(\"client send: %s\\n\", send_buf);\n\n\n\t//recv_num = recvfrom(sock_fd, recv_buf, sizeof(recv_buf), 0, (struct sockaddr *)&addr_serv, (socklen_t *)&len);\n\trecv_num = recvfrom(sock_fd, recv_buf, sizeof(recv_buf), 0, NULL, NULL);\n\n\n\tcout << \"recvfrom done\" <<\"\\n\";\n\n\tif(recv_num < 0)\n\t{\n\tperror(\"recvfrom error:\");\n\texit(1);\n\t}\n\n\trecv_buf[recv_num] = '\\0';\n\tprintf(\"client receive %d bytes: %s\\n\", recv_num, recv_buf);\n\n\tclose(sock_fd);\n\n\t*/\n\n\t//---------------------------------------------------------------------------------------------------------------\n\n\n\n\t// subscribe GPS\n\n\n\n\n\n\n\n\t//exit(0); // debug -------------------------------------------------------------------------------\n\n\n\t//---------------------------------------------------------------------------------------------------------------\n\n\n\tif (simulation_mode == 1){\n\t\thedg = atan2( wps_y[1]-wps_y[0] , wps_x[1]-wps_x[0] );\t //for simulation\n\t\tx0 = wps_x[0] + (wps_x[1]-wps_x[0])/1000.0f; \t\t\t//for simulation\n\t\ty0 = wps_y[0] + (wps_y[1]-wps_y[0])/1000.0f;\t\t\t//for simulation\n\t\tcout << \"simulation mode activated!\" << \"\\n\";\n\t}else{\n\t\tlat = gnss_arr[0];\t\t\t\t\t\t//for testing\n\t\tlon = gnss_arr[1];\t\t\t\t\t\t//for testing\n\t\thedg = hedg_arr[0];\t\t\t\t\t\t//for testing\n\n\t\twhile (lat == 0 || lon == 0)\n\t\t{\n\t\t\tlat = gnss_arr[0];\n\t\t\tlon = gnss_arr[1];\n\t\t\thedg = hedg_arr[0];\n\n\t\t\tros::spinOnce();\n\t\t}\n\n\t\tx0 = (lon - lon_ref)*82230.670304;\n\t\ty0 = (lat - lat_ref)*111132.944444;\n\t\thedg = hedg2ENU(hedg);// for gnss signal 0, 360 -> -pi, pi\n\t}\n\n\n\n\t//cout << \"x0\" << \":\" << x0 << 
\"\\n\";\n\t//cout << \"y0\" << \":\" << y0 << \"\\n\";\n\n\n\t// Find closest waypoint index\n\td_v2wps_min = 1000;\n\tfor (int j = 0; j < wps_num; j++) {\n\t\tx1 = wps_x[j];\n\t\ty1 = wps_y[j];\n\t\tif(j == wps_num - 1){\n\t\t\tx2 = wps_x[0];\n\t\t\ty2 = wps_y[0];\n\t\t}else{\n\t\t\tx2 = wps_x[j+1];\n\t\t\ty2 = wps_y[j+1];\n\t\t}\n\n\t\tx0_frt = x0 + d_frt*cos(hedg); \n\t\ty0_frt = y0 + d_frt*sin(hedg);\n\t\tang1 = get_ang(x0, y0, x0_frt, y0_frt, x1, y1, x2, y2); // vehicle heading vector and segment vector angle\n\t\t//ang2 = get_ang(x0, y0, x2, y2, x1, y1, x2, y2);// option 2 (but with special case)\n\t\t//ang3 = get_ang(x0, y0, x1, y1, x1, y1, x2, y2);// option 2 (but with special case)\n\t\t//d_v2seg = find_d_point_2_line ( x0, y0, x1, y1, x2, y2 ); // option 2 (but with special case)\n\t\td_v2wps = sqrt((x0-x1)*(x0-x1)+(y0-y1)*(y0-y1)); //option 1: distance to waypoint\n\n\t\t//cout << \"ang1= \" << ang1 << \"\\n\";\n\t\t//cout << \"ang2= \" << ang2 << \"\\n\";\n\n\t\tif(d_v2wps < d_v2wps_min && ang1 < 1.5708f ){ //see notes for angle 1, 2, 3\n\t\t\twps_idx = j; // save the closest segment index\n\t\t\td_v2wps_min = d_v2wps;\n\t\t}\n\t}\n\tif( d_v2wps_min == 1000 ){\n\t\tcout << \"ERROR: closest segment index not found !!!!! \" << \"\\n\";\n\t\texit(0);\n\t}else{\n\t\td_v2wps_min = 1000;\n\t\tcout << \"closest waypoint index: \" << wps_idx << \"\\n\";\n\t\tcout << \"distance to waypoint index \" << wps_idx << \": \" << d_v2wps << \"\\n\";\n\t}\n\n\t// While loop\n\twhile(ros::ok()){\n\n\t\tpos_up = false;\n\t\tang_up = false;\n\n\t\tcout<<\"while loop: Value of variable i is: \"<<i << \"-------------------------------------------------------------------\" <<endl;\n\t\ti++;\n\n\t\t// Publish waypoints and lables for rviz visualization==========================================================================\n\n\t\tif(i<50){ // need to publish for 10+ times and then received by rviz, why?\n\t\t\t// Publish waypints using visualization_msgs::Marker::POINTS================================\n\t\t\tvisualization_msgs::Marker points;// %Tag(MARKER_INIT)%\n\t\t\tpoints.header.frame_id = \"frame\";\n\t\t\tpoints.header.stamp = ros::Time::now();\n\t\t\tpoints.ns = \"waypoints\";\n\t\t\tpoints.action = visualization_msgs::Marker::ADD;\n\t\t\tpoints.pose.orientation.w = 1.0;\n\t\t\tpoints.id = 0; // %Tag(ID)%\n\t\t\tpoints.type = visualization_msgs::Marker::POINTS; // %Tag(TYPE)%\n\t\t\tpoints.scale.x = 0.2;// %Tag(SCALE)% POINTS markers use x and y scale for width/height respectively\n\t\t\tpoints.scale.y = 0.2;\n\t\t\tpoints.color.g = 1.0f;// %Tag(COLOR)% // Points are green\n\t\t\tpoints.color.a = 1.0;\n\n\t\t\tfor(int j = 0; j < wps_num; j++){\n\t\t\t\tgeometry_msgs::Point p;\n\t\t\t\tp.x = wps_x[j];\n\t\t\t\tp.y = wps_y[j];\n\t\t\t\tpoints.points.push_back(p);\n\t\t\t}\n\t\t\t//marker_pub.publish(points);\n\n\t\t\t// Publish raw waypints using visualization_msgs::Marker::POINTS=============================\n\t\t\tvisualization_msgs::Marker points1;// %Tag(MARKER_INIT)%\n\t\t\tpoints1.header.frame_id = \"frame\";\n\t\t\tpoints1.header.stamp = ros::Time::now();\n\t\t\tpoints1.ns = \"waypoints_raw\";\n\t\t\tpoints1.action = visualization_msgs::Marker::ADD;\n\t\t\tpoints1.pose.orientation.w = 1.0;\n\t\t\tpoints1.id = 0; // %Tag(ID)%\n\t\t\tpoints1.type = visualization_msgs::Marker::POINTS; // %Tag(TYPE)%\n\t\t\tpoints1.scale.x = 0.4;// %Tag(SCALE)% POINTS markers use x and y scale for width/height respectively\n\t\t\tpoints1.scale.y = 0.4;\n\t\t\tpoints1.color.g = 1.0f;// %Tag(COLOR)% // Points 
are green\n\t\t\tpoints1.color.r = 0.0f;// red\n\t\t\tpoints1.color.a = 1.0;\n\n\t\t\tfor(int j = 0; j < wps_num_raw; j++){\n\t\t\t\tgeometry_msgs::Point p1;\n\t\t\t\tp1.x = wps_x_raw[j];\n\t\t\t\tp1.y = wps_y_raw[j];\n\t\t\t\tpoints1.points.push_back(p1);\n\t\t\t}\n\t\t\tmarker_pub.publish(points1);\n\n\n\t\t\t// Publish waypoint's label using Marker\n\t\t\tfor(int j = 0; j < wps_num; j++){\n\t\t\t\tvisualization_msgs::Marker label;\n\t\t\t\tlabel.header.frame_id=\"frame\";\n\t\t\t\tlabel.header.stamp = ros::Time::now();\n\t\t\t\tlabel.ns = \"waypoint_label\";\n\t\t\t\tlabel.action = visualization_msgs::Marker::ADD;\n\t\t\t\tlabel.pose.orientation.w = 1.0;\n\t\t\t\tlabel.id =j; // Marker id should be unique. Any marker sent with the same namespace and id will overwrite the old one\n\t\t\t\tlabel.type = visualization_msgs::Marker::TEXT_VIEW_FACING;\n\n\t\t\t\tlabel.scale.x = 0.5;\n\t\t\t\tlabel.scale.y = 0.5;\n\t\t\t\tlabel.scale.z = 0.5;\n\n\t\t\t\tlabel.color.b = 1.0f;\n\t\t\t\tlabel.color.g = 1.0f;\n\t\t\t\tlabel.color.r = 1.0f;\n\t\t\t\tlabel.color.a = 1.0;\n\n\t\t\t\tlabel.pose.position.x = wps_x[j]; //???????????????????????????????????????????\n\t\t\t\tlabel.pose.position.y = wps_y[j];\n\t\t\t\tlabel.pose.position.z = -2.0;\n\t\t\t\tlabel.pose.orientation.x = 0.0;\n\t\t\t\tlabel.pose.orientation.y = 0.0;\n\t\t\t\tlabel.pose.orientation.z = 0.0;\n\t\t\t\tlabel.pose.orientation.w = 1.0;\n\t\t\t\tostringstream str;\n\t\t\t\tstr<<j;\n\t\t\t\tlabel.text=str.str();\n\t\t\t\tmarker_pub.publish(label);\n\t\t\t}\n\n\t\t}\n\n\n\t\t// publish vehicle's coordinates: Marker - line_list ====================================================\n\t\tvisualization_msgs::Marker vehicle_coordinates;\n\t\tvehicle_coordinates.header.frame_id = \"frame\";\n\t\tvehicle_coordinates.header.stamp = ros::Time::now();\n\t\tvehicle_coordinates.ns = \"vehicle_coordinates\";\n\t\tvehicle_coordinates.action = visualization_msgs::Marker::ADD;\n\t\tvehicle_coordinates.pose.orientation.w = 1.0;\n\t\tvehicle_coordinates.id = i; // Marker id should be unique\n\t\tvehicle_coordinates.type = visualization_msgs::Marker::POINTS;\n\t\tvehicle_coordinates.scale.x = 0.2;\n\t\tvehicle_coordinates.scale.y = 0.2;\n\t\tvehicle_coordinates.color.r = 1.0; //red\n\t\tvehicle_coordinates.color.g = 0.0; //\n\t\tvehicle_coordinates.color.b = 0.0; //blue\n\t\tvehicle_coordinates.color.a = 1.0;\n\t\tgeometry_msgs::Point p0;\n\t\tp0.x = x0;\n\t\tp0.y = y0;\n\t\tvehicle_coordinates.points.push_back(p0);\n\t\tmarker_pub.publish(vehicle_coordinates);\n\n\n\t\t// publish vehicle's propeller_shaft: Marker - line_list ====================================================\n\t\tvisualization_msgs::Marker line_list;// %Tag(MARKER_INIT)%\n\t\tline_list.header.frame_id = \"frame\";\n\t\tline_list.header.stamp = ros::Time::now();\n\t\tline_list.ns = \"propeller_shaft\";\n\t\tline_list.action = visualization_msgs::Marker::ADD;\n\t\tline_list.pose.orientation.w = 1.0;\n\t\tline_list.id = 0; // Marker id should be unique\n\t\tline_list.type = visualization_msgs::Marker::LINE_LIST;\n\t\tline_list.scale.x = 0.1;// LINE_STRIP/LINE_LIST markers use only the x component of scale, for the line width\n\t\tline_list.color.r = 0.0; //red\n\t\tline_list.color.g = 0.0; //\n\t\tline_list.color.b = 0.0; //blue\n\t\tline_list.color.a = 1.0;\n\t\tgeometry_msgs::Point p;\n\t\tp.x = x0;\n\t\tp.y = y0;\n\t\tline_list.points.push_back(p);\n\t\tp.x = x0_frt;\n\t\tp.y = y0_frt;\n\t\tline_list.points.push_back(p);\n\t\tmarker_pub.publish(line_list);\n\n\n\n\t\t// publish 3d 
vehicle frame: Marker - cube =================================================================\n\t\tvisualization_msgs::Marker box1;\n\t\tvisualization_msgs::Marker box2;\n\t\tbox1.header.frame_id = box2.header.frame_id = \"frame\";\n\t\tbox1.header.stamp = box2.header.stamp = ros::Time::now();\n\t\tbox1.ns = \"3d_vehicle_cubic1\";\n\t\tbox2.ns = \"3d_vehicle_cubic2\";\n\t\tbox1.id = 0;\n\t\tbox2.id = 0;\n\t\tuint32_t shape = visualization_msgs::Marker::CUBE;\n\t\tbox1.type = box2.type = shape;\n\t\tbox1.action = box2.action = visualization_msgs::Marker::ADD;\n\t\tbox1.pose.position.x = x0;\n\t\tbox1.pose.position.y = y0;\n\t\tbox1.pose.position.z = 2;\n\t\tbox2.pose.position.x = x0_frt;\n\t\tbox2.pose.position.y = y0_frt;\n\t\tbox2.pose.position.z = 1.6;\n\t\t//box.pose.orientation.x = 1.0;\n\t\t//box.pose.orientation.y = 1.0;\n\t\t//box.pose.orientation.z = 1.0;\n\t\t//box.pose.orientation.w = 1.0;\n\t\tbox1.pose.orientation = box2.pose.orientation = tf::createQuaternionMsgFromYaw(hedg); //convert euler (yaw) to quaternion for rviz visualization; header file: <tf/transform_datatypes.h>\n\n\t\tbox1.scale.x = 4.0;\n\t\tbox1.scale.y = 2.3;\n\t\tbox1.scale.z = 2.5;\n\n\t\tbox2.scale.x = 1.2;\n\t\tbox2.scale.y = 2.0;\n\t\tbox2.scale.z = 1.8;\n\n\t\tbox1.color.r = box2.color.r = 1.0f;\n\t\tbox1.color.g = box2.color.g = 1.0f;\n\t\tbox1.color.b = box2.color.b = 1.0f;\n\t\tbox1.color.a = 0.5;\n\t\tbox2.color.a = 0.5;\n\n\t\tbox1.lifetime = box2.lifetime = ros::Duration();\n\t\tmarker_pub.publish(box1);\n\t\tmarker_pub.publish(box2);\n\n\n\t\t//publish 4 wheels: Marker - cylinder ======================================================================\n\n\t\tfloat frx, fry, flx, fly, rrx, rry, rlx, rly;\n\t\tfloat d1 = 2.5; // wheelbase\n\t\tfloat d2 = 1.0; // half track distance\n\n\t\tfrx = x0 + cos(hedg)*d1 - sin(hedg)*(-d2);\n\t\tfry = y0 + sin(hedg)*d1 + cos(hedg)*(-d2);\n\t\tflx = x0 + cos(hedg)*d1 - sin(hedg)*(d2);\n\t\tfly = y0 + sin(hedg)*d1 + cos(hedg)*(d2);\n\t\trrx = x0 - sin(hedg)*(-d2);\n\t\trry = y0 + cos(hedg)*(-d2);\n\t\trlx = x0 - sin(hedg)*(d2);\n\t\trly = y0 + cos(hedg)*(d2);\n\n\t\tvisualization_msgs::Marker frw, flw, rrw, rlw; //front left wheel\n\t\tfrw.header.frame_id = flw.header.frame_id = rrw.header.frame_id = rlw.header.frame_id = \"frame\";\n\t\tfrw.header.stamp = flw.header.stamp = rrw.header.stamp = rlw.header.stamp = ros::Time::now();\n\t\tfrw.ns = flw.ns = rrw.ns = rlw.ns = \"wheel\";\n\t\tfrw.id = 0;\n\t\tflw.id = 1;\n\t\trrw.id = 2;\n\t\trlw.id = 3;\n\n\t\tuint32_t shape1 = visualization_msgs::Marker::CYLINDER;\n\n\t\tfrw.type = flw.type = rrw.type = rlw.type = shape1;\n\t\tfrw.action = flw.action = rlw.action = rrw.action = visualization_msgs::Marker::ADD;\n\t\tfrw.pose.position.x = frx;\n\t\tfrw.pose.position.y = fry;\n\t\tflw.pose.position.x = flx;\n\t\tflw.pose.position.y = fly;\n\t\trrw.pose.position.x = rrx;\n\t\trrw.pose.position.y = rry;\n\t\trlw.pose.position.x = rlx;\n\t\trlw.pose.position.y = rly;\n\n\t\tfrw.pose.orientation = flw.pose.orientation = rrw.pose.orientation = rlw.pose.orientation = tf::createQuaternionMsgFromRollPitchYaw( 0.0f, 1.5707f,hedg+1.5707f); //convert euler (yaw) to quaternion for rviz visualization; header file: <tf/transform_datatypes.h>\n\n\t\tfrw.scale.x = flw.scale.x = rrw.scale.x = rlw.scale.x = 1.0;\n\t\tfrw.scale.y = flw.scale.y = rrw.scale.y = rlw.scale.y = 1.0;\n\t\tfrw.scale.z = flw.scale.z = 0.25;\n\t\trrw.scale.z = rlw.scale.z = 0.5;\n\n\t\tfrw.color.r = flw.color.r = rrw.color.r = rlw.color.r = 0.0f;\n\t\tfrw.color.g = 
flw.color.g = rrw.color.g = rlw.color.g = 0.0f;\n\t\tfrw.color.b = flw.color.b = rrw.color.b = rlw.color.b = 0.0f;\n\t\tfrw.color.a = flw.color.a = rrw.color.a = rlw.color.a = 1.0;\n\n\t\tfrw.lifetime = flw.lifetime = rrw.lifetime = rlw.lifetime = ros::Duration();\n\n\t\tmarker_pub.publish(frw);\n\t\tmarker_pub.publish(flw);\n\t\tmarker_pub.publish(rrw);\n\t\tmarker_pub.publish(rlw);\n\n\n\t\t//publish ISUZU logos using visualization_msgs::Marker::TEXT_VIEW_FACING ======================================================================\n\n\t\tfloat logo1x, logo1y;\n\t\td1 = 0.0; // wheelbase\n\t\td2 = 0.0; // half track distance\n\n\t\tlogo1x = x0 + cos(hedg)*d1 - sin(hedg)*(-d2);\n\t\tlogo1y = y0 + sin(hedg)*d1 + cos(hedg)*(-d2);\n\n\n\t\tvisualization_msgs::Marker logo1;\n\t\tlogo1.header.frame_id = \"frame\";\n\t\tlogo1.header.stamp = ros::Time::now();\n\t\tlogo1.ns = \"logo\";\n\t\tlogo1.id = 0;\n\n\n\t\tuint32_t shape2 = visualization_msgs::Marker::TEXT_VIEW_FACING;\n\n\t\tlogo1.type = shape2;\n\t\tlogo1.action = visualization_msgs::Marker::ADD;\n\t\tlogo1.pose.position.x = logo1x;\n\t\tlogo1.pose.position.y = logo1y;\n\t\tlogo1.pose.position.z = 4;\n\n\n\t\tlogo1.pose.orientation = tf::createQuaternionMsgFromRollPitchYaw( 0.0f, 0.0f,hedg+1.5707f);\n\n\t\tlogo1.scale.z = 0.8;\n\n\t\tlogo1.color.r = 1.0f;\n\t\tlogo1.color.g = 0.0f;\n\t\tlogo1.color.b = 0.0f;\n\t\tlogo1.color.a = 1.0;\n\n\t\tlogo1.lifetime = ros::Duration();\n\n\t\tlogo1.text=\"ISUZU\";\n\n\t\tmarker_pub.publish(logo1);\n\n\n\n\t\t// broadcast another frame: parent: \"frame\" -> child \"car\" (to add a target view on the vehicle)=======================================\n\t\ttransform.setOrigin( tf::Vector3(x0, y0, 0.0) );\n\t\ttransform.setRotation( tf::Quaternion(0, 0, 0, 1) );\n\t\tbr.sendTransform(tf::StampedTransform(transform, ros::Time::now(), \"frame\", \"car\"));\n\n\n\t\t//================================================================================\n\n\n\n\n\t\t// Get GPS\n\n\n\n\t\tif(simulation_mode != 1){\n\t\t\tlat = gnss_arr[0];\n\t\t\tlon = gnss_arr[1];\n\t\t\thedg = hedg_arr[0];\n\n\t\t\t// while (pos_up != true && ang_up != true)\n\t\t\t// {\n\t\t\t// \tlat = gnss_arr[0];\n\t\t\t// \tlon = gnss_arr[1];\n\t\t\t// \thedg = hedg_arr[0];\n\n\t\t\t// \tros::spinOnce();\n\t\t\t// }\n\n\t\t\tpos_up = false;\n\t\t\tang_up = false;\n\t\t\tx0 = (lon - lon_ref)*82230.670304; \t\t\t\t//for testing\n\t\t\ty0 = (lat - lat_ref)*111132.944444;\t\t\t\t//for testing\n\t\t\thedg = hedg2ENU(hedg);// for gnss signal 0, 360 -> -pi, pi\t//for testing\n\n\t\t\tcout<< \"lat = \" << lat <<\"\\n\";\n\t\t\tprintf(\"lat = %0.7f\\n\", lat);\n\t\t\tcout<< \"lon = \" << lon <<\"\\n\";\n\n\t\t}\n\n\n\t\t//cout<< \"hedg =\" <<hedg <<\"\\n\";\n\t\t//printf(\"heading = %f\\n\", hedg);\n\t\t//printf(\"x0: %f, y0: %f\\n\",x0, y0);\n\n\n\n\t\t// Create a waypoint window with waypoint index\n\t\tfor (int j = 0; j < wps_wdw_num; j++ ){\n\t\t\tint idx_crt = wps_idx - 1 + j;\n\n\t\t\tif(idx_crt < wps_num && idx_crt >= 0){\n\t\t\t\twps_wdw[j] = idx_crt;\n\t\t\t}else if(idx_crt >= wps_num){\n\t\t\t\twps_wdw[j] = idx_crt - wps_num;\n\t\t\t}else if(idx_crt < 0){\n\t\t\t\twps_wdw[j] = idx_crt + wps_num;\n\t\t\t}\n\t\t}\n\n\t\tfor(int j = 0; j < wps_wdw_num; j++){\n\t\t\tif(j == 0){cout << \"waypoint window: [ \";}\n\t\t\tcout << wps_wdw[j] << \" \";\n\t\t\tif(j == wps_wdw_num -1){cout << \"] \\n\";}\n\t\t}\n\n\t\t//cout << \"first point in waypoint window: \" << wps_wdw[0] <<\"\\n\";\t\t\n\t\t//cout << \"second point in waypoint window: \" << 
wps_wdw[1] <<\"\\n\";\n\n\n\t\t// Update closest waypoint index from current one to the next one\n\t\td_v2wps_min = 1000;\n\t\tfor (int j = 0; j < 3; j++) {\n\t\t\tx1 = wps_x[wps_wdw[j]];\n\t\t\ty1 = wps_y[wps_wdw[j]];\n\t\t\tx2 = wps_x[wps_wdw[j+1]];\n\t\t\ty2 = wps_y[wps_wdw[j+1]];\n\t\t\tx3 = wps_x[wps_wdw[j+2]];\n\t\t\ty3 = wps_y[wps_wdw[j+2]];\n\t\t\tx4 = wps_x[wps_wdw[j+3]];\n\t\t\ty4 = wps_y[wps_wdw[j+3]];\n\t\t\tx5 = wps_x[wps_wdw[j+4]];\n\t\t\ty5 = wps_y[wps_wdw[j+4]];\n\t\t\tx6 = wps_x[wps_wdw[j+5]];\n\t\t\ty6 = wps_y[wps_wdw[j+5]];\n\t\t\tcout << \"x4=\" << x4 << \"; y4=\" << y4 << \"\\n\";\n\t\t\tcout << \"x5=\" << x5 << \"; y5=\" << y5 << \"\\n\";\n\t\t\tcout << \"x6=\" << x6 << \"; y6=\" << y6 << \"\\n\";\n\n\n\t\t\tx0_frt = x0 + d_frt*cos(hedg); \n\t\t\ty0_frt = y0 + d_frt*sin(hedg);\n\t\t\t//ang1 = get_ang(x0, y0, x0_frt, y0_frt, x1, y1, x2, y2); // vehicle heading vector and segment vector angle\n\t\t\t//ang2 = get_ang(x0, y0, x2, y2, x1, y1, x2, y2); // option 2 (but with special case)\n\t\t\t//ang3 = get_ang(x0, y0, x1, y1, x1, y1, x2, y2); // option 2 (but with special case)\n\t\t\t//d_v2seg = find_d_point_2_line ( x0, y0, x1, y1, x2, y2 ); // option 2 (but with special case)\n\t\t\td_v2wps = sqrt((x0-x1)*(x0-x1)+(y0-y1)*(y0-y1)); //option 1: find distance to waypoint\n\t\t\t//cout << \"ang1= \" << ang1 << \"\\n\";\n\t\t\t//cout << \"ang2= \" << ang2 << \"\\n\";\n\t\t\t//cout << \"ang3= \" << ang3 << \"\\n\";\n\t\t\t//cout << \"distance to waypoint index \" << wps_wdw[j] << \"-\" << wps_wdw[j+1] <<\":\" << d_v2wps << \"\\n\";\n\n\n\t\t\tif(d_v2wps < d_v2wps_min ){ \n\t\t\t\twps_idx = wps_wdw[j]; // update the closest global waypoint index\n\t\t\t\td_v2wps_min = d_v2wps;\n\t\t\t\twps_wdw_idx = j; // update window waypoint index\n\t\t\t}\n\n\t\t\t//cout<< \"d v 2 wps =\" <<d_v2wps <<\"\\n\";\n\t\t\t//cout<< \"d v 2 wps min =\" <<d_v2wps_min <<\"\\n\";\t\t\t\n\t\t}\n\t\tif( d_v2wps_min == 1000 ){\n\t\t\tcout << \"ERROR: closest segment index not found !!!!! \" << \"\\n\";\n\t\t\texit(0);\t\n\t\t}else{\n\t\t\tcout << \"closest waypoint index: [ ...... \" << wps_wdw[wps_wdw_idx] << \" ...... ] \\n\";\n\t\t\tcout << \"distance to waypoint index \" << wps_idx << \": \" << d_v2wps_min << \"\\n\";\n\t\t}\n\n\t\td_v2wps_min = 1000;\n\n\n\t\n\n\t\t// Find r / curvature ----------------------------------------------------------------------------------------------\n\t\tx1 = wps_x[wps_wdw[wps_wdw_idx]];\n\t\ty1 = wps_y[wps_wdw[wps_wdw_idx]];\n\t\tx2 = wps_x[wps_wdw[wps_wdw_idx+1]];\n\t\ty2 = wps_y[wps_wdw[wps_wdw_idx+1]];\n\t\tx3 = wps_x[wps_wdw[wps_wdw_idx+2]];\n\t\ty3 = wps_y[wps_wdw[wps_wdw_idx+2]];\n\t\tcout << \"x1:\" << x1 << \" y1:\"<<y1<<\"\\n\"<<\"x2:\"<<x2<<\" y2:\"<<y2<<\"\\n\"<<\"x3:\"<<x3<<\" y3:\"<<y3<<\"\\n\";\n\t\tget_r_center (x1, y1, x2, y2, x3, y3, r, cx0, cy0);\n\t\t//cout << \"cx0:\" << cx0 <<\", \" << \"cy0:\" << cy0 << \",\" << \"r:\" << r << \"\\n\";\n\n\t\t// Determine steering control mode ---------------------------------------------------------------------------------\n\t\tif(r>-50 && r<=0 || r>0 && r<50){\n\t\t\tstr_ctrl_mode = 0;\n\n\t\t}else{\n\t\t\tstr_ctrl_mode = 1;\n\n\t\t}\n\n\t\t//str_ctrl_mode = 2; // 2.MPC 3. 
pure pursuit --------------------------------------------------------------------------------> change mode manually!\n\n\n\t\t// Run steering controller (switch case/ str mode) -------------------------------------------------------------------\n\t\tswitch(str_ctrl_mode){\n\n\t\t\tcase 0: // 3pts curv pid\n\t\t\t\t{\n\t\t\t\tcout << \"3pts curve tracking PID mode is activated now!\" << \"\\n\";\n\t\t\t\td_o2v = sqrt((cx0-x0_frt)*(cx0-x0_frt)+(cy0-y0_frt)*(cy0-y0_frt));\n\t\t\t\toffset_ctrl = abs(r) - d_o2v;\n\t\t\t\tif (r < 0){\n\t\t\t\t\toffset_ctrl = - offset_ctrl;\n\t\t\t\t}\n\t\t\t\tKp = 60; // P gain: 1000\n\t\t\t\td_offset = offset_ctrl - offset_ctrl_pre;\n\t\t\t\toffset_ctrl_pre = offset_ctrl;\n\t\t\t\tif ( abs(offset_ctrl) > 0.3 ){\n\t\t\t\t\tKd = 100; // D gain: 3000\n\t\t\t\t}else{\n\t\t\t\t\tKd = 30; // D gain: 1000\n\t\t\t\t}\n\t\t\t\tstr_wheel_ang_ref = VL22_model_r2str(r);\n\n\t\t\t\tcout << \"str_wheel_ang_ref = \" << str_wheel_ang_ref << \"\\n\";\n\n\t\t\t\tstr_wheel_ang = str_wheel_ang_ref + Kp*offset_ctrl + Kd*d_offset;\n\n\n\t\t\t\tcout << \"r=\" << r << \"; d_o2v=\" << d_o2v << \"; offset=\" << offset_ctrl << \"; d_offset=\" << d_offset << \"\\n\";\n\t\t\t\tcout << \"str_wheel_ang_ref =\" << str_wheel_ang_ref << \"; str_wheel_ang =\" << str_wheel_ang << \"\\n\";\n\n\t\t\t\tbreak;\n\t\t\t\t}\n\n\t\t\tcase 1: // 2pts straight pid\n\t\t\t\t{\n\t\t\t\tcout << \"2pts straight line tracking PID mode is activated now\" << \"\\n\";\n\t\t\t\td_p2line = find_d_point_2_line( x0_frt, y0_frt, x1, y1, x2, y2 );\n\t\t\t\tside2line = find_side_2_line( x0_frt, y0_frt, x1, y1, x2, y2 );\n\t\t\t\toffset_ctrl = d_p2line * side2line;\n\t\t\t\tif ( abs(offset_ctrl) > 1 ){\n\t\t\t\t\tKp = 100; // 500\n\t\t\t\t}else{\n\t\t\t\t\tKp = 80; // 300\n\t\t\t\t}\n\t\t\t\td_offset = offset_ctrl - offset_ctrl_pre;\n\t\t\t\toffset_ctrl_pre = offset_ctrl;\n\t\t\t\tif ( abs(offset_ctrl) > 0.3 ){\n\t\t\t\t\tKd = 250; // 3000\n\t\t\t\t}else{\n\t\t\t\t\tKd = 500; // 1000\n\t\t\t\t}\n\t\t\t\tstr_wheel_ang_ref = 0;\n\n\t\t\t\tcout << \"str_wheel_ang_ref = \" << str_wheel_ang_ref << \"\\n\";\n\n\t\t\t\tstr_wheel_ang = str_wheel_ang_ref + Kp*offset_ctrl + Kd*d_offset;\n\t\t\t\tcout << \"offset=\" << offset_ctrl << \"; str_wheel_ang =\" << str_wheel_ang << \"\\n\";\n\t\t\t\tbreak;\n\t\t\t\t}\n\n\t\t\tcase 2:// udacity MPC\n\t\t\t\t{\n\t\t\t\tcout << \"MPC mode is activated now!\" << \"\\n\";\t\t\t\t\n\t\t\t\tvector<double> ptsx {wps_x[wps_wdw[wps_wdw_idx]], wps_x[wps_wdw[wps_wdw_idx+1]], wps_x[wps_wdw[wps_wdw_idx+2]], wps_x[wps_wdw[wps_wdw_idx+3]], wps_x[wps_wdw[wps_wdw_idx+4]], wps_x[wps_wdw[wps_wdw_idx+5]], wps_x[wps_wdw[wps_wdw_idx+6]], wps_x[wps_wdw[wps_wdw_idx+7]], wps_x[wps_wdw[wps_wdw_idx+8]], wps_x[wps_wdw[wps_wdw_idx+9]], wps_x[wps_wdw[wps_wdw_idx+10]], wps_x[wps_wdw[wps_wdw_idx+11]]};\n\t\t\t\tvector<double> ptsy {wps_y[wps_wdw[wps_wdw_idx]], wps_y[wps_wdw[wps_wdw_idx+1]], wps_y[wps_wdw[wps_wdw_idx+2]], wps_y[wps_wdw[wps_wdw_idx+3]], wps_y[wps_wdw[wps_wdw_idx+4]], wps_y[wps_wdw[wps_wdw_idx+5]], wps_y[wps_wdw[wps_wdw_idx+6]], wps_y[wps_wdw[wps_wdw_idx+7]], wps_y[wps_wdw[wps_wdw_idx+8]], wps_y[wps_wdw[wps_wdw_idx+9]], wps_y[wps_wdw[wps_wdw_idx+10]], wps_y[wps_wdw[wps_wdw_idx+11]]};\n\t\t\t\t\n\t\t\t //vector<double> ptsx {wps_x[wps_wdw[wps_wdw_idx]], wps_x[wps_wdw[wps_wdw_idx+1]], wps_x[wps_wdw[wps_wdw_idx+2]], wps_x[wps_wdw[wps_wdw_idx+3]]};\n\t\t\t\t//vector<double> ptsy {wps_y[wps_wdw[wps_wdw_idx]], wps_y[wps_wdw[wps_wdw_idx+1]], wps_y[wps_wdw[wps_wdw_idx+2]], 
wps_y[wps_wdw[wps_wdw_idx+3]]};\n\t\t\t\t\n\t\t\t\tdouble px = x0;\n\t\t\t\tdouble py = y0;\n\t\t\t\tdouble psi = hedg;\n\t\t\t\tdouble v = 3; // 0.89m/s=2mph\n\n\n\t\t\t\tdouble steer_value;\n\t\t\t\tdouble throttle_value;\n\t\t\t\tvector<double> mpc_x_v;\n\t\t\t\tvector<double> mpc_y_v;\n\n\t\t\t\tvector<double> next_x_v(ptsx.size());\n\t\t\t\tvector<double> next_y_v(ptsx.size());\n\n\n\n\n\t\t\t\tvector<double> x_ref(11);\n\t\t\t\tvector<double> y_ref(11);\n\n\n\n\t\t\t\ttry\n\t\t\t\t{\n\t\t\t\t\tmpc_fun(ptsx, ptsy, px, py, psi, v, steer_value, throttle_value, mpc_x_v, mpc_y_v, next_x_v, next_y_v, x_ref, y_ref);\n\t\t\t\t}\n\t\t\t\tcatch(const std::exception& e)\n\t\t\t\t{\n\t\t\t\t\tcout << \"mpc function error.......\" << '\\n';\n\t\t\t\t}\n\t\t\t\tmpc_x_vals = mpc_x_v;\n\t\t\t\tmpc_y_vals = mpc_y_v;\n\n\n\n\t\t\t\tnext_x_vals = next_x_v;\n\t\t\t\tnext_y_vals = next_y_v;\n\n\t\t\t\tmpc_x_ref = x_ref; \n\t\t\t\tmpc_y_ref = y_ref;\n\n\n\n\t\t\t\t//str_ang = mpc_func(......);\n\t\t\t\t//str_step = str_ang(udacity)/25000*720??\n\t\t\t\tcout << \"steer_value =\" << steer_value << \"\\n\";\n\t\t\t\tstr_step = steer_value / deg2rad(25);\n\t\t\t\tstr_step = 25000*str_step;\n\n\n\n\n\n\t\t\t\tcout << \"size next_x_vals =\" << next_x_vals.size() << endl; \n\t\t\t\tcout << \"size mpc_x_ref =\" << mpc_x_ref.size() << endl; // = 0 ???!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n\t\t\t\t//exit(0);\n\n\n\n\n\n\t\t\t\tbreak;\n\t\t\t\t}\n\n\t\t\tcase 3://pure pursuit\t\n\t\t\t\t{\n\t\t\t\tcout << \"pure pursuit mode is activated now!\" << \"\\n\";\n\t\t\t\td_o2v = sqrt((cx0-x0_frt)*(cx0-x0_frt)+(cy0-y0_frt)*(cy0-y0_frt));\n\t\t\t\toffset_ctrl = abs(r) - d_o2v;\n\t\t\t\tif (r < 0){\n\t\t\t\t\toffset_ctrl = - offset_ctrl;\n\t\t\t\t}\n\n\t\t\t\tstr_step = trj_plan_pp( x0, y0, hedg, x1, y1, x2, y2 );\n\t\t\t\t}\n\n\t\t\t\n\t\t}\n\n\n\n\n\t\t//cout << \"size next_x_vals =\" << next_x_vals.size() << endl; \n\t\t//cout << \"size mpc_x_ref =\" << mpc_x_ref.size() << endl; // = 0 ???!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n\n\n\n\t\t// Transmit steering cmd ----------------------------------------------------------------------------------------------------------\n\n\t\t/*\n\t\t// +++++++++ crew cab 905 testing +++++++++++++++++\n\n\t\tstr_cmd\t= -str_step; // 0828 larger steering angle // use neg sign to match mbed\n\t\t\t\t // send an array with 20 elements to Mbed\n\n\t\tcout << \"str_cmd =\" << str_cmd << \"\\n\";\n\n\t\tint str_cmd_int = int(str_cmd);\n\n\t\tint str_cmd_string[2];\n\n\t\tstr_cmd_string[0] = (str_cmd_int >> 8) & 0xFF;\n\t\tstr_cmd_string[1] = str_cmd_int & 0xFF;\n\t\tcout << \"str_cmd_string [0] =\" << str_cmd_string[0] << \"\\n\";\n\t\tcout << \"str_cmd_string [1] =\" << str_cmd_string[1] << \"\\n\";\n\n\t\tstd_msgs::Int16 str_msg;\n\n\t\t//str_msg.data = os.str();\n\t\tstr_msg.data = str_cmd_int;\n\n\t\t//float_pub.publish(str_msg);\n\t\tstr_pub.publish(str_msg);\n\n\t\t*/\n\t\t\n\n\t\t// ++++++++++ vl22 22787 testing +++++++++++\n\t\t\n\t\t\n\t\tstd_msgs::Int16MultiArray str_cmd_msg; // type: std_msgs::Int16 / topic name: \"str_wheel_ang_cmd\"\n\n\n\t\tint16_t str_wheel_ang_cmd = int16_t(str_wheel_ang);\n\n\tcout << \"str_wheel_ang_cmd\" << str_wheel_ang_cmd<<\"\\n\";\n\n cout << \"mpc debug 1\" << \"\\n\";\n\t\tstr_cmd_msg.data.clear();\n\t\tstr_cmd_msg.data.push_back(str_wheel_ang_cmd);\n\n cout << \"mpc debug 2\" << \"\\n\";\n\n\t\tstr_pub.publish(str_cmd_msg);\n\n\n cout << \"mpc debug 3\" << \"\\n\";\n\n\n\n\n\n\n\t\t//exit(0); // debug 
-------------------------------------------------------------------------------\n\n\n\n\n\t\t// Publish the error ============================================================================================\n\t\tstd_msgs::Float64 error;\n\t\terror.data = offset_ctrl;\n\t\tfloat_pub.publish(error);\n\n\t\t// publish planned trjectory by mpc\n\t\ttheta1 = atan2(y1 - cy0, x1 - cx0); // find the starting point based on the parametric equation of the circle\n\t\ttheta3 = atan2(y3 - cy0, x3 - cx0); // find the starting point based on the parametric equation of the circle\n\t\tif(theta3 - theta1 > 3.1415926){\n\t\t\ttheta3 = theta3 - 2*3.1415926;\n\t\t}else if(theta3 - theta1 < -3.1415926){\n\t\t\ttheta3 = theta3 + 2*3.1415926;\n\t\t}\n\n\t\tvisualization_msgs::Marker traj;\n\t\ttraj.header.frame_id = \"frame\";\n\t\ttraj.header.stamp = ros::Time::now();\n\t\ttraj.ns = \"trajectory\";\n\t\ttraj.action = visualization_msgs::Marker::ADD;\n\t\ttraj.id = 0; // Marker id should be unique\n\t\ttraj.type = visualization_msgs::Marker::POINTS;\n\t\ttraj.scale.x = 0.5;\n\t\ttraj.scale.y = 0.5;\n\t\ttraj.scale.z = 0.5;\n\t\ttraj.color.r = 0.0;\n\t\ttraj.color.g = 0.0;\n\t\ttraj.color.b = 1.0; //blue\n\t\ttraj.color.a = 1.0;\n\n\t\tif(str_ctrl_mode == 0){\n\t\t\tfor(int j=0;j<21;j++){\n\t\t\t\tgeometry_msgs::Point p2;\n\t\t\t\tp2.x = cx0 + abs(r)*cos(theta1 + j*(theta3 - theta1)/20.0f);\n\t\t\t\tp2.y = cy0 + abs(r)*sin(theta1 + j*(theta3 - theta1)/20.0f);\n\t\t\t\ttraj_x[j] = p2.x;\n\t\t\t\ttraj_y[j] = p2.y;\n\n\t\t\t\t//cout<<\"theta1=\"<<theta1<<\"; theta3=\"<<theta3<<\"\\n\";\n\t\t\t\t//cout<<\"traj_x = \" << p2.x <<\"; traj_y = \" << p2.y << \"\\n\";\n\t\t\t\ttraj.points.push_back(p2);\n\n\t\t\t}\n\t\t}else if(str_ctrl_mode == 1){\n\t\t\tfor(int j=0;j<21;j++){\n\t\t\t\tgeometry_msgs::Point p2;\n\t\t\t\tp2.x = x1 + j*(x3 - x1)/20.0f;\n\t\t\t\tp2.y = y1 + j*(y3 - y1)/20.0f;\n\t\t\t\ttraj_x[j] = p2.x;\n\t\t\t\ttraj_y[j] = p2.y;\n\t\t\t\ttraj.points.push_back(p2);\n\t\t\t}\n\t\t}else if(str_ctrl_mode == 2){\n\t\t\tfor(int j=0;j<mpc_x_vals.size();j++){\n\t\t\t\tgeometry_msgs::Point p2;\n\t\t\t\tp2.x = x0+mpc_x_vals[j]*cos(-hedg)+mpc_y_vals[j]*sin(-hedg);\n\t\t\t\tp2.y = y0-mpc_x_vals[j]*sin(-hedg)+mpc_y_vals[j]*cos(-hedg);\n\t\t\t\ttraj_x[j] = p2.x;\n\t\t\t\ttraj_y[j] = p2.y;\n\t\t\t\ttraj.points.push_back(p2);\n\t\t\t\t// cout << \"mpc_x_vals[\" << j << \"]]=\" << traj_x[j] << \"; mpc_y_vals[\" << j << \"]]=\" << traj_y[j] << '\\n';\n\t\t\t}\n\t\t\tcout << \"mpc.size = \" << mpc_x_vals.size() << '\\n';\n\t\t}\n\n\n\t\tmarker_pub.publish(traj);\n\n\n\n\t\t// publish reference waypoints (for poly-fit)\n\t\tvisualization_msgs::Marker refer;\n\t\trefer.header.frame_id = \"frame\";\n\t\trefer.header.stamp = ros::Time::now();\n\t\trefer.ns = \"reference\";\n\t\trefer.action = visualization_msgs::Marker::ADD;\n\t\trefer.id = 0; // Marker id should be unique\n\t\trefer.type = visualization_msgs::Marker::POINTS;\n\t\trefer.scale.x = 0.5;// LINE_STRIP/LINE_LIST markers use only the x component of scale, for the line width\n\t\trefer.scale.y = 0.5;\n\t\trefer.scale.z = 0.5;\n\t\trefer.color.r = 0.0; //red\n\t\trefer.color.g = 1.0; //green\n\t\trefer.color.a = 1;\n\t\tif(str_ctrl_mode == 2){\n\t\t\tfor(int j=0;j<next_x_vals.size();j++){\n\t\t\t\tgeometry_msgs::Point p3;\n\t\t\t\tp3.x = x0+next_x_vals[j]*cos(-hedg)+next_y_vals[j]*sin(-hedg);\n\t\t\t\tp3.y = y0-next_x_vals[j]*sin(-hedg)+next_y_vals[j]*cos(-hedg);\n\t\t\t\trefer.points.push_back(p3);\n\t\t\t\tmarker_pub.publish(refer);\n\t\t\t}\n\t\t}\n\n\n\n\t\t// publish 
reference trajectory fitting result\n\t\tvisualization_msgs::Marker reference_trajectory_fitting;\n\t\treference_trajectory_fitting.header.frame_id = \"frame\";\n\t\treference_trajectory_fitting.header.stamp = ros::Time::now();\n\t\treference_trajectory_fitting.ns = \"reference_trajectory_fit\";\n\t\treference_trajectory_fitting.action = visualization_msgs::Marker::ADD;\n\t\treference_trajectory_fitting.pose.orientation.w = 1.0;\n\t\treference_trajectory_fitting.id = 0; // Marker id should be unique\n\t\treference_trajectory_fitting.type = visualization_msgs::Marker::LINE_STRIP;\n\t\treference_trajectory_fitting.scale.x = 0.8;// LINE_STRIP/LINE_LIST markers use only the x component of scale, for the line width\n\t\treference_trajectory_fitting.color.r = 0.0; //red\n\t\treference_trajectory_fitting.color.g = 1.0; //green\n\t\treference_trajectory_fitting.color.b = 0.0;\n\t\treference_trajectory_fitting.color.a = 0.7;\n\n\t\tif(str_ctrl_mode == 2){\n\t\t\tfor(int j=0;j<mpc_x_ref.size();j++){\n\t\t\t\tgeometry_msgs::Point p4;\n\t\t\t\tp4.x = x0+mpc_x_ref[j]*cos(-hedg)+mpc_y_ref[j]*sin(-hedg);\n\t\t\t\tp4.y = y0-mpc_x_ref[j]*sin(-hedg)+mpc_y_ref[j]*cos(-hedg);\n\t\t\t\treference_trajectory_fitting.points.push_back(p4);\n\t\t\t}\n\t\t\tmarker_pub.publish(reference_trajectory_fitting);\n\t\t}\n\n\n\t\t/*\n\t\t// debug: publish the closest waypoints\n\t\tvisualization_msgs::Marker closest_waypoints;\n\t\tclosest_waypoints.header.frame_id = \"frame\";\n\t\tclosest_waypoints.header.stamp = ros::Time::now();\n\t\tclosest_waypoints.ns = \"closest_waypoints\";\n\t\tclosest_waypoints.action = visualization_msgs::Marker::ADD;\n\t\tclosest_waypoints.pose.orientation.w = 1.0;\n\t\tclosest_waypoints.id = 0; // Marker id should be unique\n\t\tclosest_waypoints.type = visualization_msgs::Marker::LINE_STRIP;\n\t\tclosest_waypoints.scale.x = 1.0;// LINE_STRIP/LINE_LIST markers use only the x component of scale, for the line width\n\t\tclosest_waypoints.color.r = 1.0; //red\n\t\tclosest_waypoints.color.g = 1.0; //green\n\t\tclosest_waypoints.color.a = 1.0;\n\n\t\tif(str_ctrl_mode == 2){\n\t\t\tgeometry_msgs::Point p5;\n\t\t\tp5.x = x0+x1*cos(-hedg)+y1*sin(-hedg);\n\t\t\tp5.y = y0-x1*sin(-hedg)+y1*cos(-hedg);\n\t\t\treference_trajectory_fitting.points.push_back(p5);\n\t\t\tp5.x = x0+x2*cos(-hedg)+y2*sin(-hedg);\n\t\t\tp5.y = y0-x2*sin(-hedg)+y2*cos(-hedg);\n\t\t\treference_trajectory_fitting.points.push_back(p5);\n\n\t\t\tmarker_pub.publish(closest_waypoints);\n\t\t}\n\n\t\t*/\n\n\n\n\t\t//debug==============================================================================================================\n\n\t\tfloat r2 = sqrt((x1-cx0)*(x1-cx0)+(y1-cy0)*(y1-cy0));\n\t\tfloat r3 = sqrt((x3-cx0)*(x3-cx0)+(y3-cy0)*(y3-cy0));\n\t\tcout <<\"r=\"<< r <<\"; r2=\"<<r2<<\"; r3=\"<<r3<<\"\\n\";\n\n\n\t\t/*\n\t\tif(abs(theta1-theta3)>3.1415926){\n\t\t\texit(0);\n\t\t}\n\n\t\tif((traj_x[0] - x1) > 2 || (traj_y[0] - y1) > 2){\n\t\t\texit(0);\n\t\t}\n\n\t\tif( offset_ctrl > 20 || offset_ctrl < -20 ){\n\t\t\texit(0);\n\t\t}\n\t\t*/\n\n\t\t//if(wps_idx == 38){\n\t\t//\texit(0);\n\t\t//}\n\n\n\n\n\n\n\t\t//simulation =========================================================================================================\n\n\t\tif(simulation_mode == 1){\n\t\t\tint switch_model = 0; // 0: kinematic, 1: lateral dynamic\n\t\t\tswitch(switch_model){\n\t\t\t\t//case 0: MKZ_model ( x0, y0, hedg, str_step, d_travel, add_x0, add_y0, add_hedg );\n\t\t\t\tcase 0: truck_kinematics_model( x0, y0, hedg, str_step, d_travel, add_x0, add_y0, 
add_hedg ); break;\n\t\t\t\tcase 1: dynamic_model ( x0, y0, hedg, str_step, d_travel, add_x0, add_y0, add_hedg ); break;\n\t\t\t}\n\t\t\tx0 = add_x0;\n\t\t\ty0 = add_y0;\n\t\t\thedg = add_hedg;\n\t\t\tx0_frt = x0 + d_frt*cos(hedg); // TODO: confirm cos here (and sin below) for the heading offset\n\t\t\ty0_frt = y0 + d_frt*sin(hedg);\n\t\t\t//cout << "x0 next:" << x0 << " ";//simulation\n\t\t\t//cout << "y0 next:" << y0 << " ";//simulation\n\t\t\t//cout << "hedg next:" << hedg/3.14f*180.0f << "\\n";//simulation\n\t\t\t//cout << "x0_frt next:" << x0_frt << " ";//simulation\n\t\t\t//cout << "y0_frt next:" << y0_frt << "\\n";//simulation\n\t\t\tcout << ">>> str_step = " << str_step << "\\n";\n\t\t}\n\n\t\t// ros::spinOnce();\n\n\t\trate.sleep();\n\t}\n\treturn 0;\n\n\n}\n" }, { "alpha_fraction": 0.4820322096347809, "alphanum_fraction": 0.51760333776474, "avg_line_length": 29.96839714050293, "blob_id": "d960b6dd180913dac308628a4e65d1025d1382bf", "content_id": "0c631ed0e4ef2f071708103aeb045ac77a0aedf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 13719, "license_type": "no_license", "max_line_length": 116, "num_lines": 443, "path": "/src/control/src/gnss.cpp", "repo_name": "skywalker610/Autonomous-Driving-Control", "src_encoding": "UTF-8", "text": "#include \"ros/ros.h\"\n#include \"std_msgs/String.h\"\n\n#include \"std_msgs/MultiArrayLayout.h\"\n#include \"std_msgs/MultiArrayDimension.h\"\n#include \"std_msgs/UInt8MultiArray.h\"\n#include \"std_msgs/Float32MultiArray.h\"\n#include \"std_msgs/Float64MultiArray.h\"\n#include \"std_msgs/Float32.h\"\n\n#include \"std_msgs/Header.h\"\n#include \"sensor_msgs/NavSatStatus.h\"\n#include \"sensor_msgs/NavSatFix.h\"\n\n#include \"sensor_msgs/Imu.h\"\n\n#include \"dbw/FloatArray.h\"\n#include \"dbw/GPSFix.h\"\n#include \"dbw/GPSStatus.h\"\n\n#include <sstream>\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <unistd.h>\n\n#include <net/if.h>\n#include <sys/ioctl.h>\n#include <sys/socket.h>\n\n#include <linux/can.h>\n#include <linux/can/raw.h>\n\n//#include <thread>\n\n#include <fstream>\n#include <iostream>\n#include <thread>\n\nusing namespace std;\n\nint s, i;\nint nbytes;\nstruct sockaddr_can addr;\nstruct ifreq ifr;\nstruct can_frame frame;\nstd_msgs::Float64MultiArray posArray;\nstd_msgs::Float32MultiArray accArray;\nstd_msgs::Float32MultiArray gyrArray;\nstd_msgs::Float32MultiArray angArray;\nstd_msgs::Float32 altitude;\ndbw::FloatArray NumFloat;\ndbw::GPSFix gpsfix;\nsensor_msgs::Imu imumsg;\nsensor_msgs::NavSatFix navMsg;\nsensor_msgs::NavSatStatus navStatus;\n\ndouble pos[2] = {0.0, 0.0};\nfloat acc[3] = {0.0, 0.0, 0.0};\nfloat gyr[3] = {0.0, 0.0, 0.0};\nfloat ang[3] = {0.0, 0.0, 0.0};\n//float alt = 0.0;\nuint8_t gps_ststus = 0;\n\nbool hed_up = false;\nbool pos_up = false;\nbool acc_xy_up = false,acc_z_up = false, acc_up = false;\nbool gyr_xy_up = false, gyr_z_up = false, gyr_up = false;\nbool alt_up = false;\n\nfloat gravity = 9.8;\nfloat deg2rad = 3.14159265359/180;\n\nbool write_file = true;\n\n/**\n * This node demonstrates simple transmission of CAN messages over the ROS system.\n */\n\nint can_read(void)\n{\n    //printf(\"CAN Sockets Receive Demo\\r\\n\");\n\n\tif ((s = socket(PF_CAN, SOCK_RAW, CAN_RAW)) < 0) {\n\t\tperror(\"Socket\");\n\t\treturn 1;\n\t}\n\n\tstrcpy(ifr.ifr_name, \"can0\" );\n\tioctl(s, SIOCGIFINDEX, &ifr);\n\n\tmemset(&addr, 0, 
sizeof(addr));\n\taddr.can_family = AF_CAN;\n\taddr.can_ifindex = ifr.ifr_ifindex;\n\n if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {\n perror(\"Bind\");\n return 1;\n }\n\n // printf(\"debug/n\");\n\n while (true)\n {\n nbytes = read(s, &frame, sizeof(struct can_frame));\n\n if (nbytes < 0) {\n perror(\"Read\");\n return 1;\n }\n\n // printf(\"0x%03X [%d] \",frame.can_id, frame.can_dlc);\n\n switch (frame.can_id)\n {\n case 0x10b:\n {\n uint16_t heading = frame.data[0] | frame.data[1]<<8;\n int16_t pitch = frame.data[2] | frame.data[3]<<8;\n int16_t roll = frame.data[4] | frame.data[5]<<8;\n\n ang[0] = float(heading/100.0);\n ang[1] = float(pitch/100.0);\n ang[2] = float(roll/100.0);\n\n hed_up = true;\n\n break;\n }\n\n case 0x20b:\n {\n int32_t lat = frame.data[0] | frame.data[1]<<8 | frame.data[2]<<16 | frame.data[3]<<24;\n int32_t lon = frame.data[4] | frame.data[5]<<8 | frame.data[6]<<16 | frame.data[7]<<24;\n\n pos[0] = double(lat/1e7);//double(lat/1e7-90.0);\n pos[1] = double(lon/1e7);//double(lon/1e7-180.0);\n\n pos_up = true;\n\n break;\n }\n\n case 0x30b:\n {\n int32_t altitude_tem = frame.data[0] | frame.data[1]<<8 | frame.data[2]<<16 | frame.data[3]<<24;\n uint8_t baseline = frame.data[4];\n\n altitude.data = float(altitude_tem/1000.0);\n\n alt_up = true;\n\n break;\n }\n\n case 0x60b:\n {\n int32_t gyr_x = frame.data[0] | frame.data[1]<<8 | frame.data[2]<<16 | frame.data[3]<<24;\n int32_t gyr_y = frame.data[4] | frame.data[5]<<8 | frame.data[6]<<16 | frame.data[7]<<24;\n\n gyr[0] = float(gyr_x/100000.0)*deg2rad;\n gyr[1] = float(gyr_y/100000.0)*deg2rad;\n\n gyr_xy_up = true;\n\n break;\n }\n\n case 0x70b:\n {\n int32_t gyr_z = frame.data[0] | frame.data[1]<<8 | frame.data[2]<<16 | frame.data[3]<<24;\n\t\t int32_t acc_z = frame.data[4] | frame.data[5]<<8 | frame.data[6]<<16 | frame.data[7]<<24;\n\n gyr[2] = float(gyr_z/100000.0)*deg2rad;\n\t\t acc[2] = float(acc_z/100000.0);\n\n gyr_z_up = true;\n\t\t acc_z_up = true;\n\n break;\n }\n\n case 0x50b:\n {\n // uint32_t acc_x = frame.data[0] | frame.data[1]<<8 | (frame.data[2] & 0xF0)<<12;\n // uint32_t acc_y = (frame.data[2] & 0x0F) | frame.data[3]<<4 | frame.data[4]<<12;\n // uint32_t acc_z = frame.data[5] | frame.data[6]<<8 | (frame.data[7] & 0xF0)<<12;\n\n int32_t acc_x = frame.data[0] | frame.data[1]<<8 | frame.data[2]<<16 | frame.data[3]<<24;\n int32_t acc_y = frame.data[4] | frame.data[5]<<8 | frame.data[6]<<16 | frame.data[7]<<24;\n //int32_t acc_z = frame.data[5] | frame.data[6]<<8 | (frame.data[7] & 0x0F)<<16;\n\n acc[0] = float(acc_x/100000.0);\n acc[1] = float(acc_y/100000.0);\n //acc[2] = float(acc_z/10000.0-50.0)*gravity;\n\n\n acc_xy_up = true;\n\n break;\n }\n \n case 0x31b:\n {\n gps_ststus = frame.data[4] & 0x0F;\n }\n\n default:\n break;\n }\n if (gyr_xy_up && gyr_z_up)\n {\n gyr_xy_up = false;\n gyr_z_up = false;\n\n gyr_up = true;\n }\n\tif (acc_xy_up && acc_z_up)\n {\n acc_xy_up = false;\n acc_z_up = false;\n\n acc_up = true;\n }\n }\n\n if (close(s) < 0) {\n perror(\"Close\");\n return 1;\n }\n\n}\n\nint main(int argc, char **argv)\n{\n /**\n * The ros::init() function needs to see argc and argv so that it can perform\n * any ROS arguments and name remapping that were provided at the command line.\n * For programmatic remappings you can use a different version of init() which takes\n * remappings directly, but for most command-line programs, passing argc and argv is\n * the easiest way to do it. 
The third argument to init() is the name of the node.\n *\n * You must call one of the versions of ros::init() before using any other\n * part of the ROS system.\n */\n\n ofstream outfile;\n outfile.open(\"position.dat\");\n\n ofstream accfile;\n accfile.open(\"acceleration.dat\");\n\n ros::init(argc, argv, \"msg_transmit\");\n\n /**\n * NodeHandle is the main access point to communications with the ROS system.\n * The first NodeHandle constructed will fully initialize this node, and the last\n * NodeHandle destructed will close down the node.\n */\n ros::NodeHandle n;\n\n /**\n * The advertise() function is how you tell ROS that you want to\n * publish on a given topic name. This invokes a call to the ROS\n * master node, which keeps a registry of who is publishing and who\n * is subscribing. After this advertise() call is made, the master\n * node will notify anyone who is trying to subscribe to this topic name,\n * and they will in turn negotiate a peer-to-peer connection with this\n * node. advertise() returns a Publisher object which allows you to\n * publish messages on that topic through a call to publish(). Once\n * all copies of the returned Publisher object are destroyed, the topic\n * will be automatically unadvertised.\n *\n * The second parameter to advertise() is the size of the message queue\n * used for publishing messages. If messages are published more quickly\n * than we can send them, the number here specifies how many messages to\n * buffer up before throwing some away.\n */\n ros::Publisher GNSSmsg_pub = n.advertise<std_msgs::Float64MultiArray>(\"gnss_msg\", 3);\n ros::Publisher ACCEmsg_pub = n.advertise<std_msgs::Float32MultiArray>(\"acc_msg\", 3);\n ros::Publisher GYROmsg_pub = n.advertise<std_msgs::Float32MultiArray>(\"gyr_msg\", 3);\n ros::Publisher ANGLmsg_pub = n.advertise<std_msgs::Float32MultiArray>(\"ang_msg\", 3);\n ros::Publisher ALTImsg_pub = n.advertise<std_msgs::Float32>(\"alt_msg\", 3);\n\n ros::Publisher GPSmsg_pub = n.advertise<dbw::GPSFix>(\"gpsfix_msg\", 3);\n ros::Publisher IMUmsg_pub = n.advertise<sensor_msgs::Imu>(\"imu_msg\", 3);\n\n ros::Publisher NAVImsg_pub = n.advertise<sensor_msgs::NavSatFix>(\"nav_msg\", 3);\n\n thread CANread(can_read);\n\n ros::Rate loop_rate(30);\n while (ros::ok())\n {\n posArray.data.clear();\n accArray.data.clear();\n gyrArray.data.clear();\n angArray.data.clear();\n\n //printf(\"0x%03X [%d] \",frame.can_id, frame.can_dlc);\n\n // can_read();\n\n // printf(\"0x%03X [%d] \",frame.can_id, frame.can_dlc);\n\n // for (i = 0; i < frame.can_dlc; i++)\n // printf(\"%02X \",frame.data[i]);\n\n // printf(\"\\r\\n\");\n\n\n\n if (pos_up && hed_up && acc_up && gyr_up && alt_up)\n {\n pos_up = false;\n hed_up = false;\n acc_up = false;\n gyr_up = false;\n alt_up = false;\n\n /** Heading/Pitch/Roll **/\n for (size_t i = 0; i < 3; i++)\n {\n angArray.data.push_back(ang[i]);\n }\n\n ROS_INFO(\"heading: %0.4f, pitch: %0.4f, roll: %0.4f;\",ang[0], ang[1], ang[2]);\n ANGLmsg_pub.publish(angArray);\n\n /** Position **/\n for (size_t i = 0; i < 2; i++)\n {\n posArray.data.push_back(pos[i]);\n }\n\n ROS_INFO(\"lat: %0.7f, lon: %0.7f;\",pos[0], pos[1]);\n GNSSmsg_pub.publish(posArray);\n\n if (write_file)\n {\n // write inputted data into the file.\n outfile << std::fixed << std::setprecision(9) << pos[0] ;\n outfile << \"; \";\n outfile << std::fixed << std::setprecision(9) << pos[1]<< endl;\n }\n \n /** Altitude / 0.01 m **/\n ROS_INFO(\"altitude: %f;\",altitude.data);\n\n\t\t\t\t\t\tALTImsg_pub.publish(altitude);\n\n /** Angular rate / 
rad/s (raw 0.0001 deg/s) **/\n for (size_t i = 0; i < 3; i++)\n {\n gyrArray.data.push_back(gyr[i]);\n }\n\n ROS_INFO(\"gyr_x: %0.4f, gyr_y: %0.4f, gyr_z: %0.4f;\",gyr[0], gyr[1], gyr[2]);\n\n GYROmsg_pub.publish(gyrArray);\n\n /** Acceleration / m/s (raw 0.0001 g) **/\n for (size_t i = 0; i < 3; i++)\n {\n accArray.data.push_back(acc[i]);\n }\n\n ROS_INFO(\"acc_x: %0.4f, acc_y: %0.4f, acc_z: %0.4f;\",acc[0], acc[1], acc[2]);\n\n ACCEmsg_pub.publish(accArray);\n\n gpsfix.header.frame_id = \"gpsfix\";\n gpsfix.header.stamp = ros::Time::now();\n\n gpsfix.latitude = pos[0];\n gpsfix.longitude = pos[1];\n gpsfix.altitude = altitude.data;\n\n gpsfix.track = ang[0];\n gpsfix.pitch = ang[1];\n gpsfix.roll = ang[2];\n\n GPSmsg_pub.publish(gpsfix);\n\n //calculate orientation\n double yaw = ang[0]*deg2rad;\n double pitch_rad = ang[1]*deg2rad;\n double roll_rad = ang[2]*deg2rad;\n double cy = cos(yaw * 0.5);\n double sy = sin(yaw * 0.5);\n double cp = cos(pitch_rad * 0.5);\n double sp = sin(pitch_rad * 0.5);\n double cr = cos(roll_rad * 0.5);\n double sr = sin(roll_rad * 0.5);\n imumsg.header.frame_id = \"imu\";\n imumsg.header.stamp = ros::Time::now();\n imumsg.linear_acceleration.x = acc[0];\n imumsg.linear_acceleration.y = acc[1];\n imumsg.linear_acceleration.z = acc[2];\n imumsg.angular_velocity.x = gyr[0];\n imumsg.angular_velocity.y = gyr[1];\n imumsg.angular_velocity.z = gyr[2];\n\n imumsg.orientation.w = cr * cp * cy + sr * sp * sy;\n imumsg.orientation.x = sr * cp * cy - cr * sp * sy;\n imumsg.orientation.y = cr * sp * cy + sr * cp * sy;\n imumsg.orientation.z = cr * cp * sy - sr * sp * cy;\n\n IMUmsg_pub.publish(imumsg); \n\n navMsg.header.frame_id = \"gps_navfix\";\n navMsg.header.stamp = ros::Time::now();\n\n navStatus.status = gps_ststus;\n\n navMsg.status.status = navStatus.status;\n navMsg.status.service = navStatus.SERVICE_GPS;\n\n navMsg.latitude = pos[0];\n navMsg.longitude = pos[1];\n navMsg.altitude = altitude.data;\n\n NAVImsg_pub.publish(navMsg); \n\n if (write_file)\n {\n accfile << std::fixed << std::setprecision(4) << acc[0];\n accfile << \"; \";\n accfile << std::fixed << std::setprecision(4) << acc[1];\n accfile << \"; \";\n accfile << std::fixed << std::setprecision(4) << acc[2] << endl;\n }\n\n ros::spinOnce();\n loop_rate.sleep();\n //ros::Duration(1).sleep();\n //usleep(1000000);\n }\n //ros::spinOnce();\n //loop_rate.sleep();\n }\n}\n" } ]
8
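
The first file in the record above (the waypoint follower) picks its steering mode from the radius of the circle through three consecutive waypoints via `get_r_center`, whose body is not part of this excerpt. Below is a minimal Python sketch of that three-point circumcircle computation, assuming the standard circumcenter formula; the function name, the collinearity guard, and the sample points are illustrative, and only the 50 m threshold mirrors the node's `str_ctrl_mode` switch (the C++ additionally signs the radius by turning direction).

```python
import math

def circle_from_three_points(x1, y1, x2, y2, x3, y3):
    """Circumcircle through three waypoints; returns (cx, cy, r).

    A collinear triple has no finite circumcircle, so we report an
    effectively infinite radius (the straight-line tracking case).
    """
    d = 2.0 * (x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2))
    if abs(d) < 1e-9:  # collinear: fall back to straight-segment tracking
        return float("nan"), float("nan"), float("inf")
    s1, s2, s3 = x1**2 + y1**2, x2**2 + y2**2, x3**2 + y3**2
    cx = (s1 * (y2 - y3) + s2 * (y3 - y1) + s3 * (y1 - y2)) / d
    cy = (s1 * (x3 - x2) + s2 * (x1 - x3) + s3 * (x2 - x1)) / d
    r = math.hypot(x1 - cx, y1 - cy)  # distance from center to any of the three points
    return cx, cy, r

# Mode selection mirrors the C++ node: tight curves (radius under 50 m) use the
# 3-point curvature PID (mode 0), everything else uses straight-line PID (mode 1).
cx, cy, r = circle_from_three_points(0.0, 0.0, 10.0, 1.0, 20.0, 0.0)
mode = 0 if r < 50.0 else 1
print(f"center=({cx:.1f}, {cy:.1f}) r={r:.1f} m -> mode {mode}")
```
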
dspice2003/prg1
https://github.com/dspice2003/prg1
2ff77f546cdc2ed88987d0667e107b38ffbb5a04
3b858168529e395da7b9347675d0f54c4e8cc794
4eac6fce24d835cadf90271204f0f18b471fb5c8
refs/heads/master
2021-05-11T06:47:37.522907
2018-01-18T15:59:13
2018-01-18T15:59:13
118,000,007
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 27, "blob_id": "1533d05d9cb8409b7e4d7c332cce004b2f20772d", "content_id": "ad9d4e72dae3a4d41abc0d673147ce7ebc8704f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56, "license_type": "no_license", "max_line_length": 40, "num_lines": 2, "path": "/prg1.py", "repo_name": "dspice2003/prg1", "src_encoding": "UTF-8", "text": "isogram = input(\"please enter a number\")\nprint(isogram)\n" } ]
1
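
prg1.py above reads input into a variable named `isogram` but never tests anything. Assuming an isogram check was the intent (the repo never implements one, so this completion is purely hypothetical), a minimal version could look like this:

```python
def is_isogram(word: str) -> bool:
    """True when no letter repeats (the usual isogram definition)."""
    letters = [c.lower() for c in word if c.isalpha()]
    return len(letters) == len(set(letters))

word = input("Please enter a word: ")
print(f"{word!r} is {'an isogram' if is_isogram(word) else 'not an isogram'}")
```
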
MargoRaijmakers/Live_demo_github
https://github.com/MargoRaijmakers/Live_demo_github
66b098322e5d9970579d3c2c4b930271d9ca707c
92f905bfed99719d53c301dc40b56e7eaede0583
e570a9ee2955e1fe7c2af2a1b59e070e36ec9345
refs/heads/master
2022-12-26T19:04:12.600109
2020-06-03T16:05:50
2020-06-03T16:05:50
261,979,347
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7215686440467834, "alphanum_fraction": 0.7215686440467834, "avg_line_length": 27.22222137451172, "blob_id": "0f0f6afe093fa469a5522c194cf5aeff18194c12", "content_id": "b645038a63f2d6f23ac8a38db647b55b60b33208", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 255, "license_type": "no_license", "max_line_length": 52, "num_lines": 9, "path": "/Github.py", "repo_name": "MargoRaijmakers/Live_demo_github", "src_encoding": "UTF-8", "text": "# file, settings, version control, github, +\n# maak nieuwe repository: + bij foto, new repository\n\n# terminal in pycharm\n#git init --> initialise .git\n#git add [file location/filename]\n#git commit\n#git remote add [naam] [url repository]\n#git push [name]\n\n" } ]
1
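
Github.py above is a comment-only crib sheet for a first push to GitHub. For a runnable form of the same sequence, here is a sketch that drives git from Python; the remote name `origin`, the placeholder URL, and the `-m` commit message (in place of the editor-based `git commit` in the notes) are illustrative choices, not part of the repo:

```python
import subprocess

def run(*cmd):
    """Run one git command, echoing it first so the demo is traceable."""
    print("$", " ".join(cmd))
    subprocess.run(cmd, check=True)  # raises CalledProcessError on failure

# Same sequence as the comments in Github.py above.
run("git", "init")
run("git", "add", "Github.py")
run("git", "commit", "-m", "Initial commit")
run("git", "remote", "add", "origin", "https://github.com/<user>/<repo>.git")
run("git", "push", "origin", "master")
```
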
djbanach/crm-project
https://github.com/djbanach/crm-project
e540420f8568ad7210a7fc91603b938f6c356c27
de3be9d66a8b9114790a489c5e978a7771d72858
d4d089d729effb5b887744d4780aac22816d9dfa
refs/heads/master
2018-11-05T14:38:37.159606
2018-08-27T16:29:37
2018-08-27T16:29:37
146,251,419
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6716232895851135, "alphanum_fraction": 0.7063196897506714, "avg_line_length": 28.851852416992188, "blob_id": "46c39b80ed6dac1597174f7ac0b42dfd5256bcc1", "content_id": "8315af8011b5112101106baf3834c5fef2545127", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 807, "license_type": "no_license", "max_line_length": 118, "num_lines": 27, "path": "/README.md", "repo_name": "djbanach/crm-project", "src_encoding": "UTF-8", "text": "# crm-project\nA customer relationship management app utilizing a React front-end and a Django REST API\n\nThe main point of the project is to get a better understanding of both React and Django.\nThe idea came after working as a salesman at a car dealership where they utilize a CRM app and\nthis tutorial (https://www.valentinog.com/blog/tutorial-api-django-rest-react/) where most of the lead app comes from.\n\n## Lead app\n\nA simple app that utilizes both get and post requests\n\n### Schema\n\n```JSON\n[\n {\n \"name\": \"John\",\n \"email\": \"JohnDoe@fake.com\",\n \"phone_number\": \"1234567890\",\n \"message\": \"Is this car still avaliable?\",\n \"car\": \"2004 Honda Accord\",\n \"created_at\": \"2018-01-14 00:00:00\"\n }\n]\n```\n\n**I eventually want to add both a vehical database to store the cars and an employee database**\n\n" }, { "alpha_fraction": 0.7610389590263367, "alphanum_fraction": 0.784415602684021, "avg_line_length": 34, "blob_id": "f1f0cb979daccd1343b61b821f6d4dac8c2876ad", "content_id": "43d0095274159f6c5dc74461a6384c153623e1eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 385, "license_type": "no_license", "max_line_length": 58, "num_lines": 11, "path": "/project/leads/models.py", "repo_name": "djbanach/crm-project", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom phonenumber_field.modelfields import PhoneNumberField\n\n# Create your models here.\nclass Lead(models.Model):\n\tname = models.CharField(max_length=100)\n\temail = models.EmailField()\n\tphone_number = PhoneNumberField()\n\tmessage = models.CharField(max_length=300)\n\tcar = models.CharField(max_length=100)\n\tcreated_at = models.DateTimeField(auto_now_add=True)\n" } ]
2
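
The crm-project record above pairs a README schema with the `Lead` model from models.py. The project's serializer is not part of this excerpt, so the following Django REST Framework sketch is only an assumption about how that model would map onto the README's JSON shape; the import path and class name are guesses from the repo layout:

```python
from rest_framework import serializers

from leads.models import Lead  # import path assumed from /project/leads/models.py above


class LeadSerializer(serializers.ModelSerializer):
    """Serializes a Lead to the JSON shape shown in the README schema."""

    class Meta:
        model = Lead
        # Same six keys as the README's sample object.
        fields = ("name", "email", "phone_number", "message", "car", "created_at")
```
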
aesavas/HackerRank
https://github.com/aesavas/HackerRank
66b29adfbfe47d9bd8cdbccc8b4d3aa52e69441a
c1ffecc92e3e3db923f94594b9115f650dc2983a
4c89c922a3d22c4c9d8f7419d0bbccb693e6b215
refs/heads/master
2021-01-05T10:05:57.996492
2020-09-29T22:47:48
2020-09-29T22:47:48
240,984,986
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6120826601982117, "alphanum_fraction": 0.6279809474945068, "avg_line_length": 26.34782600402832, "blob_id": "806331e0528915ee173dc093b5707ac207f9ad20", "content_id": "fefb325a423cf81a874b6dda6146cbc7535e371a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 629, "license_type": "permissive", "max_line_length": 83, "num_lines": 23, "path": "/Project Euler+/Problem 002/Solution 2.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\nauthor : Ali Emre SAVAS\nLink : https://www.hackerrank.com/contests/projecteuler/challenges/euler002/problem\n\"\"\"\n\n\ndef sumOfFibonacci(maxRange):\n # nextValue gives us the next term of sequence\n firstValue, secondValue, nextValue = 0, 1, 0\n sumOfEven = 0\n while(nextValue <= maxRange):\n if nextValue % 2 == 0:\n sumOfEven += nextValue\n firstValue, secondValue = secondValue, nextValue\n nextValue = firstValue + secondValue\n return sumOfEven\n\n\nif __name__ == \"__main__\":\n t = int(input().strip())\n for a0 in range(t):\n n = int(input().strip())\n print(sumOfFibonacci(n))\n" }, { "alpha_fraction": 0.6487341523170471, "alphanum_fraction": 0.6645569801330566, "avg_line_length": 16.55555534362793, "blob_id": "fb10ce4ca3d9f789b7dc4c99ea4e38918c4ddf35", "content_id": "a711c29012c6449616bd720c5a5c21339872f89b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 316, "license_type": "permissive", "max_line_length": 70, "num_lines": 18, "path": "/30 Days of Code/Day 01 - Data Types/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n auhtor : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/30-data-types/problem\n\"\"\"\ni = 4\nd = 4.0\ns = 'HackerRank '\n\nintNumber = int(input())\nfloatNumber = float(input())\nsentece = input()\n\nresult = i + intNumber\nprint(result)\nresult = d + floatNumber\nprint(result)\nresult = s + sentece\nprint(result)\n" }, { "alpha_fraction": 0.5962343215942383, "alphanum_fraction": 0.6004183888435364, "avg_line_length": 30.866666793823242, "blob_id": "234e54628451ffb05dca0a5b5e74e1d8415886dc", "content_id": "7c038dbeb91a692ef1a3a8d62428b9d14d9af0f3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 478, "license_type": "permissive", "max_line_length": 79, "num_lines": 15, "path": "/Python/Basic Data Types/Finding The Percentage/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/finding-the-percentage/problem\n\"\"\"\nif __name__ == '__main__':\n n = int(input())\n student_marks = {}\n for _ in range(n):\n name, *line = input().split()\n scores = list(map(float, line))\n student_marks[name] = scores\n query_name = input()\n calculateAverageList = student_marks[query_name]\n average = format(float(sum(calculateAverageList))/3, '.2f')\n print(average)\n" }, { "alpha_fraction": 0.5575079917907715, "alphanum_fraction": 0.5734824538230896, "avg_line_length": 25.08333396911621, "blob_id": "03c07e3dbc61a6db3b2e91065a9ae61fc8e10374", "content_id": "616f06f4741c98975f4ad189c06a1d9ed8f72df9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 626, "license_type": "permissive", "max_line_length": 73, "num_lines": 24, "path": "/Python/Strings/Alphabet Rangoli/solution.py", 
"repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/alphabet-rangoli/problem\n\"\"\"\nimport string\n\n\ndef print_rangoli(size):\n # This blank for\n alphabet = tuple(\" \"+string.ascii_lowercase)\n lineSize = n * 4 - 3\n # UPPER SECTION\n for i in range(size, 0, -1):\n row = \"-\".join(alphabet[size:i:-1] + alphabet[i:size+1])\n print(row.center(lineSize, \"-\"))\n # BOTTOM SECTION\n for i in range(2, size+1):\n row = \"-\".join(alphabet[size:i:-1] + alphabet[i:size+1])\n print(row.center(lineSize, \"-\"))\n\n\nif __name__ == '__main__':\n n = int(input())\n print_rangoli(n)\n" }, { "alpha_fraction": 0.686956524848938, "alphanum_fraction": 0.686956524848938, "avg_line_length": 22, "blob_id": "daa152a6601a16188769b2a183735f5e9938d87a", "content_id": "c0e1fb1a2e3f061fc6a32fb8b61afcd122536031", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115, "license_type": "permissive", "max_line_length": 67, "num_lines": 5, "path": "/Python/Introduction/Say Hello World with Python/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\nauthor : Ali Emre SAVAS\nLink : https://www.hackerrank.com/challenges/py-hello-world/problem\n\"\"\"\nprint(\"Hello, World!\")\n" }, { "alpha_fraction": 0.5364963412284851, "alphanum_fraction": 0.5364963412284851, "avg_line_length": 26.399999618530273, "blob_id": "fd6d59240e892ffb2ad0f590145f9db8d98577d7", "content_id": "2833948346e71bfb0613d68579c3cd222d132b80", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 274, "license_type": "permissive", "max_line_length": 90, "num_lines": 10, "path": "/Python/Math/Integer Come In All Sizes/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/python-integers-come-in-all-sizes/problem\n\"\"\"\nif __name__ == \"__main__\":\n a = int(input())\n b = int(input())\n c = int(input())\n d = int(input())\n print(\"{}\".format(pow(a, b)+pow(c, d)))\n" }, { "alpha_fraction": 0.52173912525177, "alphanum_fraction": 0.52173912525177, "avg_line_length": 27.75, "blob_id": "b0b6f0c1c60e93c0be82ff3fde5c839de9d74efa", "content_id": "3e6de8a10c3548720560cbf523928afcd09bdd7c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 230, "license_type": "permissive", "max_line_length": 74, "num_lines": 8, "path": "/Python/Math/Mod Divmod/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/python-mod-divmod/problem\n\"\"\"\nif __name__ == \"__main__\":\n a = int(input())\n b = int(input())\n print(\"{}\\n{}\\n{}\".format(a//b, a % b, divmod(a, b)))\n" }, { "alpha_fraction": 0.6243032217025757, "alphanum_fraction": 0.6276476979255676, "avg_line_length": 26.18181800842285, "blob_id": "5180a293b0b668d7942aa5a9abea45e6ada344f2", "content_id": "121760be5c28e20bb30a32a4885bc7cb15689069", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 897, "license_type": "permissive", "max_line_length": 93, "num_lines": 33, "path": "/Python/Strings/The Minion Game/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n 
Link : https://www.hackerrank.com/challenges/the-minion-game/problem\n\"\"\"\n\"\"\"\nThe logic is finding number of substring which starts with vowels and consonants.\nFor example: BANANA\nFirst letter is B. Now, find substring which starts with B.\nB,BA,BAN,BANA,BANAN,BANANA ==> 6 substring.\nPurpose of (len(string) - i) is to find number of substring which starts with current letter.\n\"\"\"\n\n\ndef minion_game(string):\n kevinScore = 0\n stuartScore = 0\n for i in range(len(string)):\n if string[i] in \"AEIOU\":\n kevinScore += (len(string)-i)\n else:\n stuartScore += (len(string)-i)\n\n if kevinScore > stuartScore:\n print(\"Kevin {}\".format(kevinScore))\n elif kevinScore < stuartScore:\n print(\"Stuart {}\".format(stuartScore))\n else:\n print(\"Draw\")\n\n\nif __name__ == '__main__':\n s = input()\n minion_game(s)\n" }, { "alpha_fraction": 0.6010230183601379, "alphanum_fraction": 0.6138107180595398, "avg_line_length": 20.72222137451172, "blob_id": "0773f37e2dca07fecce8e73eca2f69eac54daa47", "content_id": "7659d39238241feab1166bb4a60a01fbdb8b511b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 391, "license_type": "permissive", "max_line_length": 119, "num_lines": 18, "path": "/30 Days of Code/Day 09 - Recursion 3/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/30-recursion/problem\n\"\"\"\n\n# If you use this solution in Hackerrank solution, just copy the function. Because main part is different from website.\n\n\ndef factorial(n):\n if n == 1:\n return 1\n else:\n return n * factorial(n-1)\n\n\nif __name__ == '__main__':\n n = int(input())\n result = factorial(n)\n" }, { "alpha_fraction": 0.5359539985656738, "alphanum_fraction": 0.5407478213310242, "avg_line_length": 24.292682647705078, "blob_id": "0062d85b6b578e651f1119605890d57474853a00", "content_id": "712db3467ca0116a1a3f9e2fade986ab7b8d3bec", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1043, "license_type": "permissive", "max_line_length": 79, "num_lines": 41, "path": "/30 Days of Code/Day 22 - Binary Search Trees/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/30-binary-search-trees/problem\n\"\"\"\n\nclass Node:\n def __init__(self,data):\n self.right=self.left=None\n self.data = data\nclass Solution:\n def insert(self,root,data):\n if root==None:\n return Node(data)\n else:\n if data<=root.data:\n cur=self.insert(root.left,data)\n root.left=cur\n else:\n cur=self.insert(root.right,data)\n root.right=cur\n return root\n\n def getHeight(self,node):\n if node is None: \n return -1\n else:\n leftSide = self.getHeight(node.left) \n rightSide = self.getHeight(node.right)\n if (leftSide > rightSide): \n return leftSide+1\n else: \n return rightSide+1\n\nT=int(input())\nmyTree=Solution()\nroot=None\nfor i in range(T):\n data=int(input())\n root=myTree.insert(root,data)\nheight=myTree.getHeight(root)\nprint(height) " }, { "alpha_fraction": 0.536912739276886, "alphanum_fraction": 0.5503355860710144, "avg_line_length": 26.090909957885742, "blob_id": "b27e18cc27554d3d65554020e1a7bf008c659d9b", "content_id": "97688b09bfb61fdf6e74155a50fefad44a0d8884", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 298, 
"license_type": "permissive", "max_line_length": 69, "num_lines": 11, "path": "/Python/Introduction/Python Print/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/python-print/problem\n\"\"\"\nif __name__ == '__main__':\n n = int(input())\n number = \"\"\n # n+1 -> Because in range function, n+1 is not in the range.\n for i in range(1, n+1):\n number += str(i)\n print(number)\n" }, { "alpha_fraction": 0.60047847032547, "alphanum_fraction": 0.60047847032547, "avg_line_length": 22.22222137451172, "blob_id": "cec97b8fee2e6d4ae360b98cc8f8dfe261514f72", "content_id": "23fb5eed02e074131c3e4705d23fbafabdca78c7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 418, "license_type": "permissive", "max_line_length": 100, "num_lines": 18, "path": "/Python/Strings/Capitalize/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/capitalize/problem\n\"\"\"\n\n\n# If you want to use in Hackerrank, just copy the function. Main section is different in Hackerrank.\ndef solve(s):\n name = s.split(\" \")\n for i in range(len(name)):\n name[i] = name[i].capitalize()\n return \" \".join(name)\n\n\nif __name__ == '__main__':\n s = input()\n result = solve(s)\n print(result)\n" }, { "alpha_fraction": 0.5612244606018066, "alphanum_fraction": 0.5612244606018066, "avg_line_length": 21.69230842590332, "blob_id": "3e4476a40a9fbbb6784944457202189aac12935b", "content_id": "5117279cdfe3898eabeea61f2051ae13638c9712", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 294, "license_type": "permissive", "max_line_length": 72, "num_lines": 13, "path": "/Java/Introduction/Welcome to Java!/Solution.java", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "/*\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/welcome-to-java/problem\n ------------------------\n*/\n\n\npublic class Solution{\n public static void main(String[] args) {\n System.out.println(\"Hello, World.\");\n System.out.println(\"Hello, Java.\");\n }\n}" }, { "alpha_fraction": 0.5944334268569946, "alphanum_fraction": 0.5944334268569946, "avg_line_length": 24.149999618530273, "blob_id": "7a20a1b1383cfe2a2f2f9663f48d55532d20b73e", "content_id": "f8a87eff0fafa0e5fbd8813d5c43ca25bc2245da", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 503, "license_type": "permissive", "max_line_length": 72, "num_lines": 20, "path": "/Java/BigNumber/Java BigInteger/Solution.java", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "/*\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/java-biginteger/problem\n ------------------------\n*/\nimport java.io.*;\nimport java.util.*;\nimport java.math.BigInteger;\n\npublic class Solution {\n\n public static void main(String[] args) {\n Scanner scan = new Scanner(System.in);\n BigInteger A = scan.nextBigInteger();\n BigInteger B = scan.nextBigInteger();\n System.out.println(A.add(B));\n System.out.println(A.multiply(B));\n \n }\n}\n" }, { "alpha_fraction": 0.5403929948806763, "alphanum_fraction": 0.5469432473182678, "avg_line_length": 35.63999938964844, "blob_id": "8c1c2cce3e09a3ac452605d82f5cb151dc5b0b3b", "content_id": 
"14460743c84a5eb768e7b082fa3d05eb85714f5e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 916, "license_type": "permissive", "max_line_length": 69, "num_lines": 25, "path": "/Python/Basic Data Types/Python Lists/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/python-lists/problem\n\"\"\"\nif __name__ == '__main__':\n N = int(input())\n listOfNumbers = list()\n for _ in range(N):\n process = list(map(str, input().split()))\n process_type = process[0].lower()\n if(process_type == \"append\"):\n listOfNumbers.append(int(process[1]))\n elif(process_type == \"insert\"):\n listOfNumbers.insert(int(process[1]), int(process[2]))\n elif(process_type == \"remove\"):\n if(int(process[1]) in listOfNumbers):\n listOfNumbers.remove(int(process[1]))\n elif(process_type == \"sort\"):\n listOfNumbers.sort()\n elif(process_type == \"pop\"):\n listOfNumbers.pop()\n elif(process_type == \"reverse\"):\n listOfNumbers.reverse()\n elif(process_type == \"print\"):\n print(listOfNumbers)\n" }, { "alpha_fraction": 0.3706020414829254, "alphanum_fraction": 0.6465989351272583, "avg_line_length": 26.80434799194336, "blob_id": "ada4a503fd8d3cab23c46048058ff3997e431f5b", "content_id": "094f4d3fc5a301fe974c2371326ccf8c24e55359", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1279, "license_type": "permissive", "max_line_length": 111, "num_lines": 46, "path": "/Python/Math/Triangle Quest 2/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/triangle-quest-2/editorial\n\"\"\"\n# For this question, this is my best solution. It is not correct but I can do only this :D\nfor i in range(1, int(input())+1):\n print(*(list(range(1, i+1))+list(range(i-1, 0, -1))))\n\n\n\"\"\"\nThere is a math trick in this question. I did not know that. 
If you do not know it, probably you can not do it.\nIntegers greater than 0 and less than 10:\n(10**1)//9 = 1\n(10**2)//9 = 11\n(10**3)//9 = 111\n(10**4)//9 = 1111\n(10**5)//9 = 11111\n(10**6)//9 = 111111\n(10**7)//9 = 1111111\n(10**8)//9 = 11111111\n(10**9)//9 = 111111111\n \n and \n\n 1 * 1 = 1\n 11 * 11 = 121\n 111 * 111 = 12321\n 1111 * 1111 = 1234321\n 11111 * 11111 = 123454321\n 111111 * 111111 = 12345654321\n 1111111 * 1111111 = 1234567654321\n 11111111 * 11111111 = 123456787654321\n 111111111 * 111111111 = 12345678987654321\n\nSo, we can solve like this if we know this math trick\n\"\"\"\nfor i in range(1, int(input())+1):\n print(int((10**i//9)**2))\n\n\"\"\"\nAnd also, another solution that the owner of the problem offer like this:\n(I think, he is joking to everyone :D)\n\"\"\"\nfor i in range(1, int(input())+1):\n print([1, 121, 12321, 1234321, 123454321, 12345654321,\n 1234567654321, 123456787654321, 12345678987654321][i-1])\n" }, { "alpha_fraction": 0.6099815368652344, "alphanum_fraction": 0.6136783957481384, "avg_line_length": 29.05555534362793, "blob_id": "5512d15d542b595d912e0d6c209c84fe04d3d2e2", "content_id": "9c7b49791af463b8ec45f58ad1373461268b4067", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 541, "license_type": "permissive", "max_line_length": 125, "num_lines": 18, "path": "/Python/Strings/Text Wrap/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/text-wrap/problem\n\"\"\"\n\n\ndef wrap(string, max_width):\n newString = \"\"\n for i in range(0, len(string), max_width):\n newString += string[i:(i+max_width)] + \"\\n\" # \\n for new line.\n # newString = newString[:(len(newString)-1)] If you do not want to print last blank line, you can add this line for this.\n return newString\n\n\nif __name__ == '__main__':\n string, max_width = input(), int(input())\n result = wrap(string, max_width)\n print(result)\n" }, { "alpha_fraction": 0.5659777522087097, "alphanum_fraction": 0.5802861452102661, "avg_line_length": 27.590909957885742, "blob_id": "0159811e9cdc4c29803594acdea046898bea51e3", "content_id": "a4472d1f187234154fc4b0e69b03aa4dac7548b9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 629, "license_type": "permissive", "max_line_length": 74, "num_lines": 22, "path": "/30 Days of Code/Day 28 - RegEx, Patterns, and Intro to Databases/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/30-regex-patterns/problem\n\"\"\"\n\nimport re\n\nif __name__ == '__main__':\n N = int(input())\n names = list()\n regexName = \"[a-z]\"\n regexEmail = \"[a-z]{1,}@gmail.com\"\n for N_itr in range(N):\n firstNameEmailID = input().split()\n firstName = firstNameEmailID[0]\n emailID = firstNameEmailID[1]\n if(re.search(regexName, firstName) and len(firstName)<20):\n if (re.search(regexEmail, emailID) and len(emailID)<50):\n names.append(firstName)\n names.sort()\n for name in names:\n print(name)\n" }, { "alpha_fraction": 0.5053648352622986, "alphanum_fraction": 0.5439913868904114, "avg_line_length": 34.846153259277344, "blob_id": "f02069bfd31447d56ad6f4fe452610606407796b", "content_id": "b4a8d7327b33f6413d46b23536f87c8bf31f2ac1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 932, "license_type": "permissive", "max_line_length": 101, "num_lines": 26, "path": "/30 Days of Code/Day 11 - 2D Arrays/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/30-2d-arrays/problem\n\"\"\"\nif __name__ == '__main__':\n arr = []\n for _ in range(6):\n arr.append(list(map(int, input().rstrip().split())))\n\n result = -63 # -63 because sum can be between -63 and +63. Thus, -63 can be minimum value of sum\n \"\"\"\n There are 3 section in hourglass\n 1 1 1 -> Upper\n 1 -> Belt\n 1 1 1 -> Bottom\n So, We can use 3 counter in for loop. \n \"\"\"\n for x, y, z in zip(range(0, 4), range(1, 5), range(2, 6)):\n for i in range(4):\n upperSection = arr[x][i:i+3] # There are 3 item in Upper section\n belt = arr[y][i+1] # There is 1 item in Belt Section\n bottomSection = arr[z][i:i+3] # There are 3 item Bottom section\n temp = sum(upperSection) + belt + sum(bottomSection)\n if temp > result:\n result = temp\n print(result)\n" }, { "alpha_fraction": 0.6067677736282349, "alphanum_fraction": 0.6126021146774292, "avg_line_length": 33.279998779296875, "blob_id": "f1b6a30560469ade15c18c4ef54073817369881e", "content_id": "193792188873072ae3aae0355dd18721fe085a17", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 857, "license_type": "permissive", "max_line_length": 78, "num_lines": 25, "path": "/Python/Basic Data Types/Nested List/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/nested-list/problem\n\"\"\"\nif __name__ == '__main__':\n students = list()\n # This part to take input for list\n for _ in range(int(input())):\n name = input()\n score = float(input())\n students.append([name, score])\n # List is sorted according to score.\n students.sort(key=lambda x: x[1])\n lowestScore = students[0][1]\n # This part to find second lowest score.\n for name, score in students:\n if score > lowestScore:\n secondLowestScore = score\n break\n # This filter part to find same score.\n printingList = list(filter(lambda x: x[1] == secondLowestScore, students))\n # Then, I sorted list according to names.\n printingList.sort(key=lambda x: x[0])\n for name, score in printingList:\n print(name)\n" }, { "alpha_fraction": 0.5952023863792419, "alphanum_fraction": 0.6011993885040283, "avg_line_length": 36.05555725097656, "blob_id": "77861263cc700514f0f170c211fd42ded1215b29", "content_id": "17cdb01c6afa830c4657f059ef02eb7b29df4cb1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 667, "license_type": "permissive", "max_line_length": 85, "num_lines": 18, "path": "/30 Days of Code/Day 08 - Dictionaries and Maps/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/30-dictionaries-and-maps/problem\n\"\"\"\nfrom sys import stdin # We need this module for reading unknown number of input.\nif __name__ == \"__main__\":\n phoneBook = dict()\n number = int(input())\n for i in range(number):\n person = tuple(map(str, input().split()))\n phoneBook[person[0]] = person[1]\n # The problem said that we do no know number of input. 
So, we read all of inputs.\n names = stdin.read().splitlines()\n for name in names:\n if name in phoneBook:\n print(\"{}={}\".format(name, phoneBook[name]))\n else:\n print(\"Not found\")\n" }, { "alpha_fraction": 0.671318531036377, "alphanum_fraction": 0.7771478295326233, "avg_line_length": 180.0263214111328, "blob_id": "0739e7ccee63482e5596987faac4ef8f5d5cb615", "content_id": "5c350b00af116c4356e98bac60887ed77e8c7dd0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6879, "license_type": "permissive", "max_line_length": 282, "num_lines": 38, "path": "/30 Days of Code/README.md", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "# 30 Days of Code\n\n| Day | Challenge | Points | Solution |\n|:---:|:---------:|:------:|:--------:|\n|0|[Hello World](https://www.hackerrank.com/challenges/30-hello-world/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2000%20-%20Hello%20World/solution.py)|\n|1|[Data Types](https://www.hackerrank.com/challenges/30-data-types/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2001%20-%20Data%20Types/solution.py)|\n|2|[Operators](https://www.hackerrank.com/challenges/30-operators/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2002%20-%20Operators/solution.py)|\n|3|[Introduction to Conditional Statements](https://www.hackerrank.com/challenges/30-conditional-statements/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2003%20-%20Introduction%20to%20Conditional%20Statements/solution.py)|\n|4|[Class and Instance](https://www.hackerrank.com/challenges/30-class-vs-instance/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2004%20-%20Class%20vs%20Instance/solution.py)|\n|5|[Loops](https://www.hackerrank.com/challenges/30-loops/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2005%20-%20Loops/solution.py)|\n|6|[Let's Review](https://www.hackerrank.com/challenges/30-review-loop/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2006%20-%20Let's%20Review/solution.py)|\n|7|[Arrays](hackerrank.com/challenges/30-arrays/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/tree/master/30%20Days%20of%20Code/Day%2007%20-%20Arrays)|\n|8|[Dictionaries and Maps](https://www.hackerrank.com/challenges/30-dictionaries-and-maps/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2008%20-%20Dictionaries%20and%20Maps/solution.py)|\n|9|[Recursion 3](https://www.hackerrank.com/challenges/30-recursion/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2009%20-%20Recursion%203/solution.py)|\n|10|[Binary Numbers](https://www.hackerrank.com/challenges/30-binary-numbers/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2010%20-%20Binary%20Numbers/solution.py)|\n|11|[2D 
Arrays](https://www.hackerrank.com/challenges/30-2d-arrays/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2011%20-%202D%20Arrays/solution.py)|\n|12|[Inheritance](https://www.hackerrank.com/challenges/30-inheritance/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2012%20-%20Inheritance/solution.py)|\n|13|[Abstract Classes](https://www.hackerrank.com/challenges/30-abstract-classes/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2011%20-%202D%20Arrays/solution.py)|\n|14|[Scope](https://www.hackerrank.com/challenges/30-scope/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2014%20-%20Scope/solution.py)|\n|15|[Linked List](https://www.hackerrank.com/challenges/30-linked-list/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2015%20-%20Linked%20List/solution.py)|\n|16|[Exceptions](https://www.hackerrank.com/challenges/30-exceptions-string-to-integer/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2016%20-%20Exceptions/solution.py)|\n|17|[More Exception](https://www.hackerrank.com/challenges/30-more-exceptions/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2017%20-%20More%20Exception/solution.py)|\n|18|[Queue and Stack](https://www.hackerrank.com/challenges/30-queues-stacks/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2018%20-%20Queue%20and%20Stack/solution/solution.py)|\n|19|[Interfaces](https://www.hackerrank.com/challenges/30-interfaces/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2019%20-%20Interfaces/solution.py)|\n|20|[Sorting](https://www.hackerrank.com/challenges/30-sorting/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2020%20-%20Sorting/solution.py)|\n|21|[Generics](https://www.hackerrank.com/challenges/30-generics/problem)|30|[solution.java](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2021%20-%20Generics/Generics.java)|\n|22|[Binary Search Trees](https://www.hackerrank.com/challenges/30-binary-search-trees/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2022%20-%20Binary%20Search%20Trees/solution.py)|\n|23|[BST Level-Order Traversal](https://www.hackerrank.com/challenges/30-binary-trees/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2023%20-%20BST%20Level-Order%20Traversal/solution.py)|\n|24|[More Linked Lists](https://www.hackerrank.com/challenges/30-linked-list-deletion/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2024%20-%20More%20Linked%20Lists/solution.py)|\n|25|[Running Time and Complexity](https://www.hackerrank.com/challenges/30-running-time-and-complexity/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2025%20-%20Running%20Time%20and%20Complexity/solution.py)|\n|26|[Nested 
Logic](https://www.hackerrank.com/challenges/30-nested-logic/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2026%20-%20Nested%20Logic/solution.py)|\n|27|[Testing](https://www.hackerrank.com/challenges/30-testing/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2027%20-%20Testing/solution.py)|\n|28|[RegEx, Patterns, and Intro to Databases](https://www.hackerrank.com/challenges/30-regex-patterns/problem)|30|[solution.py](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2028%20-%20RegEx%2C%20Patterns%2C%20and%20Intro%20to%20Databases/solution.py)|\n|29|[Bitwise AND](https://www.hackerrank.com/challenges/30-bitwise-and/problem)|30|[solution.java](https://github.com/aesavas/HackerRank/blob/master/30%20Days%20of%20Code/Day%2029%20-%20Bitwise%20AND/Solution.java)|\n\n---------------------------------------------\n> Once I solve problems, I will add solutions.\n>> Note : If you have better solution, you can share with me via e-mail. :mailbox:\n" }, { "alpha_fraction": 0.5311203598976135, "alphanum_fraction": 0.54356849193573, "avg_line_length": 29.125, "blob_id": "38ca5657f535ade492446301e4e41f4eaaf9c76a", "content_id": "1294f0396bbeecab90d2fdbbd4eab7ea7cc7f9b0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 241, "license_type": "permissive", "max_line_length": 66, "num_lines": 8, "path": "/30 Days of Code/Day 07 - Arrays/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/30-arrays/problem\n\"\"\"\nif __name__ == '__main__':\n n = int(input())\n arr = list(map(int, input().rstrip().split()))\n print(\" \".join(str(i) for i in arr[::-1]))\n" }, { "alpha_fraction": 0.6111111044883728, "alphanum_fraction": 0.6111111044883728, "avg_line_length": 19.25, "blob_id": "ed21ad8cd8402a9e9d0a230339b4cc168609e277", "content_id": "2e0cba79fa66fbeaf30678c0116a7d0325becb1e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 324, "license_type": "permissive", "max_line_length": 85, "num_lines": 16, "path": "/Python/Strings/String Split and Join/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/python-string-split-and-join/problem\n\"\"\"\n\n\ndef split_and_join(line):\n sentence = line.split()\n sentence = \"-\".join(sentence)\n return sentence\n\n\nif __name__ == '__main__':\n line = input()\n result = split_and_join(line)\n print(result)\n" }, { "alpha_fraction": 0.5720720887184143, "alphanum_fraction": 0.5765765905380249, "avg_line_length": 33.153846740722656, "blob_id": "ff48b49457ce7fe76f44e6c2b666ef24c30d7883", "content_id": "9044d59d70ef2bb8127db5b60056ccb2cb54c500", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 444, "license_type": "permissive", "max_line_length": 79, "num_lines": 13, "path": "/Python/Itertools/Itertools.combinations/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/itertools-combinations/problem\n\"\"\"\nfrom itertools import combinations\n\nif __name__ == \"__main__\":\n S, k = input().split()\n for i in range(1, int(k)+1):\n 
combinationList = [sorted(sorted(combinations(S, i))[j])\n for j in range(len(sorted(combinations(S, i))))]\n for k in sorted(combinationList):\n print(\"\".join(k))\n" }, { "alpha_fraction": 0.43134087324142456, "alphanum_fraction": 0.4426494240760803, "avg_line_length": 21.962963104248047, "blob_id": "9ff34889f2e98553b63be312bfed1f25b2dba98f", "content_id": "644477c45d2c1fbf7f9118ae05b2c0c985556e3b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 619, "license_type": "permissive", "max_line_length": 69, "num_lines": 27, "path": "/Java/Introduction/Java If-Else/Solution.java", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "/*\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/java-if-else/problem\n ------------------------\n*/\n\nimport java.util.Scanner;\n\npublic class Solution {\n public static void main(String[] args) {\n Scanner scan = new Scanner(System.in);\n int n = scan.nextInt();\n scan.close();\n\n if(n % 2 == 1){\n System.out.println(\"Weird\");\n }\n else{\n if(n % 2 == 0 && n >= 6 && n <= 20){\n System.out.println(\"Weird\");\n }\n else{\n System.out.println(\"Not Weird\");\n }\n }\n }\n}" }, { "alpha_fraction": 0.5736433863639832, "alphanum_fraction": 0.5736433863639832, "avg_line_length": 24.799999237060547, "blob_id": "bf5a8a841b7e8c705f029483b16f7cc5bf7ff89c", "content_id": "d14696dc898036171da29680bc01f98762ab1241", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 258, "license_type": "permissive", "max_line_length": 67, "num_lines": 10, "path": "/Python/Sets/Set Add/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/py-set-add/problem\n\"\"\"\nif __name__ == \"__main__\":\n counter = int(input())\n countries = set()\n for _ in range(counter):\n countries.add(input())\n print(len(countries))\n" }, { "alpha_fraction": 0.5316725969314575, "alphanum_fraction": 0.5380783081054688, "avg_line_length": 30.244443893432617, "blob_id": "e09d05f8a956a3ddb48aee9c0c6d7fb0fea418a3", "content_id": "0de24030be154d2640e5b0b84bce0a12b9dbe801", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1405, "license_type": "permissive", "max_line_length": 142, "num_lines": 45, "path": "/Java/BigNumber/Java Primality Test/Solution.java", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "/*\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/java-primality-test/problem\n ------------------------\n*/\n\n\nimport java.util.*;\n\npublic class Solution {\n\n public static boolean canWin(int leap, int[] game) {\n // Return true if you can win the game; otherwise, return false.\n return isSolvable(leap, game, 0);\n }\n\n public static boolean isSolvable(int leap, int[] game, int i){\n if(i >= game.length) return true;\n if(i < 0 || game[i] == 1) return false; //if i is minus this means return to much or if game[i] is 1 it means location visited before.\n\n game[i] = 1; // if no true or false it means you can visit the location.\n\n return isSolvable(leap, game, i + 1) || //It means go a step forward\n isSolvable(leap, game, i - 1) || //It means go back one step\n isSolvable(leap, game, i + leap); // It means go a leap forward.\n\n }\n\n public static void main(String[] args) {\n Scanner scan = new Scanner(System.in);\n int q = 
scan.nextInt();\n while (q-- > 0) {\n int n = scan.nextInt();\n int leap = scan.nextInt();\n \n int[] game = new int[n];\n for (int i = 0; i < n; i++) {\n game[i] = scan.nextInt();\n }\n\n System.out.println( (canWin(leap, game)) ? \"YES\" : \"NO\" );\n }\n scan.close();\n }\n}" }, { "alpha_fraction": 0.4923766851425171, "alphanum_fraction": 0.5112107396125793, "avg_line_length": 21.299999237060547, "blob_id": "faaa2515c17f4b0781ee6e888cd2f2e88b2fad07", "content_id": "ddf276f10f90beee8e859aacdeb6a1c64911d586", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1115, "license_type": "permissive", "max_line_length": 71, "num_lines": 50, "path": "/30 Days of Code/Day 29 - Bitwise AND/Solution.java", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "/*\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/30-bitwise-and/problem\n*/\n\nimport java.io.*;\nimport java.math.*;\nimport java.security.*;\nimport java.text.*;\nimport java.util.*;\nimport java.util.concurrent.*;\nimport java.util.regex.*;\n\npublic class Solution {\n public static int bitwiseAnd(int n, int k){\n int A = 1;\n int maxValue = 0;\n while(A <= n-1){\n int B = A + 1;\n while(B <= n){\n if((A&B) < k && maxValue < (A&B)) maxValue = A&B;\n B++;\n }\n A++;\n }\n\n return maxValue;\n\n }\n\n\n\n private static final Scanner scanner = new Scanner(System.in);\n\n public static void main(String[] args) {\n int t = scanner.nextInt();\n scanner.skip(\"(\\r\\n|[\\n\\r\\u2028\\u2029\\u0085])?\");\n\n for (int tItr = 0; tItr < t; tItr++) {\n String[] nk = scanner.nextLine().split(\" \");\n\n int n = Integer.parseInt(nk[0]);\n\n int k = Integer.parseInt(nk[1]);\n System.out.println(bitwiseAnd(n,k));\n }\n\n scanner.close();\n }\n}\n" }, { "alpha_fraction": 0.5338078141212463, "alphanum_fraction": 0.5569394826889038, "avg_line_length": 21.520000457763672, "blob_id": "c42e6b4d01ec87744ba0b09105d0253a364743ce", "content_id": "a312dd65f086e076cc208b1ed2ae6a0943b2ef75", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 562, "license_type": "permissive", "max_line_length": 67, "num_lines": 25, "path": "/30 Days of Code/Day 20 - Sorting/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/30-sorting/problem\n\"\"\"\n\nimport sys\n\nn = int(input().strip())\na = list(map(int, input().strip().split(' ')))\n\ntotalSwaps = 0\nfor i in range(n):\n numberOfSwaps = 0\n for j in range(n-1):\n if a[j] > a[j+1]:\n a[j], a[j+1] = a[j+1], a[j]\n numberOfSwaps += 1\n totalSwaps += 1\n \n if numberOfSwaps == 0: break\n\n\nprint(\"Array is sorted in {} swaps.\".format(totalSwaps))\nprint(\"First Element: {}\".format(a[0]))\nprint(\"Last Element: {}\".format(a[-1]))" }, { "alpha_fraction": 0.5783132314682007, "alphanum_fraction": 0.5783132314682007, "avg_line_length": 29.18181800842285, "blob_id": "f4131e80b25d2852d8fcda31ad03d76ed1bc6833", "content_id": "22c1f4ce2ecea92cb29d69ab52a74cdf474aaaac", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 332, "license_type": "permissive", "max_line_length": 77, "num_lines": 11, "path": "/Python/Sets/Symmetric Difference/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : 
https://www.hackerrank.com/challenges/symmetric-difference/problem\n\"\"\"\nif __name__ == \"__main__\":\n counterM = int(input())\n M = set(map(int, input().split()))\n counterN = int(input())\n N = set(map(int, input().split()))\n for i in sorted(M.symmetric_difference(N)):\n print(i)\n" }, { "alpha_fraction": 0.603960394859314, "alphanum_fraction": 0.6856435537338257, "avg_line_length": 27.85714340209961, "blob_id": "f580dbc06b2eb70511ec2a7305dc193ed201c7e5", "content_id": "83eaa3f4f21e67fe2b7f66e2efa636f9b6ab3aba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 404, "license_type": "permissive", "max_line_length": 147, "num_lines": 14, "path": "/readme.md", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "<p align=\"center\">\n <a href=\"https://www.hackerrank.com/aesavas\">\n <img height=85 src=\"https://d3keuzeb2crhkn.cloudfront.net/hackerrank/assets/styleguide/logo_wordmark-f5c5eb61ab0a154c3ed9eda24d0b9e31.svg\">\n </a>\n <br>My solutions to HackerRank problems\n</p>\n\n* [Python](./Python/)\n\n* [Project Euler+](./Project%20Euler%2B)\n\n* [30 Days Of Code](./30%20Days%20of%20Code)\n\n* [Java](./Java)\n" }, { "alpha_fraction": 0.5656565427780151, "alphanum_fraction": 0.5757575631141663, "avg_line_length": 28.117647171020508, "blob_id": "cda2374686407a08db6565cc7a2223a098b02742", "content_id": "b25c8623bf3b0fe433898461bc95a3088e8ffa11", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 495, "license_type": "permissive", "max_line_length": 80, "num_lines": 17, "path": "/Python/Sets/Set Discard,Remove,Pop/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/py-set-discard-remove-pop/forum\n\"\"\"\nn = int(input())\ns = set(map(int, input().split()))\nfor _ in range(int(input())):\n operation = input().split()\n if operation[0].lower() == \"discard\":\n s.discard(int(operation[1]))\n elif operation[0].lower() == \"remove\":\n s.remove(int(operation[1]))\n elif operation[0].lower() == \"pop\":\n s.pop()\n else:\n print(\"Wrong operation!\")\nprint(sum(s))\n" }, { "alpha_fraction": 0.545643150806427, "alphanum_fraction": 0.5643153786659241, "avg_line_length": 21.952381134033203, "blob_id": "01181a2faa3cdb9a6c9a78deb3482644e8e1b4bc", "content_id": "7b69ed92a8625ad5c10c8abc3ff0e2f10b119042", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 482, "license_type": "permissive", "max_line_length": 83, "num_lines": 21, "path": "/Project Euler+/Problem 003/Solution 3.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\nauthor : Ali Emre SAVAS\nLink : https://www.hackerrank.com/contests/projecteuler/challenges/euler003/problem\n\"\"\"\n\n\ndef findLargestPrimeFactor(number):\n factor = 2\n while(factor <= int(number ** 0.5)):\n if(number % factor == 0):\n number /= factor\n else:\n factor += 1\n return int(number)\n\n\nif __name__ == \"__main__\":\n t = int(input().strip())\n for a0 in range(t):\n n = int(input().strip())\n print(findLargestPrimeFactor(n))\n" }, { "alpha_fraction": 0.7623082399368286, "alphanum_fraction": 0.7945950627326965, "avg_line_length": 192.3275909423828, "blob_id": "222fc837d18f1754f0dba87f34f0f39bd528f014", "content_id": "75bdb39a9c04f159c03249b864346c265b96dec6", "detected_licenses": [ "MIT" ], "is_generated": false, 
"is_vendor": false, "language": "Markdown", "length_bytes": 11212, "license_type": "permissive", "max_line_length": 271, "num_lines": 58, "path": "/Python/README.md", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "# Python\n\n|Number|Subdomain|Challenge|Points|Solution|\n|:----:|:-------:|:-------:|:----:|:------:|\n|1|Introduction|[Say Hello World with Python](https://www.hackerrank.com/challenges/py-hello-world/problem)|5|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Introduction/Say%20Hello%20World%20with%20Python/solution.py)|\n|2|Introduction|[Python If-Else](https://www.hackerrank.com/challenges/py-if-else/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/tree/master/Python/Introduction/Python%20If-Else/solution.py)|\n|3|Introduction|[Arithmetic Operators](https://www.hackerrank.com/challenges/python-arithmetic-operators/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Introduction/Python%20Arithmetic%20Operators/solution.py)|\n|4|Introduction|[Python: Division](https://www.hackerrank.com/challenges/python-division/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Introduction/Python%20Division/solution.py)|\n|5|Introduction|[Loops](https://www.hackerrank.com/challenges/python-loops/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Introduction/Python%20Loops/solution.py)|\n|6|Introduction|[Write a Function](https://www.hackerrank.com/challenges/write-a-function/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Introduction/Python%20Write%20a%20Function/solution.py)|\n|7|Introduction|[Print Function](https://www.hackerrank.com/challenges/python-print/problem)|20|[solution.py](https://github.com/aesavas/HackerRank/tree/master/Python/Introduction/Python%20Print)|\n|8|Basic Data Types|[List Comprehensions](https://www.hackerrank.com/challenges/list-comprehensions/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Basic%20Data%20Types/List%20Comprehensions/solution.py)|\n|9|Basic Data Types|[Find the Runner-Up Score!](https://www.hackerrank.com/challenges/find-second-maximum-number-in-a-list/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Basic%20Data%20Types/Find%20the%20Runner-Up%20Score/solution.py)|\n|10|Basic Data Types|[Nested Lists](https://www.hackerrank.com/challenges/nested-list/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Basic%20Data%20Types/Nested%20List/solution.py)|\n|11|Basic Data Types|[Finding the percentage](https://www.hackerrank.com/challenges/finding-the-percentage/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Basic%20Data%20Types/Finding%20The%20Percentage/solution.py)|\n|12|Basic Data Types|[Lists](https://www.hackerrank.com/challenges/python-lists/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Basic%20Data%20Types/Python%20Lists/solution.py)|\n|13|Basic Data Types|[Tuples](https://www.hackerrank.com/challenges/python-tuples/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Basic%20Data%20Types/Python%20Tuple/solution.py)|\n|14|Strings|[sWAP cASE](https://www.hackerrank.com/challenges/swap-case/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Strings/sWAP%20cASE/solution.py)|\n|15|Strings|[String Split and 
Join](https://www.hackerrank.com/challenges/python-string-split-and-join/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Strings/String%20Split%20and%20Join/solution.py)|\n|16|Strings|[What's your name?](https://www.hackerrank.com/challenges/whats-your-name/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Strings/What's%20Your%20Name/solution.py)|\n|17|Strings|[Mutations](https://www.hackerrank.com/challenges/python-mutations/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Strings/Mutations/solution.py)|\n|18|Strings|[Find a String](https://www.hackerrank.com/challenges/find-a-string/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Strings/Find%20a%20String/solution.py)|\n|19|Strings|[String Validators](https://www.hackerrank.com/challenges/string-validators/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Strings/String%20Validators/solution.py)|\n|20|Strings|[Text Alignment](https://www.hackerrank.com/challenges/text-alignment/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Strings/Text%20Alignment/solution.py)|\n|21|Strings|[Text Wrap](https://www.hackerrank.com/challenges/text-wrap/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Strings/Text%20Wrap/solution.py)|\n|22|Strings|[Designer Door Mat](https://www.hackerrank.com/challenges/designer-door-mat/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Strings/Designer%20Door%20Mat/solution.py)|\n|23|Strings|[String Formatting](https://www.hackerrank.com/challenges/python-string-formatting/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Strings/String%20Formatting/solution.py)|\n|24|Strings|[Alphabet Rangoli](https://www.hackerrank.com/challenges/alphabet-rangoli/problem)|20|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Strings/Alphabet%20Rangoli/solution.py)|\n|25|Strings|[Capitalize!](https://www.hackerrank.com/challenges/capitalize/problem)|20|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Strings/Capitalize/solution.py)|\n|26|Strings|[The Minion Game](https://www.hackerrank.com/challenges/the-minion-game/problem)|40|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Strings/The%20Minion%20Game/solution.py)|\n|27|Strings|[Merge the Tools!](https://www.hackerrank.com/challenges/merge-the-tools/problem)|40|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Strings/Merge%20the%20Tools/solution.py)|\n|28|Sets|[Introduction to Sets](https://www.hackerrank.com/challenges/py-introduction-to-sets/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Sets/Introduction%20to%20Sets/solution.py)|\n|29|Sets|[No Idea!](https://www.hackerrank.com/challenges/no-idea/problem)|50|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Sets/No%20Idea!/solution.py)|\n|30|Sets|[Symmetric Difference](https://www.hackerrank.com/challenges/symmetric-difference/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Sets/Symmetric%20Difference/solution.py)|\n|31|Sets|[Set .add()](https://www.hackerrank.com/challenges/py-set-add/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Sets/Set%20Add/solution.py)|\n|32|Sets|[Set .discard(), .remove() & 
.pop()](https://www.hackerrank.com/challenges/py-set-discard-remove-pop/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Sets/Set%20Discard%2CRemove%2CPop/solution.py)|\n|33|Sets|[Set .union() Operation](https://www.hackerrank.com/challenges/py-set-union/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Sets/.union()%20Operation/solution.py)|\n|34|Sets|[Set .intersection() Operation](https://www.hackerrank.com/challenges/py-set-intersection-operation)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Sets/.intersection()%20Operation/solution.py)|\n|35|Sets|[Set .difference() Operation](https://www.hackerrank.com/challenges/py-set-difference-operation/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Sets/.difference%20Operation/solution.py)|\n|36|Sets|[Set .symmetric_difference() Operation](https://www.hackerrank.com/challenges/py-set-symmetric-difference-operation/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Sets/.symmetric_difference()%20Operation/solution.py)|\n|37|Sets|[Set Mutations](https://www.hackerrank.com/challenges/py-set-mutations/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Sets/Set%20Mutations/solution.py)|\n|38|Sets|[The Captain's Room](hackerrank.com/challenges/py-the-captains-room/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/tree/master/Python/Sets/The%20Captain's%20Room)|\n|39|Sets|[Check Subset](https://www.hackerrank.com/challenges/py-check-subset/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Sets/Check%20Subset/solution.py)|\n|40|Sets|[Check Strict Superset](https://www.hackerrank.com/challenges/py-check-strict-superset)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Sets/Check%20Strict%20Superset/solution.py)|\n|41|Math|[Polar Coordinates](https://www.hackerrank.com/challenges/polar-coordinates/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Math/Polar%20Coordinates/solution.py)|\n|42|Math|[Find Angle MBC](https://www.hackerrank.com/challenges/find-angle/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Math/Find%20Angle/solution.py)|\n|43|Math|[Triangle Quest 2](https://www.hackerrank.com/challenges/triangle-quest-2/problem)|20|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Math/Triangle%20Quest%202/solution.py)|\n|44|Math|[Mod Divmod](https://www.hackerrank.com/challenges/python-mod-divmod/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Math/Mod%20Divmod/solution.py)|\n|45|Math|[Power - Mod Power](https://www.hackerrank.com/challenges/python-power-mod-power/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Math/Power%20Mod%20Power/solution.py)|\n|46|Math|[Integers Come In All Sizes](https://www.hackerrank.com/challenges/python-integers-come-in-all-sizes/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Math/Integer%20Come%20In%20All%20Sizes/solution.py)|\n|47|Math|[Triangle 
Quest](https://www.hackerrank.com/challenges/python-quest-1)|20|[solution.py](https://github.com/aesavas/HackerRank/blob/master/Python/Math/Triangle%20Quest/solution.py)|\n|48|Itertools|[itertools.product()](https://www.hackerrank.com/challenges/itertools-product/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/tree/master/Python/Itertools/Itertools.product())|\n|49|Itertools|[itertools.permutations()](https://www.hackerrank.com/challenges/itertools-permutations/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/tree/master/Python/Itertools/Itertools.permutations)|\n|50|Itertools|[itertools.combinations()](https://www.hackerrank.com/challenges/itertools-combinations/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/tree/master/Python/Itertools/Itertools.combinations)|\n|51|Itertools|[itertools.combinations_with_replacement()](https://www.hackerrank.com/challenges/itertools-combinations-with-replacement/problem)|10|[solution.py](https://github.com/aesavas/HackerRank/tree/master/Python/Itertools/Itertools.combinations_with_replacement)|\n\n> Once I solve problems, I will add solutions.\n>> Note: If you have better solution, you can share with me via e-mail. :mailbox:" }, { "alpha_fraction": 0.6451078057289124, "alphanum_fraction": 0.6451078057289124, "avg_line_length": 45.38461685180664, "blob_id": "381c7f617658539cc274585e712b6fd1782b9e8c", "content_id": "40cd5cb265b2ea880666404e5ce275cb66945fee", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 603, "license_type": "permissive", "max_line_length": 121, "num_lines": 13, "path": "/Python/Itertools/Itertools.combinations_with_replacement/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/itertools-combinations-with-replacement/problem\n\"\"\"\nfrom itertools import combinations_with_replacement\n\nif __name__ == \"__main__\":\n S, k = input().split()\n # We need to sort binary list in the list. So, I use like this \"sorted(combinations_with_replacement(S, int(k)))[j])\"\n combinationList = [sorted(sorted(combinations_with_replacement(S, int(k)))[\n j]) for j in range(len(sorted(combinations_with_replacement(S, int(k)))))]\n for i in sorted(combinationList):\n print(\"\".join(i))\n" }, { "alpha_fraction": 0.43262410163879395, "alphanum_fraction": 0.4373522400856018, "avg_line_length": 26.322580337524414, "blob_id": "a9b1f77a414f47d7ba12b4f23ceb3656c40cb296", "content_id": "cdb2f8b56e2e2a78d1b41966f5c4e533949a4975", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 846, "license_type": "permissive", "max_line_length": 81, "num_lines": 31, "path": "/Java/Introduction/Java Loops II/Solution.java", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "/*\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/java-loops/problem\n ------------------------\n\n I do not want to use Math.pow() function. 
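(Math.pow works on doubles, so repeated integer doubling stays exact and is simpler here.) 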
Thus, I try to solve with for loop.\n*/\n\nimport java.util.*;\n\npublic class Solution{\n public static void main(String []argh){\n Scanner in = new Scanner(System.in);\n int t=in.nextInt();\n for(int i=0;i<t;i++){\n int a = in.nextInt();\n int b = in.nextInt();\n int n = in.nextInt();\n for(int j=0; j<n;j++){\n int temp = b; \n for(int k=0; k<j; k++){\n temp *= 2; \n }\n a += temp; // I kept the sum in \"a\" variable.\n System.out.print(a + \" \");\n }\n System.out.println();\n }\n in.close();\n }\n}" }, { "alpha_fraction": 0.4832041263580322, "alphanum_fraction": 0.5038759708404541, "avg_line_length": 24.799999237060547, "blob_id": "69dcd6f7531747fcd42223eeea16fed34e664e96", "content_id": "17ef651efb5f3b2110ac6b5e7957cd0f01d789e8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 387, "license_type": "permissive", "max_line_length": 74, "num_lines": 15, "path": "/30 Days of Code/Day 10 - Binary Numbers/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/30-binary-numbers/problem\n\"\"\"\nif __name__ == '__main__':\n n = int(input())\n number = bin(n)[2:]\n counter, result = 0, 0\n for i in range(len(number)):\n if number[i] == '1':\n counter += 1\n result = max(result, counter)\n else:\n counter = 0\n print(result)\n" }, { "alpha_fraction": 0.5295508503913879, "alphanum_fraction": 0.5342789888381958, "avg_line_length": 27.16666603088379, "blob_id": "5eae4044a9300dcdbbc07e0ddfce6f8f1b6913f8", "content_id": "35ad037e4edd124a21d78e972e46fc30e7a16b7c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 846, "license_type": "permissive", "max_line_length": 82, "num_lines": 30, "path": "/Java/Strings/Java String Tokens/Solution.java", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "/*\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/java-string-tokens/problem\n ------------------------\n*/\n\nimport java.util.*;\n\npublic class Solution {\n\n public static void main(String[] args) {\n Scanner scan = new Scanner(System.in);\n String s = scan.nextLine();\n scan.close();\n // We need to delete spaces beginning of the string and end of the string.\n s = s.trim();\n\n // We have to check if the string only space, we finish the program.\n if(s.length() == 0){\n System.out.println(0);\n System.exit(0);\n }\n String[] wordArray = s.split(\"[ !,?._'@]+\");\n System.out.println(wordArray.length);\n for(int i=0; i<wordArray.length; i++){\n System.out.println(wordArray[i]);\n }\n scan.close();\n }\n}\n\n" }, { "alpha_fraction": 0.6132264733314514, "alphanum_fraction": 0.6132264733314514, "avg_line_length": 37.38461685180664, "blob_id": "3ed2c8c4180a22be39a6c68b27ad2cd5e30b2138", "content_id": "2f91343735faf42bc074be74a2424a1e43bc120f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 499, "license_type": "permissive", "max_line_length": 101, "num_lines": 13, "path": "/Python/Sets/No Idea!/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/no-idea/problem\n\"\"\"\n\n# The important idea is we have to search on set not on list. 
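A set membership test is O(1) on average, while a list membership test is O(n). 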
(For time complexity.)\nif __name__ == \"__main__\":\n    n, m = map(int, input().split())\n    numberList = list(map(int, input().split()))\n    setA = set(map(int, input().split()))\n    setB = set(map(int, input().split()))\n    happiness = len([i for i in numberList if i in setA]) - len([i for i in numberList if i in setB])\n    print(happiness)\n" }, { "alpha_fraction": 0.6106194853782654, "alphanum_fraction": 0.6106194853782654, "avg_line_length": 24.11111068725586, "blob_id": "692244e3f133031b15b52c6001f031bb4dc60e4b", "content_id": "c8c40618addc8df4e9965e8637181c4a173c381b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 226, "license_type": "permissive", "max_line_length": 74, "num_lines": 9, "path": "/Python/Math/Polar Coordinates/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n    author : Ali Emre SAVAS\n    Link : https://www.hackerrank.com/challenges/polar-coordinates/problem\n\"\"\"\nfrom cmath import phase\nif __name__ == \"__main__\":\n    z = input()\n    print(abs(complex(z)))\n    print(phase(complex(z)))\n" }, { "alpha_fraction": 0.452162504196167, "alphanum_fraction": 0.4626474380493164, "avg_line_length": 27.259260177612305, "blob_id": "183022f34e2b1c7cf67eb81188e4eadc3cc202a2", "content_id": "bd77b9f80c7e5dfae351c6582f6cb0faaf06b9ab", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 763, "license_type": "permissive", "max_line_length": 71, "num_lines": 27, "path": "/30 Days of Code/Day 06 - Let's Review/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n    author : Ali Emre SAVAS\n    Link : https://www.hackerrank.com/challenges/30-review-loop/problem\n\"\"\"\n\nif __name__ == '__main__':\n    S = list()\n    N = int(input())\n    for _ in range(N):\n        s = input()\n        S.append(s)\n\n    # This solution is the classical way\n    for i in S:\n        odd, even = \"\", \"\" # because we have to reset them every step\n        for j in range(len(i)):\n            if(j % 2 == 0):\n                even += i[j]\n            else:\n                odd += i[j]\n        print(\"{} {}\".format(even, odd))\n\n    # This solution uses list comprehensions\n    for i in S:\n        even = [i[j] for j in range(len(i)) if(j % 2 == 0)]\n        odd = [i[j] for j in range(len(i)) if(j % 2 == 1)]\n        print(\"{} {}\".format(\"\".join(even), \"\".join(odd)))\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6263566017150879, "avg_line_length": 36.94117736816406, "blob_id": "d626f8bae83ed7f6b8c18666caac2bdb34926f1e", "content_id": "5b50cf14777e9d53f2b13ffb73d7eac7834e36ed", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 645, "license_type": "permissive", "max_line_length": 147, "num_lines": 17, "path": "/Project Euler+/Problem 001/Solution 1.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\nauthor : Ali Emre SAVAS\nLink : https://www.hackerrank.com/contests/projecteuler/challenges/euler001/problem\n\"\"\"\ndef sumOfSequence(x):\n    return x*(x+1)  # this is twice the sum 1+2+...+x; the final >> 1 halves it\n\n\nif __name__ == \"__main__\":\n    t = int(input())\n    for i in range(t):\n        n = int(input())\n        n -= 1 # Because problem says below n\n        # We subtract the multiples of 15 because they are counted twice in the sequence\n        # Also, we use bitwise right-shift for division. Because hackerrank does not accept /2. 
Some test cases' inputs contain very large numbers.\n        print(int(3*sumOfSequence(n//3) + 5 *\n                  sumOfSequence(n//5) - 15*sumOfSequence(n//15)) >> 1)\n" }, { "alpha_fraction": 0.5913370847702026, "alphanum_fraction": 0.5951035618782043, "avg_line_length": 36.92856979370117, "blob_id": "3811fdc64037fd54891db5bde28d1e63bd1042a4", "content_id": "db14b1581cb0d4f0381a3aa165498f83f50fdf83", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 531, "license_type": "permissive", "max_line_length": 103, "num_lines": 14, "path": "/Python/Sets/Check Strict Superset/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n    author : Ali Emre SAVAS\n    Link : https://www.hackerrank.com/challenges/py-check-strict-superset/problem\n\"\"\"\nif __name__ == \"__main__\":\n    setA = set(map(int, input().split()))\n    n = int(input())\n    numberOfSubset = 0\n    for _ in range(n):\n        tempSet = set(map(int, input().split()))\n        # setA != tempSet keeps the superset strict; issubset alone would also accept an equal set.\n        if (tempSet.issubset(setA) and setA != tempSet):\n            # If numberOfSubset equals the number of other sets, setA is a strict superset of all of them.\n            numberOfSubset += 1\n    print(\"{}\".format(True if numberOfSubset == n else False))\n" }, { "alpha_fraction": 0.6309719681739807, "alphanum_fraction": 0.6326194405555725, "avg_line_length": 23.280000686645508, "blob_id": "2aa2c130fa185cabfef40567c6542bf10ffdbb4c", "content_id": "3819d8cbaa468e303be2b5e4fd613dc216af7706", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 607, "license_type": "permissive", "max_line_length": 72, "num_lines": 25, "path": "/Python/Strings/Mutations/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n    author : Ali Emre SAVAS\n    Link : https://www.hackerrank.com/challenges/python-mutations/problem\n\"\"\"\n\n# In this problem there are two possible solutions.\n\n\ndef mutate_string(string, position, character):\n    l = list(string)\n    l[position] = character\n    return \"\".join(l)  # an assignment inside a return is a syntax error, so just return the joined string\n\n\ndef mutate_string_alternative(string, position, character):\n    return string[:position] + character + string[position+1:]\n\n\nif __name__ == '__main__':\n    s = input()\n    i, c = input().split()\n    s_new = mutate_string(s, int(i), c)\n    print(s_new)\n    s_new = mutate_string_alternative(s, int(i), c)\n    print(s_new)\n" }, { "alpha_fraction": 0.501886785030365, "alphanum_fraction": 0.502964973449707, "avg_line_length": 30.440677642822266, "blob_id": "89405a8acf7745dd990768c082dc057fc9159a96", "content_id": "670e077fb2befd43e4e475399f64115e5c19c976", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1855, "license_type": "permissive", "max_line_length": 113, "num_lines": 59, "path": "/30 Days of Code/Day 24 - More Linked Lists/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n    author : Ali Emre SAVAS\n    Link : https://www.hackerrank.com/challenges/30-linked-list-deletion/problem\n\"\"\"\n\nclass Node:\n    def __init__(self,data):\n        self.data = data\n        self.next = None \nclass Solution: \n    def insert(self,head,data):\n        p = Node(data) \n        if head==None:\n            head=p\n        elif head.next==None:\n            head.next=p\n        else:\n            start=head\n            while(start.next!=None):\n                start=start.next\n            start.next=p\n        return head \n    def display(self,head):\n        current = head\n        while current:\n            print(current.data,end=' ')\n            current = current.next\n\n    def removeDuplicates(self,head):\n        \"\"\"\n        counterNode = it travels on linked 
list for duplicate data.\n        currentNode = the actual node that counterNode is compared against\n        \"\"\"\n        if head != None:\n            currentNode = head\n            if(currentNode.next): \n                counterNode = currentNode.next\n            while(currentNode):\n                if(counterNode):\n                    if(currentNode.data == counterNode.data): \n                        currentNode.next = None # If there are duplicate data, we cut the connection between them.\n                    else:\n                        currentNode.next = counterNode # If there is no duplicate, we connect the two nodes again.\n                        currentNode = currentNode.next\n                    counterNode = counterNode.next\n                else:\n                    break\n        return head\n\n    \n\nmylist= Solution()\nT=int(input())\nhead=None\nfor i in range(T):\n    data=int(input())\n    head=mylist.insert(head,data) \nhead=mylist.removeDuplicates(head)\nmylist.display(head)" }, { "alpha_fraction": 0.6627078652381897, "alphanum_fraction": 0.6627078652381897, "avg_line_length": 37.272727966308594, "blob_id": "18ac90724c66fb9b592a614d2eb1015e5e70a73a", "content_id": "a9611aac18f82d5bab850116bec52ae7bba4e6fd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 421, "license_type": "permissive", "max_line_length": 74, "num_lines": 11, "path": "/Python/Strings/String Validators/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n    author : Ali Emre SAVAS\n    Link : https://www.hackerrank.com/challenges/string-validators/problem\n\"\"\"\nif __name__ == '__main__':\n    s = input()\n    print(any(character.isalnum() for character in s))\n    print(any(character.isalpha() for character in s))\n    print(any(character.isdigit() for character in s))\n    print(any(character.islower() for character in s))\n    print(any(character.isupper() for character in s))\n" }, { "alpha_fraction": 0.642201840877533, "alphanum_fraction": 0.64449542760849, "avg_line_length": 30.14285659790039, "blob_id": "d0ec9f07b91a638f9d656cc5d9c49686e0bb61b7", "content_id": "58844dae3d39743e6451c9170024547e5638cc3b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 437, "license_type": "permissive", "max_line_length": 134, "num_lines": 14, "path": "/Python/Math/Find Angle/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n    author : Ali Emre SAVAS\n    Link : https://www.hackerrank.com/challenges/find-angle/problem\n\"\"\"\n\"\"\"\nIt comes from the triangle median formula. We have a right triangle ABC, and M is the midpoint of the hypotenuse BC, so AM is the median. 
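The median to the hypotenuse of a right triangle is half the hypotenuse, so M is equidistant from all three vertices. 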
It means that AM, MC and MB edges are equal.\nSo, we can solve this problem with inverse of tan.\n\"\"\"\n\nimport math\nif __name__ == \"__main__\":\n AB = int(input())\n AC = int(input())\n print(str(round(math.degrees(math.atan2(AB, AC)))) + \"°\")\n" }, { "alpha_fraction": 0.5981012582778931, "alphanum_fraction": 0.5981012582778931, "avg_line_length": 30.600000381469727, "blob_id": "e4569dd345a79be4e567ec14f99f6b0db3b20a75", "content_id": "f9d530971ed618e8c6e94b1019bb40f080dc46c1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 316, "license_type": "permissive", "max_line_length": 74, "num_lines": 10, "path": "/Python/Sets/.difference Operation/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/py-set-difference/problem\n\"\"\"\nif __name__ == \"__main__\":\n numberOfSetA = int(input())\n setA = set(map(int, input().split()))\n numberOfSetB = int(input())\n setB = set(map(int, input().split()))\n print(len(setA.difference(setB)))\n" }, { "alpha_fraction": 0.5125135779380798, "alphanum_fraction": 0.5353645086288452, "avg_line_length": 25.285715103149414, "blob_id": "992fcc81ad4ae5911ede847abaed015700f11628", "content_id": "a4a36bf77dfffe253f38a65d35d1f6d59d21a6d4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 919, "license_type": "permissive", "max_line_length": 72, "num_lines": 35, "path": "/30 Days of Code/Day 26 - Nested Logic/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/30-nested-logic/problem\n\"\"\"\n\nclass Date:\n def __init__(self, day, month, year):\n self.day = day\n self.month = month\n self.year = year\n\ndef calculateFine(r, e):\n \"\"\"\n r = returnedDate\n e = expectedDate\n \"\"\"\n\n if r.year == e.year:\n if r.month == e.month:\n if r.day == e.day: return 0\n else: return abs(r.day - e.day) * 15\n else:\n if r.month < e.month: return 0\n else : return abs(r.month - e.month) * 500\n else:\n if r.year < e.year: return 0\n else: return 10000\n\n\nif __name__ == \"__main__\":\n date = list(map(int, input().split()))\n returnDate = Date(date[0], date[1], date[2])\n date = list(map(int, input().split()))\n expectedDate = Date(date[0], date[1], date[2])\n print(calculateFine(returnDate,expectedDate))" }, { "alpha_fraction": 0.5009980201721191, "alphanum_fraction": 0.5329341292381287, "avg_line_length": 25.36842155456543, "blob_id": "3ffc65d8df9d2aae69954a72036fe6b71abca625", "content_id": "fc96df18974e7347a73746a9cea8cc3f3b730ea0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 501, "license_type": "permissive", "max_line_length": 74, "num_lines": 19, "path": "/Python/Strings/Designer Door Mat/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/designer-door-mat/problem\n\"\"\"\nnm = input().split()\nn, m = int(nm[0]), int(nm[1])\nfigure = \".|.\"\n# TOP SECTION\nfor i in range(n//2):\n print((figure*i).rjust((m//2)-1, \"-\") +\n figure+(figure*i).ljust((m//2)-1, \"-\"))\n\n# WELCOME SECTION\nprint(\"WELCOME\".center(m, \"-\"))\n\n# BOTTOM SECTION\nfor i in range((n//2), 0, -1):\n print((figure*(i-1)).rjust((m//2)-1, \"-\") +\n 
figure+(figure*(i-1)).ljust((m//2)-1, \"-\"))\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.6354166865348816, "avg_line_length": 18.299999237060547, "blob_id": "3e15e71c2729a1e76452838815372174402f4e69", "content_id": "98464cd5e28b3fc3e3c2f35f738a6b99f4e31029", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 192, "license_type": "permissive", "max_line_length": 88, "num_lines": 10, "path": "/30 Days of Code/Day 16 - Exceptions/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/30-exceptions-string-to-integer/problem\n\"\"\"\n\nS = input().strip()\ntry:\n print(int(S))\nexcept:\n print(\"Bad String\")" }, { "alpha_fraction": 0.6005547642707825, "alphanum_fraction": 0.6088765859603882, "avg_line_length": 35.04999923706055, "blob_id": "2a1fa48d3074734a63d5ce9a5db58d295b9a8c70", "content_id": "7f70dc91c4383b038c9565534392bb91cdebe7c7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 721, "license_type": "permissive", "max_line_length": 123, "num_lines": 20, "path": "/Python/Strings/String Formatting/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/python-string-formatting/problem\n\"\"\"\n\n\ndef print_formatted(number):\n space = len(bin(number)[2:])\n for i in range(1, n+1):\n # I slice from index 2 because the first two characters (the base prefix) are useless for this problem.\n octNumber, hexNumber, binNumber = oct(\n i)[2:], hex(i)[2:].upper(), bin(i)[2:]\n # This string formatting reference is from pyformat.info (you can see details in the Padding and aligning strings section.)\n print(\"{:>{space}} {:>{space}} {:>{space}} {:>{space}}\".format(\n i, octNumber, hexNumber, binNumber, space=space))\n\n\nif __name__ == '__main__':\n n = int(input())\n print_formatted(n)\n" }, { "alpha_fraction": 0.45080500841140747, "alphanum_fraction": 0.6100178956985474, "avg_line_length": 24.454545974731445, "blob_id": "e67bf250c056832e39bd16cbaa2827f71346d799", "content_id": "6bb4c5d3f1bb993b6b6df6a7a9d36a28b7ea26ca", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 559, "license_type": "permissive", "max_line_length": 111, "num_lines": 22, "path": "/Python/Math/Triangle Quest/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/python-quest-1/problem\n\"\"\"\n\"\"\"\nThere is a math trick in this question. I did not know that. 
If you do not know it, you probably cannot solve it.\nIntegers greater than 0 and less than 10:\n(10**1)//9 = 1\n(10**2)//9 = 11\n(10**3)//9 = 111\n(10**4)//9 = 1111\n(10**5)//9 = 11111\n(10**6)//9 = 111111\n(10**7)//9 = 1111111\n(10**8)//9 = 11111111\n(10**9)//9 = 111111111\n\nand if we multiply by the for-loop counter, we can solve the problem.\n\"\"\"\n\nfor i in range(1,int(input())):\n print(i * ((10**i)//9))" }, { "alpha_fraction": 0.6060606241226196, "alphanum_fraction": 0.6060606241226196, "avg_line_length": 38.599998474121094, "blob_id": "ae50a1e0abe99b71376c86761b15c5280fe96bae", "content_id": "dd518c714ec90eb8d994406ec961b7e848397ab9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 594, "license_type": "permissive", "max_line_length": 79, "num_lines": 15, "path": "/Python/Sets/Set Mutations/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/py-set-mutations/problem\n\"\"\"\nif __name__ == \"__main__\":\n numberOfSetA = int(input())\n setA = set(map(int, input().split()))\n numberOfOperation = int(input())\n for i in range(numberOfOperation):\n operation, numberOfItem = input().split()\n tempSet = set(map(int, input().split()))\n # The input gives us the method name directly, so we can use it in the eval() call.\n eval(\"setA.\"+operation.lower()+\"(tempSet)\")\n # I newly learned the eval() method and I like it so much :D\n print(sum(setA))\n" }, { "alpha_fraction": 0.6634078025817871, "alphanum_fraction": 0.6689944267272949, "avg_line_length": 34.79999923706055, "blob_id": "76b85f423051291488f2e0be3e77e6b791d8e656", "content_id": "7da9b378e61c709d5f99921468cbba8e21a9361b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 716, "license_type": "permissive", "max_line_length": 120, "num_lines": 20, "path": "/Python/Sets/The Captain's Room/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/py-the-captains-room/problem\n\"\"\"\n\n\ndef numberOfCaptainRoom(K, listOfRoomNumber):\n # We do not want to access the last element of the list because if we do, the function will raise an 'index out of range' exception.\n for i in range(0, len(listOfRoomNumber) - 1, K):\n if(listOfRoomNumber[i] != listOfRoomNumber[i+1]):\n return listOfRoomNumber[i]\n # If we do not find it in the for loop, it has to be the last element of the list.\n return listOfRoomNumber[-1]\n\n\nif __name__ == \"__main__\":\n K = int(input())\n listOfRoomNumber = list(map(int, input().split()))\n listOfRoomNumber.sort()\n print(numberOfCaptainRoom(K, listOfRoomNumber))\n" }, { "alpha_fraction": 0.5218659043312073, "alphanum_fraction": 0.5437317490577698, "avg_line_length": 22.689655303955078, "blob_id": "82007c73dd7545b6725e782a90b784686b4bb5cf", "content_id": "b434f286d731a36b385733b4cd5849e904bb01c6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 687, "license_type": "permissive", "max_line_length": 87, "num_lines": 29, "path": "/30 Days of Code/Day 25 - Running Time and Complexity/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/30-running-time-and-complexity/problem\n\n I used the simple 6k ± 1 method for this problem.\n src : 
https://en.wikipedia.org/wiki/Primality_test\n\"\"\"\n\ndef isPrime(number):\n if number == 1:\n return \"Not prime\"\n elif number <= 3:\n return \"Prime\"\n elif (number%2 == 0 or number%3==0):\n return \"Not prime\"\n i = 5\n while i*i <=number:\n if number%i ==0 or number%(i+2)==0:\n return \"Not prime\"\n i = i + 6\n\n return \"Prime\"\n\nif __name__ == '__main__':\n \n for _ in range(int(input())):\n print(isPrime(int(input())))" }, { "alpha_fraction": 0.5239999890327454, "alphanum_fraction": 0.5239999890327454, "avg_line_length": 26.77777862548828, "blob_id": "668b2561dc76f47c8587c1559fb71c9654759d81", "content_id": "f8253b33e6318f169444673bb8388e19b9ac9acd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 250, "license_type": "permissive", "max_line_length": 79, "num_lines": 9, "path": "/Python/Math/Power Mod Power/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/python-power-mod-power/problem\n\"\"\"\nif __name__ == \"__main__\":\n a = int(input())\n b = int(input())\n m = int(input())\n print(\"{}\\n{}\".format(pow(a, b), pow(a, b, m)))\n" }, { "alpha_fraction": 0.652694582939148, "alphanum_fraction": 0.6646706461906433, "avg_line_length": 20.875, "blob_id": "1fe0831a5c457c89f075ecc2e7193df443bd8c74", "content_id": "fda1deaeeb16420fc252db19e9c050258eb4c915", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 167, "license_type": "permissive", "max_line_length": 71, "num_lines": 8, "path": "/30 Days of Code/Day 00 - Hello World/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/30-hello-world/problem\n\"\"\"\ninput_string = input()\n\nprint('Hello, World.')\nprint(input_string)\n" }, { "alpha_fraction": 0.46929824352264404, "alphanum_fraction": 0.4912280738353729, "avg_line_length": 25.33333396911621, "blob_id": "78f9478d15ac6b733615ac969ddd9dbce21c7557", "content_id": "93c697cb70fa47fb7c3c69f15daca644dae19a57", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 228, "license_type": "permissive", "max_line_length": 69, "num_lines": 9, "path": "/Python/Introduction/Python Loops/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/python-loops/problem\n\"\"\"\nif __name__ == '__main__':\n n = int(input())\n if(n >= 1 and n <= 20):\n for i in range(0, n):\n print(i**2)\n" }, { "alpha_fraction": 0.5940319299697876, "alphanum_fraction": 0.6044413447380066, "avg_line_length": 35.9487190246582, "blob_id": "99de690eb8ca3cebdb9c18ca4ac6bc2938ca9d3e", "content_id": "0e205dacc241980cde7af07315e3599087250154", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1441, "license_type": "permissive", "max_line_length": 140, "num_lines": 39, "path": "/Python/Strings/Find a String/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/find-a-string/problem\n\"\"\"\n\n# This solution is much easier to understand than the other.\n\"\"\"\ndef count_substring(string, sub_string):\n numberOfSubstring = 0 # 
It counts the number of substrings inside the string.\n for i in range(0, len(string)-len(sub_string)+1): # Because I can traverse no more than the substring length. \n if(string[i] == sub_string[0]):\n numberOfSameChar = 1 # It counts the number of matching characters between the substring and the string. It starts at 1 because the first character is checked inside the \"if\".\n i += 1\n for j in range(1, len(sub_string)):\n if(string[i] == sub_string[j]):\n i += 1\n numberOfSameChar += 1\n else:\n break\n if(numberOfSameChar == len(sub_string)):\n numberOfSubstring += 1\n return numberOfSubstring\n\"\"\"\n\n\ndef count_substring(string, sub_string):\n numberOfSubstring = 0 # It counts the number of substrings inside the string.\n # Because I can traverse no more than the substring length.\n for i in range(0, len(string)-len(sub_string)+1):\n if(string[i:i+len(sub_string)] == sub_string):\n numberOfSubstring += 1\n return numberOfSubstring\n\n\nif __name__ == '__main__':\n string = input().strip()\n sub_string = input().strip()\n count = count_substring(string, sub_string)\n print(count)\n" }, { "alpha_fraction": 0.5541236996650696, "alphanum_fraction": 0.5567010045051575, "avg_line_length": 28.846153259277344, "blob_id": "8328e89ac2c748eb9f8cae2eba36fd3bc5e6f8a2", "content_id": "4bf217c258d3f546226e010338fa182b2dede9f6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 388, "license_type": "permissive", "max_line_length": 93, "num_lines": 13, "path": "/Python/Basic Data Types/Find the Runner-Up Score/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/find-second-maximum-number-in-a-list/problem\n\"\"\"\nif __name__ == '__main__':\n n = int(input())\n # I converted to list because I wanted to use sort() function.\n arr = list(map(int, input().split()))\n arr.sort(reverse=True)\n for i in arr:\n if (i < arr[0]):\n print(i)\n break\n" }, { "alpha_fraction": 0.6317460536956787, "alphanum_fraction": 0.6317460536956787, "avg_line_length": 25.25, "blob_id": "91f22000ff8052734e1b136cdc0454b0e00ab3a6", "content_id": "419b072c226aa0001ed23c1a0800c38afb386bbf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 315, "license_type": "permissive", "max_line_length": 79, "num_lines": 12, "path": "/Python/Itertools/Itertools.permutations/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/itertools-permutations/problem\n\"\"\"\n\nfrom itertools import permutations\n\nif __name__ == \"__main__\":\n S, k = input().split()\n permutationList = sorted(list(permutations(S, int(k))))\n for i in permutationList:\n print(\"\".join(i))\n" }, { "alpha_fraction": 0.6146694421768188, "alphanum_fraction": 0.6167355179786682, "avg_line_length": 31.266666412353516, "blob_id": "940f94ae867de9bcbedc38b333fef9b242d49ff3", "content_id": "bb1203388e8e1975df86bd2890ae2f4e458abd0f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 968, "license_type": "permissive", "max_line_length": 104, "num_lines": 30, "path": "/Python/Strings/Merge the Tools/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/merge-the-tools/editorial\n\"\"\"\nfrom collections import 
OrderedDict\n\n\ndef merge_the_tools(string, k):\n subsequents = [string[i:i+k] for i in range(0, len(string), k)]\n for subset in subsequents:\n print(\"{}\".format(\"\".join(OrderedDict.fromkeys(subset))))\n # print(\"{}\".format(\"\".join(set(subset)))) # If we do not care about order, we can use this line.\n\n\n# If you do not want to use any module, you can use this function\ndef merge_the_tools_without_modul(string, k):\n subsequents = [string[i:i+k] for i in range(0, len(string), k)]\n for i in range(len(subsequents)):\n subset = \"\"\n for letter in subsequents[i]:\n if letter not in subset:\n subset += letter\n print(subset)\n\n\nif __name__ == '__main__':\n string, k = input(), int(input())\n merge_the_tools(string, k)\n print(\"\")\n merge_the_tools_without_modul(string, k)\n" }, { "alpha_fraction": 0.47685185074806213, "alphanum_fraction": 0.5, "avg_line_length": 26, "blob_id": "5f4efccfa0a93885ed169e782eec63ecc31db007", "content_id": "24204dda4c4c919bbefbda15dfe83569192c3f5d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 216, "license_type": "permissive", "max_line_length": 65, "num_lines": 8, "path": "/30 Days of Code/Day 05 - Loops/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/30-loops/problem\n\"\"\"\nif __name__ == '__main__':\n n = int(input())\n for i in range(1,11):\n print(\"{} x {} = {}\".format(n, i, n*i))\n" }, { "alpha_fraction": 0.5464788675308228, "alphanum_fraction": 0.5464788675308228, "avg_line_length": 31.272727966308594, "blob_id": "2fcda500c9a8ef1a0a6f79288da0ca9cab6516c2", "content_id": "cb65730bc970d54be3520bca8ce2503df46fa8ba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 355, "license_type": "permissive", "max_line_length": 72, "num_lines": 11, "path": "/Python/Sets/Check Subset/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/py-check-subset/problem\n\"\"\"\nif __name__ == \"__main__\":\n for _ in range(int(input())):\n numberOfA = int(input())\n setA = set(map(int, input().split()))\n numberOfA = int(input())\n setB = set(map(int, input().split()))\n print(setA.issubset(setB))\n" }, { "alpha_fraction": 0.49884527921676636, "alphanum_fraction": 0.5011547207832336, "avg_line_length": 20.649999618530273, "blob_id": "cfa9ab022ac72f11b1e947f10ec3eceff2a52816", "content_id": "8253f7632d3eaed27a66770bea8ca99f27c6ac71", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 433, "license_type": "permissive", "max_line_length": 73, "num_lines": 20, "path": "/Java/Introduction/Java End of File/Solution.java", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "/*\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/java-end-of-file/problem\n ------------------------\n*/\n\nimport java.util.*;\n\npublic class Solution {\n\n public static void main(String[] args) {\n Scanner scan = new Scanner(System.in);\n int n = 1;\n while(scan.hasNext()){\n System.out.println(n + \" \" +scan.nextLine());\n n++;\n }\n scan.close();\n }\n}\n" }, { "alpha_fraction": 0.6311688423156738, "alphanum_fraction": 0.6311688423156738, "avg_line_length": 34, "blob_id": "8a703bc17241cba4c71154b71de4a0d4d2026c58", "content_id": 
"302169a0a62a5a64e955820447588c8d2fd51d3e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 385, "license_type": "permissive", "max_line_length": 74, "num_lines": 11, "path": "/Python/Itertools/Itertools.product()/solution.py", "repo_name": "aesavas/HackerRank", "src_encoding": "UTF-8", "text": "\"\"\"\n author : Ali Emre SAVAS\n Link : https://www.hackerrank.com/challenges/itertools-product/problem\n\"\"\"\nfrom itertools import product\n\nif __name__ == \"__main__\":\n setA = list(map(int, input().split()))\n setB = list(map(int, input().split()))\n # We change i as a string because join() method does not accept tuple.\n print(\" \".join(str(i) for i in list(product(setA, setB))))\n" } ]
68
mjhundekar/CSCI-561-HW2
https://github.com/mjhundekar/CSCI-561-HW2
8b0ce204a526cdd4b4fcdce1760e736705d0e9ac
e344d3b2600f53973e36ab20bfcf8dd09790993f
32e5d5db996c7a9beea7014db3a0062889bbee7b
refs/heads/master
2021-01-10T06:42:06.411199
2016-02-29T14:46:02
2016-02-29T14:46:02
52,790,643
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4969225823879242, "alphanum_fraction": 0.5030774474143982, "avg_line_length": 24.941177368164062, "blob_id": "e170226eef6a5d22df23233365e2c6dad9030be9", "content_id": "0ea1b7adcce5a1d4c543367283cd7be4bd6d2d06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3087, "license_type": "no_license", "max_line_length": 71, "num_lines": 119, "path": "/hw2cs561s16.py", "repo_name": "mjhundekar/CSCI-561-HW2", "src_encoding": "UTF-8", "text": "import sys\nimport collections\n\nOPERATORS = ['&&', '~', '=>']\n\nKB = collections.OrderedDict()\n\nquery = []\n\ndef process_input(fn):\n file_handle = open(fn, \"r\")\n line_counter = 0\n input_sentences = []\n\n for line in file_handle:\n if line_counter == 0:\n q = line.strip('\\n\\r')\n query = pre_parse_facts(q)\n line_counter += 1\n continue\n if line_counter ==1:\n fact_count = int(line.strip('\\n\\r'))\n line_counter +=1\n continue\n if line_counter >= 2:\n # Parse the facts here\n fact = line.strip('\\n\\r')\n # Pre-process line for easier processing\n fact_list = pre_parse_facts(fact)\n print fact_list\n input_sentences.append(fact_list)\n continue\n print \"File Parsed\\n\\n\\n\"\n for sen in input_sentences:\n print sen\n\n construct_KB(input_sentences)\n # a_clause = convert_list_to_clause(sen)\n # KB.append(a_clause)\n # print \"Final KB\\n\\n\\n\"\n # for k in KB:\n # print k\n\n\ndef construct_KB(input_sentences):\n print \"Constructing KB\\n\\n\\n\"\n for sen in input_sentences:\n if '=>' in sen:\n implication_pos = sen.index('=>')\n lhs = sen[:implication_pos]\n rhs = sen[implication_pos + 1:]\n KB[rhs[0]] = [rhs[1], lhs]\n else:\n var = sen[1:]\n print var\n KB[sen[0]] = sen[1:]\n\n print \"Final KB\\n\\n\\n\"\n for k, v in KB.items():\n print k, ': ', v\n\n print 'Test\\n'\n if 'Traitor' in KB:\n items = KB['Traitor']\n print len(items)\n # if len is 2 then implication\n # else it is a fact\n for i in range(len(items)):\n # if isinstance(i,list):\n for j in range(len(items[i])):\n # check if j is in dict if yes its a function\n # else its a parameter list\n print i, ', ', j\n print items[i][j]\n if 'z' in items[i][j]:\n p = items[i][j].index('z')\n items[i][j][p] = 'p'\n print 'Success', items[i][j]\n\n\n\n\n\ndef pre_parse_facts(fact):\n fact = '(' + fact + ')'\n fact = fact.replace('(', ' ( ')\n fact = fact.replace(')', ' ) ')\n fact = fact.replace(',', ', ')\n fact_list = fact.split()\n fact_list = parse_facts(fact_list)\n return fact_list\n\n\ndef parse_facts(fact_list):\n first_token = fact_list.pop(0)\n\n if first_token == '(':\n # start of a new expression\n new_expression = []\n while fact_list[0] != ')':\n # keep appending values to the new expression list\n new_expression.append(parse_facts(fact_list))\n # remove the ')'\n fact_list.pop(0)\n return new_expression\n else:\n # code is here means token is not the start of a new expression\n return first_token\n\n\ndef main():\n file_name = sys.argv[2]\n process_input(file_name)\n\n # process_input('sample01.txt')\n\n\nif __name__ == '__main__':\n main()\n" } ]
1
mattwparas/ultimate-guitar-chain
https://github.com/mattwparas/ultimate-guitar-chain
55dd8963369ef9bbe586ca593f33077615f44bbf
3b1f65cd172ef9d43a1253747350718519ea8471
d45bed16ced0f1c9cc57c80ddd20402bbc5ffbbe
refs/heads/master
2020-03-24T09:16:48.173921
2018-12-04T15:25:27
2018-12-04T15:25:27
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6029411554336548, "alphanum_fraction": 0.6329339146614075, "avg_line_length": 18.417783737182617, "blob_id": "fbcfb90aabf8e3cc58d193d7d4729814c4194a3f", "content_id": "34414c89165280e80797639ccf023aa5ae8091dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27540, "license_type": "no_license", "max_line_length": 158, "num_lines": 1417, "path": "/MarkovProject/newchord.py", "repo_name": "mattwparas/ultimate-guitar-chain", "src_encoding": "UTF-8", "text": "from __future__ import division\nimport random \nfrom markov_thing import create_probabilities\n\nfrom numpy.random import choice\nimport pickle\n\n\n'''\nFixed Database information for reference\n'''\n\nNotes_Flats = ['A', 'Bb', 'B', 'C', 'Db', 'D', 'Eb', 'E', 'F', 'Gb', 'G', 'Ab']\n\nNotes_Sharps = ['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#']\n\nBig_Keyboard = ['A0', 'Bb0', 'B0', 'C1', 'Db1', 'D1', 'Eb1', 'E1', \n\t\t\t\t'F1', 'Gb1', 'G1', 'Ab1', 'A1', 'Bb1', 'B1', 'C2', \n\t\t\t\t'Db2', 'D2', 'Eb2', 'E2', 'F2', 'Gb2', 'G2', 'Ab2', \n\t\t\t\t'A2', 'Bb2', 'B2', 'C3', 'Db3', 'D3', 'Eb3', 'E3', \n\t\t\t\t'F3', 'Gb3', 'G3', 'Ab3', 'A3', 'Bb3', 'B3', 'C4', \n\t\t\t\t'Db4', 'D4', 'Eb4', 'E4', 'F4', 'Gb4', 'G4', 'Ab4', \n\t\t\t\t'A4', 'Bb4', 'B4', 'C5', 'Db5', 'D5', 'Eb5', 'E5', \n\t\t\t\t'F5', 'Gb5', 'G5', 'Ab5', 'A5', 'Bb5', 'B5', 'C6', \n\t\t\t\t'Db6', 'D6', 'Eb6', 'E6', 'F6', 'Gb6', 'G6', 'Ab6', \n\t\t\t\t'A6', 'Bb6', 'B6', 'C7', 'Db7', 'D7', 'Eb7', 'E7', \n\t\t\t\t'F7', 'Gb7', 'G7', 'Ab7', 'A7', 'Bb7', 'B7', 'C8']\n\nharmonic_minor = [0, 2, 3, 5, 7, 8, 11 ,12]\njazz_minor = [0, 2, 3, 5, 7, 9, 11, 12]\n\nionian = [0, 2, 4, 5, 7, 9, 11 ,12]\ndorian = [2, 4, 5, 7, 9, 11 ,12, 2]\nphyrgian = [4, 5, 7, 9, 11 ,12, 2, 4]\nlydian = [5, 7, 9, 11 ,12, 2, 4, 5]\nmixolydian = [7, 9, 11 ,12, 2, 4, 5, 7]\naeolian = [9, 11 ,12, 2, 4, 5, 7, 9]\nlocrian = [11 ,12, 2, 4, 5, 7, 9, 11]\n\nmodes = [ionian, dorian, phyrgian, lydian, mixolydian, aeolian, locrian]\n\n\n'''\nFunctions for creating scales and modes\n'''\ndef Give_Mode(name, notes, mode):\n\n\ti = notes.index(name)\n\n\tscale = [notes[((i + interval) % 12)] for interval in mode]\n\n\treturn scale\n\ndef Major(key, notes):\n\n\tprog = [Give_Mode(key, notes, mode) for mode in modes]\n\n\treturn prog\n\ndef Minor(key, notes):\n\ti = notes.index(key)\n\tnewKey = notes[(i + 3) % 12]\n\n\tstartIndex = modes.index(aeolian)\n\trotatedModes = modes[startIndex: ] + modes[:startIndex]\n\n\tprog = [Give_Mode(newKey, notes, mode) for mode in rotatedModes]\n\n\treturn prog \n\ndef Jazz_Minor(key, notes):\n\ti = notes.index(key)\n\n\ttemp = modes\n\n\ttemp[0] = jazz_minor\n\n\tfor mode in temp:\n\t\tmode = [3 if x==4 else x for x in mode]\n\n\tprog = [Give_Mode(key, notes, mode) for mode in temp]\n\n\treturn prog \n\n\t# create new mode set with adjusted minor third\n'''\ndef Harmonic_Minor(key, notes):\n\ti = notes.index(key)\n\n\ttemp = modes\n\n\ttemp[0] = jazz_minor\n\n\tfor mode in temp:\n\t\tmode = [3 if x==4 else x for x in mode]\n\n\tprog = [Give_Mode(key, notes, mode) for mode in temp]\n\n\treturn prog \n'''\n\n\ndef Mixolydian(key, notes):\n\ti = notes.index(key)\n\tnewKey = notes[(i + 5) % 12]\n\n\tstartIndex = modes.index(mixolydian)\n\trotatedModes = modes[startIndex: ] + modes[:startIndex]\n\n\tprog = [Give_Mode(newKey, notes, mode) for mode in rotatedModes]\n\n\treturn prog \n\ndef Phyrgian(key, notes):\n\ti = notes.index(key)\n\tnewKey = notes[(i + 8) % 12]\n\n\tstartIndex = 
modes.index(phyrgian)\n\trotatedModes = modes[startIndex: ] + modes[:startIndex]\n\n\tprog = [Give_Mode(newKey, notes, mode) for mode in rotatedModes]\n\n\treturn prog \n\n\ndef Simple_Triad(scale):\n\tchord = [\n\t\tscale[0],\n\t\tscale[2],\n\t\tscale[4]]\n\n\treturn chord\n\ndef Jazz_Triad_7th(scale):\n\tchord = [\n\t\tscale[0],\n\t\tscale[2],\n\t\tscale[4],\n\t\tscale[6]]\n\n\treturn chord\n\n\n\n\n'''Dictionaries for naming chords'''\n\nName_Dictionary = {(0, 3, 7): \"Minor\",\n\t\t\t\t\t(0, 3, 6): \"Dim\",\n\t\t\t\t\t(0, 4, 7): \"Major\",\n\t\t\t\t\t(0, 4, 8): \"Aug\",\n\t\t\t\t\t(0, 4, 7, 10): \"Dominant Seven\",\n\t\t\t\t\t(0, 3, 7, 10): \"Minor Seven\",\n\t\t\t\t\t(0, 4, 7, 11): \"Major Seven\",\n\t\t\t\t\t(0, 4, 8, 10): \"Aug Minor Seven\",\n\t\t\t\t\t(0, 3, 6, 9): \"Diminished Seven\",\n\t\t\t\t\t(0, 3, 6, 10): \"Half-diminshed Seven\",\n\t\t\t\t\t(0, 5, 7): \"Sus4\",\n\t\t\t\t\t(0, 2, 7): \"Sus2\",\n\t\t\t\t\t(0, 5, 10): \"Quartal\",\n\t\t\t\t\t(0, 5, 7, 10): \"7Sus4\",\n\t\t\t\t\t(0, 2, 7, 10): \"7Sus2\",\n\t\t\t\t\t(0, 5, 7, 10, 2): \"9Sus4\",\n\t\t\t\t\t(0, 4, 7, 9): \"Major Sixth\",\n\t\t\t\t\t(0, 4, 7, 8): \"Minor Sixth\",\n\t\t\t\t\t(0, 3, 7, 11): \"Major Minor Seventh\",\n\t\t\t\t\t(0, 4, 7, 14): \"Major Nine\",\n\t\t\t\t\t(0, 2, 4, 7): \"add9\",\n\t\t\t\t\t(0, 3, 7, 2): \"Minor Nine\",\n\t\t\t\t\t(0, 7): \"5\",\n\t\t\t\t\t(0, 3, 7, 10, 14, 17): \"Minor Eleventh\",\n\t\t\t\t\t(0, 4, 7, 11, 14, 17): \"Major Eleventh\",\n\t\t\t\t\t(0, 4, 7, 10, 14, 17): \"Dominant Eleventh\",\n\t\t\t\t\t(0, 4, 7, 10, 14, 17, 19): \"Dominant Thirteenth\",\n\t\t\t\t\t(0, 2, 7, 11): \"Major Seven Sus2\",\n\t\t\t\t\t(0, 5, 7, 11): \"Major Seven Sus4\",\n\t\t\t\t\t(0, 4, 7, 11, 18): \"Major Seven Sharp Eleven\"}\n\nReverse_Dictionary = {\"Minor\": (0, 3, 7),\n\t\t\t\t\t\"Dim\": (0, 3, 6),\n\t\t\t\t\t\"Major\": (0, 4, 7),\n\t\t\t\t\t\"Aug\": (0, 4, 8),\n\t\t\t\t\t\"Dominant Seven\": (0, 4, 7, 10),\n\t\t\t\t\t\"Minor Seven\": (0, 3, 7, 10),\n\t\t\t\t\t\"Major Seven\": (0, 4, 7, 11),\n\t\t\t\t\t\"Aug Minor Seven\": (0, 4, 8, 10),\n\t\t\t\t\t\"Diminished Seven\": (0, 3, 6, 9),\n\t\t\t\t\t\"Half-diminshed Seven\": (0, 3, 6, 10),\n\t\t\t\t\t\"Sus4\": (0, 5, 7),\n\t\t\t\t\t\"Sus2\": (0, 2, 7),\n\t\t\t\t\t\"Quartal\": (0, 5, 10),\n\t\t\t\t\t\"7Sus4\": (0, 5, 7, 10),\n\t\t\t\t\t\"7Sus2\": (0, 2, 7, 10),\n\t\t\t\t\t\"9Sus4\": (0, 5, 7, 10, 2),\n\t\t\t\t\t\"Major Sixth\": (0, 5, 7, 9),\n\t\t\t\t\t\"Minor Sixth\": (0, 5, 7, 8),\n\t\t\t\t\t\"Major Minor Seventh\": (0, 3, 7, 11),\n\t\t\t\t\t\"Major Nine\": (0, 4, 7, 14),\n\t\t\t\t\t\"add9\": (0, 2, 4, 7),\n\t\t\t\t\t\"Minor Nine\": (0, 3, 7, 14),\n\t\t\t\t\t\"5\": (0, 7),\n\t\t\t\t\t\"Minor Eleventh\": (0, 3, 7, 10, 14, 17),\n\t\t\t\t\t\"Major Eleventh\": (0, 4, 7, 11, 14, 17),\n\t\t\t\t\t\"Dominant Eleventh\": (0, 4, 7, 10, 14, 17),\n\t\t\t\t\t\"Dominant Thirteenth\": (0, 4, 7, 10, 14, 19),\n\t\t\t\t\t\"Major Seven Sus2\": (0, 2, 7, 11),\n\t\t\t\t\t\"Major Seven Sus4\": (0, 5, 7, 11),\n\t\t\t\t\t\"Major Seven Sharp Eleven\": (0, 4, 7, 11, 18)}\n\nUnique_Name_Dictionary = {\"m\": \"Minor\",\n\t\t\t\t\t\t\"dim\": \"Dim\",\n\t\t\t\t\t\t'': \"Major\",\n\t\t\t\t\t\t#(0, 4, 8): \"Aug\",\n\t\t\t\t\t\t\"7\": \"Dominant Seven\",\n\t\t\t\t\t\t\"m7\": \"Minor Seven\",\n\t\t\t\t\t\t#(0, 4, 7, 11): \"Major Seven\",\n\t\t\t\t\t\t#(0, 4, 8, 10): \"Aug Minor Seven\",\n\t\t\t\t\t\t#(0, 3, 6, 9): \"Diminished Seven\",\n\t\t\t\t\t\t#(0, 3, 6, 10): \"Half-diminshed Seven\",\n\t\t\t\t\t\t\"sus4\": \"Sus4\",\n\t\t\t\t\t\t\"sus2\": \"Sus2\",\n\t\t\t\t\t\t\"sus\": 
\"Sus4\",\n\t\t\t\t\t\t#(0, 5, 10): \"Quartal\",\n\t\t\t\t\t\t\"7sus4\": \"7Sus4\",\n\t\t\t\t\t\t\"7sus\": \"7Sus4\",\n\t\t\t\t\t\t\"9\": \"Major Nine\",\n\t\t\t\t\t\t\"maj9\": \"Major Nine\",\n\t\t\t\t\t\t\"m9\": \"Minor Nine\",\n\t\t\t\t\t\t\"add9\": \"add9\",\n\t\t\t\t\t\t\"maj7\": \"Major Seven\",\n\n\t\t\t\t\t\t#(0, 5, 7, 10, 2): \"9Sus4\",\n\t\t\t\t\t\t\"6\" : \"Major Sixth\",\n\t\t\t\t\t\t\"m6\": \"Minor Sixth\",\n\t\t\t\t\t\t\"mmaj7\": \"Major Minor Seventh\",\n\t\t\t\t\t\t\"M7\": \"Major Seven\",\n\t\t\t\t\t\t\"dim7\": \"Diminished Seven\",\n\t\t\t\t\t\t\"5\": \"5\",\n\t\t\t\t\t\t\"Maj9\": \"Nine\",\n\t\t\t\t\t\t\"2\": \"Sus2\",\n\t\t\t\t\t\t\"4\": \"Sus4\",\n\t\t\t\t\t\t\"madd9\": \"Minor Nine\",\n\t\t\t\t\t\t\"madd11\": \"Minor Eleventh\",\n\t\t\t\t\t\t\"add11\": \"Major Eleventh\",\n\t\t\t\t\t\t\"11\": \"Dominant Eleventh\",\n\t\t\t\t\t\t\"13\": \"Dominant Thirteenth\",\n\t\t\t\t\t\t\"m7add11\": \"Minor Eleventh\",\n\t\t\t\t\t\t\"maj7sus2\": \"Major Seven Sus2\",\n\t\t\t\t\t\t\"maj7sus4\": \"Major Seven Sus4\",\n\t\t\t\t\t\t\"7Sus2\": \"7Sus2\",\n\t\t\t\t\t\t\"7sus2\": \"7Sus2\"}\n\n# Give name of a chord based on input of notes\n# chord->[list of notes]\n# notes->[white and black keys (flat or sharp)]\n\n\n# Add something to deal with input of something that isnt a chord????\ndef Chord_name(chord, notes):\n\n\ti = notes.index(chord[0])\n\n\tintervals = []\n\n\tfor note in chord:\n\t\tj = i\n\t\ttemp_interval = 0\n\t\twhile(notes[j] != note):\n\t\t\tj = (j + 1) % 12\n\t\t\ttemp_interval = temp_interval + 1\n\t\tintervals.append(temp_interval)\n\n\ttup_int = tuple(intervals)\n\ttemp = Name_Dictionary[tup_int]\n\n\treturn chord[0] + \" \" + temp\n\ndef Keyboard_Chord(chord, notes):\n\n\ti = notes.index(chord[0])\n\n\tintervals = []\n\n\tfor note in chord:\n\t\tj = i\n\t\ttemp_interval = 0\n\t\twhile(notes[j] != note):\n\t\t\tj = (j + 1) % 12\n\t\t\ttemp_interval = temp_interval + 1\n\t\tintervals.append(temp_interval)\n\n\t# intervals\n\t# find starting note\n\tnew_chord = []\n\tstarting_index = Big_Keyboard.index(chord[0] + '3')\n\n\tfor interval in intervals:\n\t\tnew_chord.append(Big_Keyboard[starting_index + interval])\n\n\treturn new_chord\n\n\ndef Create_Prog(key, style, notes, chords, seventh):\n\ttemp = Progression(key, style, notes, chords)\n\ttemp.generate_modes()\n\ttemp.generate_triads(seventh)\n\ttemp.give_names()\n\n\treturn temp\n\n\ndef Give_Chord_Numbers(key, style, notes, list_of_chord_names):\n\tprog = Create_Prog(key, style, notes, [1, 2, 3, 4, 5, 6, 7])\n\ttemp = []\n\n\tfor chord in list_of_chord_names:\n\t\ti = prog.names.index(chord)\n\t\ttemp.append(i + 1)\n\n\treturn temp\n\n\ndef Give_Notes(chord_name, notes):\n\n\ttemp = chord_name.partition(\" \")\n\n\tkey = temp[0]\n\tchord_type = temp[2]\n\n\n\ti = notes.index(key)\n\ttemp2 = Reverse_Dictionary[chord_type]\n\n\tfinal = [notes[((i + interval) % 12)] for interval in temp2]\n\n\n\treturn final \n\ndef Special_Name(chord_name):\n\n\tslash = False\n\n\tif '/' in chord_name:\n\n\t\tslash = True\n\n\t\ttemp = chord_name.partition('/')\n\n\t\troot = temp[-1]\n\n\t\tif root in Notes_Sharps:\n\n\t\t\tnew_index = Notes_Sharps.index(root)\n\n\t\t\troot = Notes_Flats[new_index]\n\n\n\t\tchord_name = temp[0]\n\n\tunique = []\n\n\t#########################\n\n\tmodifiers = ['di', 'm', 'M', 'aj', 'add', '5', '6', '7', '9', '11', '13','sus', '2', '4']\n\n\tfor modifier in modifiers:\n\n\t\tif modifier != '' and modifier in chord_name:\n\n\t\t\tif modifier == 'm' and 'mm' in 
chord_name:\n\t\t\t\tunique.append('mm')\n\n\t\t\telse:\n\t\t\t\tunique.append(modifier)\n\n\n\tnew_name = ''.join(unique)\n\n\tchord_type = Unique_Name_Dictionary[new_name]\n\n\tkey = chord_name.replace(new_name, '')\n\n\tif key in Notes_Sharps:\n\n\t\ti = Notes_Sharps.index(key)\n\t\n\telse:\n\n\t\ti = Notes_Flats.index(key)\n\n\ttemp2 = Reverse_Dictionary[chord_type]\n\n\tfinal = [Notes_Flats[((i + interval) % 12)] for interval in temp2]\n\n\tchord_object = Chord(final, chord_name)\n\n\n\t# put key identifier function here \n\t# make note if it is slash on the object\n\t# when determining the key->check if the root is the root, if not \n\n\tif(slash):\n\t\tchord_object.add_note_to_beginning(root)\n\n\tchord_object.make_flats()\n\n\treturn chord_object\n\n\ndef Generator_1(number):\n\ttemp = [2, 3, 4, 5, 6, 7]\n\toutput = []\n\n\ti = number - 3\n\n\twhile(i > 0):\n\t\toutput.append(random.choice(temp))\n\t\ti = i - 1\n\n\toutput.insert(0, 1)\n\toutput.append(5)\n\toutput.append(1)\n\n\treturn output\n\ndef Generator_2():\n\ttemp = [1, 2, 3, 4, 5, 6, 7]\n\n\treturn random.choice(temp)\n\n'''\ndef Create_Melody(prog):\n\toutput = []\n\tfor mode in prog.modes:\n\t\toutput.append(mode[Generator_2()])\n\treturn output\n'''\n\n\nclass Chord:\n\tdef __init__ (self, list_of_notes, name):\n\t\tself.name = name\n\t\tself.list_of_notes = list_of_notes\n\t\tself.root = list_of_notes[0]\n\n\tdef add_note_to_beginning(self, note):\n\t\tself.list_of_notes.insert(0, note)\n\n\tdef make_flats(self):\n\n\t\tfor note in self.list_of_notes:\n\n\t\t\tif note in Notes_Sharps:\n\n\t\t\t\ttemp = Notes_Sharps.index(note)\n\n\t\t\t\tnote = Notes_Flats[temp]\n\n\nclass Ultimate_Guitar_Progression:\n\n\tdef __init__(self, chord_objects, key, numerals):\n\n\t\tself.chords = chord_objects\n\n\t\tself.key = key\n\n\t\tself.numerals = numerals\n\n\n\tdef add_progression(self, url):\n\n\t\tself.url = url\n\n\tdef play(self, tempo):\n\n\t\tplay_progression(self.chords, Notes_Flats, tempo)\n\n\tdef add_probabilities(self):\n\n\t\tself.numeral_matrix = create_probabilities(self.numerals)\n\n\t\tnames = []\n\n\t\tfor chord in self.chords:\n\n\t\t\tnames.append(chord.name)\n\n\t\tself.chord_matrix = create_probabilities(names)\n\n\n\n\n#Progression Class:\n\nclass Progression:\n\tdef __init__ (self, key, style, notes, chords):\n\t\tself.key = key\n\t\tself.style = style\n\t\tself.notes = notes\n\t\tself.chords = chords #list\n\t\tself.modes = []\n\t\tself.triads = []\n\t\tself.names = []\n\n\n\tdef add_chords_to_progression(self, chords):\n\t\tfor number in chords:\n\t\t\tself.chords.append(number)\n\n\n\tdef generate_modes(self):\n\t\tif self.style == \"minor\" or self.style == \"Minor\":\n\t\t\ttemp = Minor(self.key, self.notes)\n\t\t\tfor chords in self.chords:\n\t\t\t\tself.modes.append(temp[chords - 1])\n\n\t\telif self.style == \"mix\" or self.style == \"Mix\":\n\t\t\ttemp = Mixolydian(self.key, self.notes)\n\t\t\tfor chords in self.chords:\n\t\t\t\tself.modes.append(temp[chords - 1])\n\n\t\telif self.style == \"phrygian\" or self.style == \"Phrygian\":\n\t\t\ttemp = Phyrgian(self.key, self.notes)\n\t\t\tfor chords in self.chords:\n\t\t\t\tself.modes.append(temp[chords - 1])\n\n\t\telif self.style == \"Jazz Minor\" or self.style == \"jazz minor\":\n\t\t\ttemp = Jazz_Minor(self.key, self.notes)\n\t\t\tfor chords in self.chords:\n\t\t\t\tself.modes.append(temp[chords - 1])\n\n\t\telse:\n\t\t\ttemp = Major(self.key, self.notes)\n\t\t\tfor chords in self.chords:\n\t\t\t\tself.modes.append(temp[chords - 1])\n\n\n\tdef 
generate_triads(self, style):\n\t\tif style == \"seventh\":\n\t\t\tfor mode in self.modes:\n\t\t\t\tself.triads.append(Jazz_Triad_7th(mode))\n\t\telse:\n\t\t\tfor mode in self.modes:\n\t\t\t\tself.triads.append(Simple_Triad(mode))\n\n\tdef give_names(self):\n\t\tfor chord in self.triads:\n\t\t\tself.names.append(Chord_name(chord, self.notes))\n\n\tdef play(self, tempo):\n\t\tplay_progression(self.triads, self.notes, tempo)\n\n\n\n# make the sequence of chords all start in the lower octave and then continue up to the higher octave\n# voicings are more accurate \n\n\n'''Sound Stuff'''\n\nfrom pydub import AudioSegment\nfrom pydub.playback import play\nimport time\n\ndef detect_leading_silence(sound, silence_threshold=-50.0, chunk_size=10):\n '''\n sound is a pydub.AudioSegment\n silence_threshold in dB\n chunk_size in ms\n\n iterate over chunks until you find the first one with sound\n '''\n trim_ms = 0 # ms\n while sound[trim_ms:trim_ms+chunk_size].dBFS < silence_threshold:\n trim_ms += chunk_size\n\n return trim_ms\n\n\ndef remove_leading_silence(sound):\n\tstart_trim = detect_leading_silence(sound)\n\tduration = len(sound)\n\ttrimmed_sound = sound[start_trim:duration]\n\treturn trimmed_sound\n\n'''Creates fixed length chord'''\ndef Create_playable_chord_generic(chord, tempo):\n\t# make adjustment here #\n\tsounds = []\n\tadjusted_chord = Keyboard_Chord(chord, Notes_Flats)\n\tfor note in adjusted_chord:\n\t\tpath = \"/Users/mwparas/Documents/Python/Chord_Project/Pitches2/\" + note + \".wav\"\n\t\tsounds.append(AudioSegment.from_file(path))\n\n\t# sounds = [remove_leading_silence(sound) for sound in sounds]\n\n\tfirst = sounds.pop(0)\n\n\twhile len(sounds) > 0:\n\t\ttemp = sounds.pop(0)\n\t\tfirst = first.overlay(temp)\n\n\twhole_note = 4 / (tempo / 60)\n\n\tsilence = AudioSegment.silent(duration=whole_note*1000)\n\tfinal = silence.overlay(first)\n\n\treturn final \n\n\ndef play_progression(progression, notes, tempo):\n\tchords = []\n\tfor chord in progression:\n\t\t# print Chord_name(chord, Notes_Flats), chord\n\t\tprint chord.list_of_notes, chord.name\n\t\tchords.append(Create_playable_chord_generic(chord.list_of_notes, tempo))\n\n\tfirst = chords.pop(0)\n\n\twhile len(chords) > 0:\n\t\ttemp = chords.pop(0)\n\t\tfirst = first.append(temp, crossfade=0)\n\n\tplay(first)\n\n\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n\ndef scraper(website):\n\n\tpage = requests.get(website)\n\n\tsoup = BeautifulSoup(page.content, 'html.parser')\n\n\tjunk = soup.find_all(class_=\"js-tab-content\")\n\n\tspans = junk[0].find_all('span')\n\n\tChords = []\n\n\tfor span in spans:\n\n\t\ttemp = str(span.get_text())\n\t\tChords.append(temp)\n\n\treturn Chords\n\n\n\n\n\n\ndef information_getter(list_of_chord_objects):\n\n\tlist_of_chords = []\n\n\tlist_of_roots = []\n\n\tfor chord in list_of_chord_objects:\n\n\t\tlist_of_chords.append(chord.list_of_notes)\n\n\t\tlist_of_roots.append(chord.root)\n\n\tall_notes = sum(list_of_chords, [])\n\n\tlist_of_notes = list(set(all_notes))\n\n\ttupl_counts = []\n\n\tcounts = []\n\n\tfor note in list_of_notes:\n\n\t\ttemp = all_notes.count(note)\n\n\t\ttupl_counts.append((note, temp))\n\n\t\tcounts.append(temp)\n\n\t#### new thing\n\n\ttupl_counts = sorted(tupl_counts, key=lambda x: x[1])\n\n\ttupl_counts.reverse()\n\n\n\ttupl_lengths = []\n\n\tlengths = []\n\n\tpossible_keys = []\n\n\tfor note in Notes_Flats:\n\n\t\tscale = Give_Mode(note, Notes_Flats, ionian)\n\n\t\ttemp = 
set(scale).intersection(list_of_notes)\n\n\t\tlengths.append(len(temp))\n\n\t\ttupl_lengths.append((note, len(temp)))\n\n\tintersections = max(lengths)\n\n\tfor pair in tupl_lengths:\n\n\t\tif pair[1] == intersections:\n\n\t\t\tpossible_keys.append(pair[0])\n\t\n\tif len(possible_keys) == 1:\n\t\t\n\t\tmajor_key = possible_keys[0]\n\n\telse:\n\n\t\tnarrowing = []\n\n\t\tfor note in possible_keys:\n\n\t\t\tfor pair in tupl_counts:\n\n\t\t\t\tif note == pair[0]:\n\n\t\t\t\t\tnarrowing.append(pair)\n\n\t\tnarrowing = sorted(narrowing, key=lambda x: x[1])\n\n\t\tnarrowing.reverse()\n\n\t\tmajor_key = narrowing[0][0]\n\n\tmajor_scale = Give_Mode(major_key, Notes_Flats, ionian)\n\n\tminor_scale = Give_Mode(major_key, Notes_Flats, aeolian)\n\n\tmajor_numeral = []\n\n\tminor_numeral = []\n\n\tfor note in list_of_roots:\n\n\t\tif note in major_scale and note in minor_scale:\n\n\t\t\tminor_numeral.append(minor_scale.index(note) + 1)\n\n\t\t\tmajor_numeral.append(major_scale.index(note) + 1)\n\n\tnumerals = [1, 2, 3, 4, 5, 6, 7]\n\n\tminor_counts = []\n\n\tmajor_counts = []\n\n\tfor numeral in numerals:\n\n\t\tcount_minor = minor_numeral.count(numeral)\n\n\t\tcount_major = major_numeral.count(numeral)\n\n\t\tminor_counts.append((numeral, count_minor))\n\n\t\tmajor_counts.append((numeral, count_major))\n\n\t# print minor_numeral\n\n\tif major_counts[0][1] > minor_counts[0][1]:\n\n\t\tfinal_key = major_key + ' ' + 'Major'\n\n\t\treturn Ultimate_Guitar_Progression(list_of_chord_objects, final_key, major_numeral)\n\n\telse:\n\n\t\tfinal_key = minor_scale[0] + ' ' + 'Minor'\n\n\t\treturn Ultimate_Guitar_Progression(list_of_chord_objects, final_key, minor_numeral)\n\n\n\t## determine if the key is major or minor \n\n\n\ndef overall_product(url):\n\n\ttemp = scraper(url)\n\n\tlist_of_chord_objects = []\n\n\tfor chord in temp:\n\n\t\tlist_of_chord_objects.append(Special_Name(chord))\n\n\tprogression_from_url = information_getter(list_of_chord_objects)\n\n\tprogression_from_url.add_progression(url)\n\n\tprogression_from_url.add_probabilities()\n\n\treturn progression_from_url\n\n\n\n\n\n\n\ndef merge_dicts(*dict_args):\n \"\"\"\n Given any number of dicts, shallow copy and merge into a new dict,\n precedence goes to key value pairs in latter dicts.\n \"\"\"\n result = {}\n for dictionary in dict_args:\n result.update(dictionary)\n return result\n\n\n\n\n\n'''\nfrank_zappa = pickle.load(open(\"frank_zappa\", \"rb\"))\ngenesis = pickle.load(open(\"genesis\", \"rb\"))\nled_zeppelin = pickle.load(open(\"led_zeppelin\", \"rb\"))\nrush = pickle.load(open(\"rush\", \"rb\"))\nthe_beatles = pickle.load(open(\"the_beatles\", \"rb\"))\nthe_eagles = pickle.load(open(\"the_eagles\", \"rb\"))\n\n\n\ntest_data = merge_dicts(frank_zappa, genesis, led_zeppelin, rush, the_beatles, the_eagles)\n\n\npickle.dump(test_data, open(\"test_data\", \"wb\"))\n'''\n'''\n\ntest_data = pickle.load(open(\"test_data\", \"rb\"))\n\ntemp = map_name_to_object(test_data)\n\npickle.dump(temp, open(\"name_map\", \"wb\"))\n'''\n\n\n'''\n\nrandom_everything = big_generate_progressions(first_order_probability_dictionary, second_order_probability_dictionary, count_probability_dictionary, name_map)\n\ntest_stuff = []\n\nfor chord in random_everything:\n\n\ttest_stuff.append(Special_Name(chord))\n\nproduct = information_getter(test_stuff)\n\nproduct.play(300)\n\n'''\n\n\n\ndef scraper_number2(url):\n\n\tpage = requests.get(url)\n\n\t# print page.status_code\n\n\tsoup = BeautifulSoup(page.content, 'html.parser')\n\n\t# temp = 
\"https://tabs.ultimate-guitar.com/g/genesis/\"\n\n\t# temp = \"https://tabs.ultimate-guitar.com/m/misc_soundtrack/\"\n\n\t# temp = \"https://tabs.ultimate-guitar.com/f/frank_zappa/\"\n\n\t# temp = \"https://tabs.ultimate-guitar.com/t/the_beatles/\"\n\n\t# temp = \"https://tabs.ultimate-guitar.com/l/led_zeppelin/\"\n\n\ttemp = \"https://tabs.ultimate-guitar.com/e/eagles/\"\n\n\tlinks = []\n\n\tfor a in soup.find_all('a', href=True):\n\n \t\t# print \"Found the URL:\", a['href']\n\n \t\ttest = a['href']\n\n\n \t\tif temp in test:\n\n \t\t\ttest = str(test)\n\n \t\t\tlinks.append(test)\n\n\n\treturn links\n\n\n\ndef scrape_and_save(url):\n\n\tlinks = scraper_number2(url)\n\n\tprogression_dictionary = {}\n\n\tfor link in links:\n\n\t\ttry:\n\t\t\t\n\t\t\ttest = link \n\n\t\t\t# temp = test.replace(\"https://tabs.ultimate-guitar.com/g/genesis/\", '')\n\n\t\t\t# temp = test.replace(\"https://tabs.ultimate-guitar.com/m/misc_soundtrack/\", '')\n\n\t\t\t# temp = test.replace(\"https://tabs.ultimate-guitar.com/f/frank_zappa/\", '')\n\n\t\t\t#temp = test.replace(\"https://tabs.ultimate-guitar.com/t/the_beatles/\", '')\n\n\t\t\t# temp = test.replace(\"https://tabs.ultimate-guitar.com/l/led_zeppelin/\", '')\n\n\t\t\ttemp = test.replace(\"https://tabs.ultimate-guitar.com/e/eagles/\", '')\n\n\t\t\ttemp = temp.replace(\"_crd.htm\", '') \n\n\t\t\tname = temp\n\n\t\t\tprint \"trying \" + name + \"\\n\"\n\n\t\t\tprogression_dictionary[name] = overall_product(link)\n\n\t\t\tprint \"Success! Sleeping...\" + \"\\n\"\n\n\t\t\ttime.sleep(30)\n\n\t\texcept:\n\n\t\t\tprint \"Failed on \" + link + \"\\n\"\n\n\t\t\ttime.sleep(30)\n\n\n\treturn progression_dictionary\n\n\n\ndef go_ahead_and_get_data(url, number_of_pages, name):\n\n\ttest = scrape_and_save(url)\n\n\tfor i in range(2, number_of_pages + 1):\n\n\t\tlink = \"https://www.ultimate-guitar.com/tabs/eagles_chords_tabs\" + str(i) + \".htm\"\n\n\t\ttemp = scrape_and_save(link)\n\n\t\ttest = merge_dicts(test, temp)\n\n\t\n\tpickle.dump(test, open(name, \"wb\"))\n\n\n# go_ahead_and_get_data(\"https://www.ultimate-guitar.com/tabs/the_beatles_chords_tabs.htm\", 13, \"the_beatles\")\n\n\n\n# go_ahead_and_get_data(\"https://www.ultimate-guitar.com/tabs/genesis_chords_tabs.htm\", 2, \"genesis\")\n\n\n# print overall_product(\"https://tabs.ultimate-guitar.com/g/genesis/a_trick_of_the_tail_crd.htm\")\n\n\n\n\n# go_ahead_and_get_data(\"https://www.ultimate-guitar.com/tabs/eagles_chords_tabs.htm\", 3, \"the_eagles\")\n\n\n\ndef transpose_and_store(progression):\n\n\t#test_data = {name -> progression}\n\n\tkey = progression.key\n\n\ttemp = key.partition(' ')\n\n\tkey_note = temp[0]\n\n\tchord_list = []\n\n\tfor chord in progression.chords:\n\n\t\tnew_chord = chord\n\n\t\tnew_chord.make_flats\n\n\t\tchord_list.append(new_chord)\n\n\tfinal = []\n\n\ti = Notes_Flats.index(key_note)\n\n\tfor chord in chord_list:\n\n\t\tintervals = []\n\n\t\tfor note in chord.list_of_notes:\n\n\t\t\tif note not in Notes_Flats:\n\n\t\t\t\traise Exception('For some reason, this chord had notes that did not exist!')\n\n\t\t\tj = i\n\n\t\t\ttemp_interval = 0\n\n\t\t\twhile(Notes_Flats[j] != note):\n\t\t\t\n\t\t\t\tj = (j + 1) % 12\n\n\t\t\t\ttemp_interval = temp_interval + 1\n\n\t\t\tintervals.append(temp_interval)\n\n\t\tintervals = tuple(intervals)\n\n\t\tset_of_final = (intervals)\n\n\t\tfinal.append(set_of_final)\n\n\treturn final\n\n\n\n\ndef read_and_give_notes(transposition, key):\n\n\ttransposed_chord_progression = []\n\n\ti = Notes_Flats.index(key)\n\n\tfor chord_formula in 
transposition:\n\n\t\tfinal = [Notes_Flats[((i + interval) % 12)] for interval in chord_formula]\n\n\t\ttransposed_chord_progression.append(final)\n\n\treturn transposed_chord_progression\n\n\n\n\n\n\ndef overall_tranposer(data_set):\n\n\tnew_dictionary = {}\n\n\ttotal_size = len(data_set)\n\n\tfor key in data_set:\n\n\t\ttry:\n\n\t\t\tprint key, total_size\n\n\t\t\tcurrent_progression = data_set[key]\n\n\t\t\tnew_dictionary[key] = transpose_and_store(current_progression)\n\n\t\t\ttotal_size = total_size - 1\n\n\t\texcept:\n\n\t\t\tprint \"failed on \", key\n\n\treturn new_dictionary\n\n\n\n\n# new thing to get markov'd -> make sequence for each chord consisting of (position, (intervals for chords))\n\n\n\ndef test_play_progression(progression, tempo):\n\tchords = []\n\tfor chord in progression:\n\t\t# print Chord_name(chord, Notes_Flats), chord\n\t\tprint chord\n\t\tchords.append(Create_playable_chord_generic(chord, tempo))\n\n\tfirst = chords.pop(0)\n\n\twhile len(chords) > 0:\n\t\ttemp = chords.pop(0)\n\t\tfirst = first.append(temp, crossfade=0)\n\n\tplay(first)\n\n\n#testing_things = read_and_give_notes(temp,'Gb')\n\n#test_play_progression(testing_things, 250)\n\n\n# test_data = pickle.load(open(\"test_data\", \"rb\"))\n\n# test = test_data['firth_of_fifth']\n\n'''\n\nchords = test.chords\n\nempty = []\n\nfor chord in chords:\n\n\tempty.append(chord.list_of_notes)\n\nprint empty\n'''\n\n# print transpose_and_store(test)\n\ndef find_most_common_chords(chord_dictionary):\n\n\tcount_dictionary = {}\n\n\tfor key in chord_dictionary.keys():\n\n\t\tprogression = chord_dictionary[key]\n\n\t\tfor chord in progression:\n\n\t\t\tif chord not in count_dictionary.keys():\n\n\t\t\t\tcount_dictionary[chord] = 1 / len(progression)\n\n\t\t\telse:\n\t\t\t\t\n\t\t\t\tcount_dictionary[chord] = count_dictionary[chord] + (1 / len(progression))\n\n\treturn count_dictionary\n\n\ndef probability_counts(chord_dictionary):\n\n\tcount_dictionary = find_most_common_chords(chord_dictionary)\n\n\ttotal_count = 0\n\n\tfor key in count_dictionary:\n\n\t\ttemp = count_dictionary[key]\n\n\t\ttotal_count = temp + total_count\n\n\tfor key in count_dictionary:\n\n\t\tcount_dictionary[key] = count_dictionary[key] / total_count\n\n\treturn count_dictionary\n\n\ndef first_order_markov_chain(data_set):\n\n\tcount_dictionary = {}\n\n\ttotal_unique_stuff = []\n\n\tfor key in data_set.keys():\n\n\t\tprogression = data_set[key]\n\n\t\tfor z in range(len(progression) - 2):\n\n\t\t\tkey = (progression[z], progression[z+1])\n\n\t\t\tif count_dictionary.has_key(key):\n\n\t\t\t\t# print key\n\n\t\t\t\tcount_dictionary[key] = count_dictionary[key] + 1\n\n\t\t\telse:\n\n\t\t\t\tcount_dictionary[key] = 1\n\n\titerating_list = count_dictionary.keys()\n\n\tfor z in iterating_list:\n\n\t\ttotal_count = 0\n\n\t\t# print total_count\n\n\t\tfor key in count_dictionary:\n\n\t\t\tif z[0] == key[0]:\n\n\t\t\t\ttotal_count = count_dictionary[key] + total_count\n\n\t\tfor key in count_dictionary:\n\n\t\t\tif z[0] == key[0]:\n\n\t\t\t\tcount_dictionary[key] = count_dictionary[key] / total_count\n\n\n\treturn count_dictionary\n\n\n\ndef second_order_markov_chain(data_set):\n\n\tcount_dictionary = {}\n\n\ttotal_unique_stuff = []\n\n\tfor key in data_set.keys():\n\n\t\tprogression = data_set[key]\n\n\t\tfor z in range(len(progression) - 2):\n\n\t\t\tkey = ((progression[z], progression[z+1]), progression[z+2])\n\n\t\t\tif count_dictionary.has_key(key):\n\n\t\t\t\t# print key\n\n\t\t\t\tcount_dictionary[key] = count_dictionary[key] + 
1\n\n\t\t\telse:\n\n\t\t\t\tcount_dictionary[key] = 1\n\n\n\titerating_list = count_dictionary.keys()\n\n\tfor i in iterating_list:\n\n\t\t# print \"iterating over \", i\n\n\t\ttotal_count = 0\n\n\t\t# print total_count\n\n\t\tfor key in count_dictionary:\n\n\t\t\tif i[0] == key[0]:\n\n\t\t\t\ttotal_count = count_dictionary[key] + total_count\n\n\t\tfor key in count_dictionary:\n\n\t\t\t\tif i[0] == key[0] and total_count != 0:\n\n\t\t\t\t\tcount_dictionary[key] = count_dictionary[key] / total_count\n\n\n\treturn count_dictionary\n\n\n\ndef big_generate_progressions(first_order_probability_dictionary, second_order_probability_dictionary, style):\n\n\t'''\n\n\telements_1 = []\n\n\tweights_1 = []\n\n\tfor key in count_probability_dictionary:\n\n\t\telements_1.append(key)\n\n\t\tweights_1.append(count_probability_dictionary[key])\n\n\tfirst = choice(elements_1, p=weights_1)\n\n\t'''\n\n\tif style == \"Minor\" or style == \"minor\":\n\n\t\tfirst = (0, 3, 7)\n\n\telse:\n\n\t\tfirst = (0, 4, 7)\n\n\tfinal_progression = [first]\n\n\tcurrent = first\n\n\t# build second value:\n\n\telements = []\n\n\tweights = []\n\n\tfor key in first_order_probability_dictionary:\n\n\t\tif key[0] == current:\n\n\t\t\telements.append(key[1])\n\n\t\t\tweights.append(first_order_probability_dictionary[key])\n\n\tcurrent = choice(elements, p=weights)\n\n\n\tindex = 1\n\n\tfinal_progression.append(current)\n\n\t# build probability sets here:\n\n\twhile(len(final_progression) < 10):\n\n\t\telements2 = []\n\n\t\tweights2 = []\n\n\t\tfor key in second_order_probability_dictionary:\n\n\t\t\tif key[0] == (final_progression[index - 1], final_progression[index]):\n\n\t\t\t\telements2.append(key[1])\n\n\t\t\t\tweights2.append(second_order_probability_dictionary[key])\n\n\n\t\tcurrent = choice(elements, p=weights)\n\n\t\tindex = index + 1\n\n\t\tfinal_progression.append(current)\n\n\tfinal_progression.append(first)\n\n\n\treturn final_progression\n\n\n'''\n\n\n\n\ntest_data = pickle.load(open(\"transposed\", \"rb\"))\n\n\nfirst_order_probability_dictionary = first_order_markov_chain(test_data)\n\npickle.dump(first_order_probability_dictionary, open(\"new_first_order\", \"wb\"))\n\n\n\nsecond_order_probability_dictionary = second_order_markov_chain(test_data)\n\npickle.dump(second_order_probability_dictionary, open(\"new_second_order\", \"wb\"))\n\n\ncount_probability_dictionary = probability_counts(test_data)\n\npickle.dump(count_probability_dictionary, open(\"new_probability_counts\", \"wb\"))\n'''\n\n\n\n\nfirst_order_probability_dictionary = pickle.load(open(\"new_first_order\", \"rb\"))\n\nsecond_order_probability_dictionary = pickle.load(open(\"new_second_order\", \"rb\"))\n\n\n\ntemp = big_generate_progressions(first_order_probability_dictionary, second_order_probability_dictionary, \"Major\")\n\n\n\nnew_temp = read_and_give_notes(temp, \"Bb\")\n\ntest_play_progression(new_temp, 250)\n\n\n\n\n\n\n'''\ntest_data = pickle.load(open(\"genesis\", \"rb\"))\n\ntest = test_data['afterglow']\n\ntemp = transpose_and_store(test)\n\n\n\n# testing_things = read_and_give_notes(temp,'Bb')\n\ntest_play_progression(testing_things, 250)\n\n'''\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.8012048006057739, "alphanum_fraction": 0.8012048006057739, "avg_line_length": 70.14286041259766, "blob_id": "c1e45dfe24553773182d9519a57187f7af145fb3", "content_id": "51d1e57fc836988345ecd176a9ed9eb5ec0709fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 498, 
"license_type": "no_license", "max_line_length": 124, "num_lines": 7, "path": "/README.md", "repo_name": "mattwparas/ultimate-guitar-chain", "src_encoding": "UTF-8", "text": "# Ultimate Guitar Web Scraper\n\nThis is a scraper implemented in python to scrape chord progressions from the website ultimate-guitar.com. The scraper works\nas a web crawler, where a seed page is given and the data is then scraped systematically by traversing all the links on the \npage. Once a page with chord progressions is found, the data is scraped and then saved to a json file.\n\nThe ultimate goal of this project was to scrape enough songs to seed a markov chain based on chord progressions.\n" }, { "alpha_fraction": 0.5631386637687683, "alphanum_fraction": 0.5715328454971313, "avg_line_length": 21.09677505493164, "blob_id": "7b497294f348d1bbb352fc41d90f3112d2e92558", "content_id": "9eaed7543582d35c9249b40bfa052c96132e3fe1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2740, "license_type": "no_license", "max_line_length": 90, "num_lines": 124, "path": "/MarkovProject/MarkovChain.py", "repo_name": "mattwparas/ultimate-guitar-chain", "src_encoding": "UTF-8", "text": "def find_most_common_chords(chord_dictionary):\n count_dictionary = {}\n\n for key in chord_dictionary.keys():\n\n progression = chord_dictionary[key]\n\n for chord in progression:\n\n if chord not in count_dictionary.keys():\n\n count_dictionary[chord] = 1 / len(progression)\n\n else:\n\n count_dictionary[chord] = count_dictionary[chord] + (1 / len(progression))\n\n return count_dictionary\n\n\ndef probability_counts(chord_dictionary):\n count_dictionary = find_most_common_chords(chord_dictionary)\n\n total_count = 0\n\n for key in count_dictionary:\n temp = count_dictionary[key]\n\n total_count = temp + total_count\n\n for key in count_dictionary:\n count_dictionary[key] = count_dictionary[key] / total_count\n\n return count_dictionary\n\n\ndef first_order_markov_chain(data_set):\n count_dictionary = {}\n\n total_unique_stuff = []\n\n for key in data_set.keys():\n\n progression = data_set[key]\n\n for z in range(len(progression) - 2):\n\n key = (progression[z], progression[z + 1])\n\n if count_dictionary.has_key(key):\n\n # print key\n\n count_dictionary[key] = count_dictionary[key] + 1\n\n else:\n\n count_dictionary[key] = 1\n\n iterating_list = count_dictionary.keys()\n\n for z in iterating_list:\n\n total_count = 0\n\n # print total_count\n\n for key in count_dictionary:\n\n if z[0] == key[0]:\n total_count = count_dictionary[key] + total_count\n\n for key in count_dictionary:\n\n if z[0] == key[0]:\n count_dictionary[key] = count_dictionary[key] / total_count\n\n return count_dictionary\n\n\ndef second_order_markov_chain(data_set):\n count_dictionary = {}\n\n total_unique_stuff = []\n\n for key in data_set.keys():\n\n progression = data_set[key]\n\n for z in range(len(progression) - 2):\n\n key = ((progression[z], progression[z + 1]), progression[z + 2])\n\n if count_dictionary.has_key(key):\n\n # print key\n\n count_dictionary[key] = count_dictionary[key] + 1\n\n else:\n\n count_dictionary[key] = 1\n\n iterating_list = count_dictionary.keys()\n\n for i in iterating_list:\n\n # print \"iterating over \", i\n\n total_count = 0\n\n # print total_count\n\n for key in count_dictionary:\n\n if i[0] == key[0]:\n total_count = count_dictionary[key] + total_count\n\n for key in count_dictionary:\n\n if i[0] == key[0] and total_count != 0:\n count_dictionary[key] = count_dictionary[key] / 
total_count\n\n return count_dictionary\n" }, { "alpha_fraction": 0.46590983867645264, "alphanum_fraction": 0.506519615650177, "avg_line_length": 27.863296508789062, "blob_id": "bf964b12de5697f84312545a82f6595cf9f798fb", "content_id": "f5f4af9630742cfcd326855f72d901c8d0a97d9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15415, "license_type": "no_license", "max_line_length": 113, "num_lines": 534, "path": "/MarkovProject/Test.py", "repo_name": "mattwparas/ultimate-guitar-chain", "src_encoding": "UTF-8", "text": "from pydub import AudioSegment\nfrom pydub.playback import play\n\n\n# Contains interval information for each mode, as well as a \"modes\" list\nclass Mode(object):\n harmonic_minor = [0, 2, 3, 5, 7, 8, 11, 12]\n jazz_minor = [0, 2, 3, 5, 7, 9, 11, 12]\n\n ionian = [0, 2, 4, 5, 7, 9, 11, 12]\n dorian = [2, 4, 5, 7, 9, 11, 12, 2]\n phrygian = [4, 5, 7, 9, 11, 12, 2, 4]\n lydian = [5, 7, 9, 11, 12, 2, 4, 5]\n mixolydian = [7, 9, 11, 12, 2, 4, 5, 7]\n aeolian = [9, 11, 12, 2, 4, 5, 7, 9]\n locrian = [11, 12, 2, 4, 5, 7, 9, 11]\n\n modes = [ionian, dorian, phrygian, lydian, mixolydian, aeolian, locrian]\n\n\n# basic keyboard information for creating chords and playback\nclass Notes(object):\n flats = ['A', 'Bb', 'B', 'C', 'Db', 'D', 'Eb', 'E', 'F', 'Gb', 'G', 'Ab']\n\n sharps = ['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#']\n\n big_keyboard = ['A0', 'Bb0', 'B0', 'C1', 'Db1', 'D1', 'Eb1', 'E1',\n 'F1', 'Gb1', 'G1', 'Ab1', 'A1', 'Bb1', 'B1', 'C2',\n 'Db2', 'D2', 'Eb2', 'E2', 'F2', 'Gb2', 'G2', 'Ab2',\n 'A2', 'Bb2', 'B2', 'C3', 'Db3', 'D3', 'Eb3', 'E3',\n 'F3', 'Gb3', 'G3', 'Ab3', 'A3', 'Bb3', 'B3', 'C4',\n 'Db4', 'D4', 'Eb4', 'E4', 'F4', 'Gb4', 'G4', 'Ab4',\n 'A4', 'Bb4', 'B4', 'C5', 'Db5', 'D5', 'Eb5', 'E5',\n 'F5', 'Gb5', 'G5', 'Ab5', 'A5', 'Bb5', 'B5', 'C6',\n 'Db6', 'D6', 'Eb6', 'E6', 'F6', 'Gb6', 'G6', 'Ab6',\n 'A6', 'Bb6', 'B6', 'C7', 'Db7', 'D7', 'Eb7', 'E7',\n 'F7', 'Gb7', 'G7', 'Ab7', 'A7', 'Bb7', 'B7', 'C8']\n\n\nclass Chord(object):\n def __init__(self, list_of_notes, name):\n self.name = name\n self.list_of_notes = list_of_notes\n self.root = list_of_notes[0]\n\n def add_note_to_beginning(self, note):\n self.list_of_notes.insert(0, note)\n\n def make_flats(self):\n\n for note in self.list_of_notes:\n\n if note in Notes.sharps:\n temp = Notes.sharps.index(note)\n\n note = Notes.flats[temp]\n\n\nclass CustomProgression(object):\n def __init__(self, list_of_names):\n self.names = list_of_names\n\n self.chords = self.generate_notes()\n\n self.modes = self.map_chords_modes()\n\n\n def generate_notes(self):\n\n chord_output = [give_notes(name, Notes.flats) for name in self.names]\n\n return chord_output\n\n def map_chords_modes(self):\n\n mode_output = [chord_to_mode(name for name in self.names)]\n\n return mode_output\n\n def play(self, tempo = 200):\n play_progression(self.chords, Notes.flats, tempo)\n\n\n def display(self):\n\n for i in range(0, len(self.modes)):\n print self.names[i], self.modes[i]\n\n\n\n# Progression Class:\n\nclass Progression(object):\n def __init__(self, key, style, notes, chords, triads=\"normal\"):\n\n self.key = key\n self.style = style\n self.notes = notes\n self.chords = chords # list\n self.modes = self.generate_modes()\n self.triads = self.generate_triads(triads)\n self.names = self.give_names()\n\n def add_chords_to_progression(self, chords):\n for number in chords:\n self.chords.append(number)\n # update chords\n\n def generate_modes(self):\n if self.style == 
\"minor\" or self.style == \"Minor\":\n scale = minor(self.key, self.notes)\n new_modes = []\n for chords in self.chords:\n new_modes.append(scale[chords - 1])\n return new_modes\n\n elif self.style == \"mixolydian\" or self.style == \"Mixolydian\":\n scale = give_mixolydian(self.key, self.notes)\n new_modes = []\n for chords in self.chords:\n new_modes.append(scale[chords - 1])\n return new_modes\n\n elif self.style == \"phrygian\" or self.style == \"Phrygian\":\n scale = give_phyrgian(self.key, self.notes)\n new_modes = []\n for chords in self.chords:\n new_modes.append(scale[chords - 1])\n return new_modes\n\n elif self.style == \"Jazz Minor\" or self.style == \"jazz minor\":\n scale = give_jazz_minor(self.key, self.notes)\n new_modes = []\n for chords in self.chords:\n new_modes.append(scale[chords - 1])\n return new_modes\n\n else:\n scale = major(self.key, self.notes)\n new_modes = []\n for chords in self.chords:\n new_modes.append(scale[chords - 1])\n return new_modes\n\n def generate_triads(self, style):\n new_triads = []\n if style == \"seventh\":\n for mode in self.modes:\n new_triads.append(jazz_triad_7th(mode))\n return new_triads\n else:\n for mode in self.modes:\n new_triads.append(simple_triad(mode))\n return new_triads\n\n def give_names(self):\n new_names = []\n for chord in self.triads:\n new_names.append(chord_name(chord, self.notes))\n return new_names\n\n def display(self):\n\n print self.key + ' ' + self.style\n\n i = 0\n\n while(i < len(self.modes)):\n\n print self.chords[i], self.names[i], self.triads[i]\n\n i = i + 1\n\n\n def play(self, tempo = 200):\n play_progression(self.triads, self.notes, tempo)\n\n\n'''\nFunctions for creating scales and modes\n'''\n\n\ndef give_mode(name, notes, mode):\n i = notes.index(name)\n\n scale = [notes[((i + interval) % 12)] for interval in mode]\n\n return scale\n\n\ndef major(key, notes):\n prog = [give_mode(key, notes, mode) for mode in Mode.modes]\n\n return prog\n\n\ndef minor(key, notes):\n i = notes.index(key)\n newKey = notes[(i + 3) % 12]\n\n startIndex = Mode.modes.index(Mode.aeolian)\n rotatedModes = Mode.modes[startIndex:] + Mode.modes[:startIndex]\n\n prog = [give_mode(newKey, notes, mode) for mode in rotatedModes]\n\n return prog\n\n\ndef give_jazz_minor(key, notes):\n temp = Mode.modes\n\n temp[0] = Mode.jazz_minor\n\n for mode in temp:\n mode = [3 if x == 4 else x for x in mode]\n\n prog = [give_mode(key, notes, mode) for mode in temp]\n\n return prog\n\n\ndef give_mixolydian(key, notes):\n i = notes.index(key)\n newKey = notes[(i + 5) % 12]\n\n startIndex = Mode.modes.index(Mode.mixolydian)\n rotatedModes = Mode.modes[startIndex:] + Mode.modes[:startIndex]\n\n prog = [give_mode(newKey, notes, mode) for mode in rotatedModes]\n\n return prog\n\n\ndef give_phyrgian(key, notes):\n i = notes.index(key)\n newKey = notes[(i + 8) % 12]\n\n startIndex = Mode.modes.index(Mode.phrygian)\n rotatedModes = Mode.modes[startIndex:] + Mode.modes[:startIndex]\n\n prog = [give_mode(newKey, notes, mode) for mode in rotatedModes]\n\n return prog\n\n\ndef simple_triad(scale):\n chord = [\n scale[0],\n scale[2],\n scale[4]]\n\n return chord\n\n\ndef jazz_triad_7th(scale):\n chord = [\n scale[0],\n scale[2],\n scale[4],\n scale[6]]\n\n return chord\n\n\nclass ChordDict(object):\n '''Dictionaries for naming chords'''\n\n Name_Dictionary = {(0, 3, 7): \"Minor\",\n (0, 3, 6): \"Dim\",\n (0, 4, 7): \"Major\",\n (0, 4, 8): \"Aug\",\n (0, 4, 7, 10): \"Dominant Seven\",\n (0, 3, 7, 10): \"Minor Seven\",\n (0, 4, 7, 11): \"Major Seven\",\n (0, 4, 
8, 10): \"Aug Minor Seven\",\n (0, 3, 6, 9): \"Diminished Seven\",\n (0, 3, 6, 10): \"Half-diminished Seven\",\n (0, 5, 7): \"Sus4\",\n (0, 2, 7): \"Sus2\",\n (0, 5, 10): \"Quartal\",\n (0, 5, 7, 10): \"7Sus4\",\n (0, 2, 7, 10): \"7Sus2\",\n (0, 5, 7, 10, 2): \"9Sus4\",\n (0, 4, 7, 9): \"Major Sixth\",\n (0, 4, 7, 8): \"Minor Sixth\",\n (0, 3, 7, 11): \"Major Minor Seventh\",\n (0, 4, 7, 14): \"Major Nine\",\n (0, 2, 4, 7): \"add9\",\n (0, 3, 7, 2): \"Minor Nine\",\n (0, 7): \"5\",\n (0, 3, 7, 10, 14, 17): \"Minor Eleventh\",\n (0, 4, 7, 11, 14, 17): \"Major Eleventh\",\n (0, 4, 7, 10, 14, 17): \"Dominant Eleventh\",\n (0, 4, 7, 10, 14, 17, 19): \"Dominant Thirteenth\",\n (0, 2, 7, 11): \"Major Seven Sus2\",\n (0, 5, 7, 11): \"Major Seven Sus4\",\n (0, 4, 7, 11, 18): \"Major Seven Sharp Eleven\",\n (0, 5, 7, 13): \"Sus4 Flat9\"}\n\n Reverse_Dictionary = {\"Minor\": (0, 3, 7),\n \"Dim\": (0, 3, 6),\n \"Major\": (0, 4, 7),\n \"Aug\": (0, 4, 8),\n \"Dominant Seven\": (0, 4, 7, 10),\n \"Minor Seven\": (0, 3, 7, 10),\n \"Major Seven\": (0, 4, 7, 11),\n \"Aug Minor Seven\": (0, 4, 8, 10),\n \"Diminished Seven\": (0, 3, 6, 9),\n \"Half-diminished Seven\": (0, 3, 6, 10),\n \"Sus4\": (0, 5, 7),\n \"Sus2\": (0, 2, 7),\n \"Quartal\": (0, 5, 10),\n \"7Sus4\": (0, 5, 7, 10),\n \"7Sus2\": (0, 2, 7, 10),\n \"9Sus4\": (0, 5, 7, 10, 2),\n \"Major Sixth\": (0, 5, 7, 9),\n \"Minor Sixth\": (0, 5, 7, 8),\n \"Major Minor Seventh\": (0, 3, 7, 11),\n \"Major Nine\": (0, 4, 7, 14),\n \"add9\": (0, 2, 4, 7),\n \"Minor Nine\": (0, 3, 7, 14),\n \"5\": (0, 7),\n \"Minor Eleventh\": (0, 3, 7, 10, 14, 17),\n \"Major Eleventh\": (0, 4, 7, 11, 14, 17),\n \"Dominant Eleventh\": (0, 4, 7, 10, 14, 17),\n \"Dominant Thirteenth\": (0, 4, 7, 10, 14, 19),\n \"Major Seven Sus2\": (0, 2, 7, 11),\n \"Major Seven Sus4\": (0, 5, 7, 11),\n \"Major Seven Sharp Eleven\": (0, 4, 7, 11, 18),\n \"Sus4 Flat9\": (0, 5, 7, 13)}\n\n\n chord_mode_dictionary = {\"Minor\": Mode.aeolian,\n \"Dominant Seven\": Mode.mixolydian,\n \"Minor Seven\": Mode.dorian,\n \"Major Seven\": Mode.ionian,\n \"Major Seven Sharp Eleven\": Mode.locrian,\n \"Sus4 flat9\": Mode.phrygian,\n \"Half-diminished Seven\": Mode.locrian}\n\n\n\n\n\n# Add something to deal with input of something that isnt a chord????\ndef chord_name(chord, notes):\n i = notes.index(chord[0])\n\n intervals = []\n\n for note in chord:\n j = i\n temp_interval = 0\n while (notes[j] != note):\n j = (j + 1) % 12\n temp_interval = temp_interval + 1\n intervals.append(temp_interval)\n\n tup_int = tuple(intervals)\n temp = ChordDict.Name_Dictionary[tup_int]\n\n return chord[0] + \" \" + temp\n\n\ndef keyboard_chord(chord, notes):\n i = notes.index(chord[0])\n\n intervals = []\n\n for note in chord:\n j = i\n temp_interval = 0\n while notes[j] != note:\n j = (j + 1) % 12\n temp_interval = temp_interval + 1\n intervals.append(temp_interval)\n\n # intervals\n # find starting note\n new_chord = []\n starting_index = Notes.big_keyboard.index(chord[0] + '3')\n\n for interval in intervals:\n new_chord.append(Notes.big_keyboard[starting_index + interval])\n\n return new_chord\n\n\ndef give_chord_numbers(key, style, notes, list_of_chord_names):\n prog = Progression(key, style, notes, [1, 2, 3, 4, 5, 6, 7])\n temp = []\n\n for chord in list_of_chord_names:\n i = prog.names.index(chord)\n temp.append(i + 1)\n\n return temp\n\n\ndef give_notes(chord_name, notes):\n temp = chord_name.partition(\" \")\n\n key = temp[0]\n chord_type = temp[2]\n\n i = notes.index(key)\n temp2 = 
ChordDict.Reverse_Dictionary[chord_type]\n\n final = [notes[((i + interval) % 12)] for interval in temp2]\n\n return final\n\n\n\n# make the sequence of chords all start in the lower octave and then continue up to the higher octave\n# voicings are more accurate\n\n\n'''Sound Stuff'''\n\n\ndef detect_leading_silence(sound, silence_threshold=-50.0, chunk_size=10):\n '''\n sound is a pydub.AudioSegment\n silence_threshold in dB\n chunk_size in ms\n\n iterate over chunks until you find the first one with sound\n '''\n trim_ms = 0 # ms\n while sound[trim_ms:trim_ms + chunk_size].dBFS < silence_threshold:\n trim_ms += chunk_size\n\n return trim_ms\n\n\ndef remove_leading_silence(sound):\n start_trim = detect_leading_silence(sound)\n duration = len(sound)\n trimmed_sound = sound[start_trim:duration]\n return trimmed_sound\n\n\n'''Creates fixed length chord'''\n\n\ndef create_playable_chord_generic(chord, tempo):\n # make adjustment here #\n sounds = []\n adjusted_chord = keyboard_chord(chord, Notes.flats)\n for note in adjusted_chord:\n path = \"/Users/mwparas/Documents/Python/Chord_Project/Pitches2/\" + note + \".wav\"\n sounds.append(AudioSegment.from_file(path))\n\n # sounds = [remove_leading_silence(sound) for sound in sounds]\n\n first = sounds.pop(0)\n\n while len(sounds) > 0:\n temp = sounds.pop(0)\n first = first.overlay(temp)\n\n whole_note = 4 / (tempo / 60)\n\n silence = AudioSegment.silent(duration=whole_note * 1000)\n final = silence.overlay(first)\n\n return final\n\n\ndef play_progression(progression, notes, tempo = 200):\n chords = []\n for chord in progression:\n print chord_name(chord, Notes.flats), chord\n # print chord.list_of_notes, chord.name\n chords.append(create_playable_chord_generic(chord, tempo))\n\n first = chords.pop(0)\n\n while len(chords) > 0:\n temp = chords.pop(0)\n first = first.append(temp, crossfade=0)\n\n play(first)\n\n\n\n# Cost Function\n# Moves from chord to chord and creates a recommended set of notes to use\n# Next set of notes is dependent on what was previously played, to create something that logistically makes sense\n\n\n# Enter Chord progression\n\n# Give options for something simple, like Bb blues\n\n# Display possible notes in each of the chords\n\n# Provide interactive experience, select notes that want to start and end on\n\n# Create cost function for possible notes, as in create weighting that changes from measure to measure\n\n\ndef chord_to_mode(chord_name):\n temp = chord_name.partition(\" \")\n\n key = temp[0]\n chord_type = temp[2]\n\n temp2 = ChordDict.chord_mode_dictionary[chord_type]\n\n return give_mode(key, Notes.flats, temp2)\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n\n test = CustomProgression([\"Bb Minor Seven\", \"G Dominant Seven\", \"C Minor Seven\", \"F Dominant Seven\"])\n\n test.display()\n\n #test.play()\n\n #test2 = Progression(\"A\", \"Mixolydian\", Notes.flats, [1, 5, 6, 3, 4, 1, 4, 5, 1])\n\n #test2.display()\n\n #test2.play()\n\n\n" }, { "alpha_fraction": 0.5588071346282959, "alphanum_fraction": 0.5732998847961426, "avg_line_length": 35.60714340209961, "blob_id": "8bba3d70696c53fa2ad116c3e0e5f766eedf2a8c", "content_id": "33cbb0660774e5dbaba5c8d4d985b90aea5d6693", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7176, "license_type": "no_license", "max_line_length": 135, "num_lines": 196, "path": "/scraper.py", "repo_name": "mattwparas/ultimate-guitar-chain", "src_encoding": "UTF-8", "text": "from __future__ import division\nimport requests\nfrom bs4 import 
BeautifulSoup\nimport re\nimport time\nimport json\nimport random\n\n\n\ndef get_script(url):\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html5lib')\n script_list = soup.find_all('script')\n selected_script = script_list[9]\n return selected_script\n\ndef find_song_links(url):\n selected_script = get_script(url)\n links_list = re.findall('(?<=tab_url\":\")(.*?)(?=\",)', str(selected_script))\n links_list = [link.replace(\"\\\\\", \"\") for link in links_list]\n return links_list\n\ndef find_artist_links(url):\n retry_count = 0\n links_list = []\n while(not links_list and retry_count <= 5):\n try:\n selected_script = get_script(url)\n links_list = re.findall('(?<=},\"artist_url\":\")(.*?)(?=\",)', str(selected_script))\n links_list = [link.replace(\"\\\\\", \"\") for link in links_list]\n chord_filter = \"?filter=chords\"\n links_list = [link + chord_filter for link in links_list]\n except:\n print(\"Error finding artist links, retrying...\")\n time.sleep(random.uniform(3, 6))\n retry_count += 1\n return set(links_list)\n\ndef save_to_json(path, data):\n js = json.dumps(data)\n fp = open(path, 'a')\n fp.write(js)\n fp.close()\n\ndef open_file(path):\n json1_file = open(path)\n json1_str = json1_file.read()\n json1_data = json.loads(json1_str)\n return json1_data\n\ndef song_scraper(url):\n selected_script = get_script(url)\n chord_list = re.findall('(?<=\\[ch\\])(.*?)(?=\\[\\\\\\/ch\\])', str(selected_script))\n chord_list = [chord.replace(\"\\\\\", \"\") for chord in chord_list]\n bulk_info = re.search('(?<=,\"tab\":{)(.*?)(?=,\"date_update\":\")', str(selected_script)).group()\n song_dict = json.loads(\"{\"+bulk_info+\"}\")\n song_dict[\"chord_progression\"] = chord_list\n return song_dict\n\ndef artist_find_next_pages(url):\n selected_script = get_script(url)\n links_list = re.findall('(?<=\"pages\":)(.*?)(?=,\"sorting\")', str(selected_script))\n # retry\n if not links_list:\n tries = 0\n while(not links_list and tries <= 4):\n print(\"Retrying to find links to other pages at:\", url)\n time.sleep(5)\n selected_script = get_script(url)\n links_list = re.findall('(?<=\"pages\":)(.*?)(?=,\"sorting\")', str(selected_script))\n tries += 1\n if links_list:\n print(\"Successfully found links!\")\n page_dict = json.loads('{\"pagination\":' + links_list[0])\n #print(page_dict)\n base_url = \"https://www.ultimate-guitar.com\"\n page_links = [base_url + x[\"url\"]for x in page_dict[\"pagination\"]]\n return page_links\n else:\n return []\n\ndef find_next_pages(url):\n selected_script = get_script(url)\n pagination = re.findall('(?<=\"data\":)(.*?)(?=,\"totalResults\")', str(selected_script))\n # retry\n if not pagination:\n tries = 0\n while(not pagination and tries <= 4):\n print(\"Retrying to find links to other pages at:\", url)\n time.sleep(5)\n selected_script = get_script(url)\n pagination = re.findall('(?<=\"data\":)(.*?)(?=,\"totalResults\")', str(selected_script))\n tries += 1\n if pagination:\n page_dict = json.loads(pagination[0] + \"}\")\n max_pages = page_dict[\"pagination\"][\"pages\"] + 1\n base_url = url[:-14]\n chord_filter = \"&type[]=Chords\"\n links_list = [base_url + \"&page=\" + str(i) + chord_filter for i in range(1, max_pages)]\n return links_list\n else:\n return []\n\n#print(find_next_pages(\"https://www.ultimate-guitar.com/explore?genres[]=4&type[]=Chords\"))\n\n#print(artist_find_next_pages(\"https://www.ultimate-guitar.com/artist/elvis_presley_11125?filter=chords\"))\n\n\ndef crawler(url, path, filename):\n artist_visited_dictionary = {}\n 
song_urls = []\n print(\"Scanning for pages of links...\", url)\n # find links to each page of artists:\n total_page_links = find_next_pages(url)\n for page_link in total_page_links:\n print(\"Finding artist links...\", page_link)\n time.sleep(random.uniform(2.5, 4.3))\n artist_links = find_artist_links(page_link)\n for artist_link in artist_links:\n if(artist_link not in artist_visited_dictionary):\n artist_visited_dictionary[artist_link] = True\n time.sleep(random.uniform(2, 7))\n print(\"Finding pages for artist at:\", artist_link)\n artist_pages = artist_find_next_pages(artist_link)\n for artist_page in artist_pages:\n print(\"Adding songs to queue at:\", artist_page)\n time.sleep(5)\n song_urls = song_urls + find_song_links(artist_page)\n\n print(\"----------------Finished Scanning Pages----------------\")\n print(\"---------------Scraping Individual Songs---------------\")\n num_songs = len(song_urls)\n print(\"Number of Found Songs:\", num_songs)\n json_output = []\n song_count = 0\n save_count = 0\n for song in song_urls:\n try:\n print(\"Scraping at...\", song)\n time.sleep(random.uniform(1.5, 2.1))\n song_output = song_scraper(song)\n print(\"Successfully scraped:\", song_output[\"song_name\"])\n json_output.append(song_output)\n song_count += 1\n per_complete = 100*song_count / num_songs\n if song_count % 10 == 0:\n print(\"-----------------Songs Saved:\", song_count, \"-----------------\")\n print(\"------------\", per_complete, \"% complete------------\")\n if song_count % 50 == 0:\n new_file = path + filename + str(save_count + 1) + \".json\"\n print(\"Saving file now to\", new_file)\n save_to_json(new_file, json_output)\n save_count += 1\n json_output = []\n except:\n print(\"Error: Failed Scraping at\", url)\n new_file = path + filename + str(save_count + 1) + \".json\"\n print(\"Saving file now to\", new_file)\n save_to_json(new_file, json_output)\n print(\"----------------Scraping Complete!-----------------------\")\n return\n\n\n#output = crawler(\"https://www.ultimate-guitar.com/explore?decade[]=1950&genres[]=4&order=hitstotal_desc&type[]=Chords\")\npath = 'f:/WebCrawlers/RockMusic/2010Rock/'\nfilename = '2010batch'\n\ncrawler(\"https://www.ultimate-guitar.com/explore?decade[]=2010&genres[]=4&type[]=Chords\", path, filename)\n\n\n\n\n\n\n\n#print(find_artist_links(\"https://www.ultimate-guitar.com/explore?decade[]=1950&genres[]=4&order=hitstotal_desc&page=4&type[]=Chords\"))\n\n#print(find_artist_links(\"https://www.ultimate-guitar.com/explore?decade[]=1950&genres[]=4&page=2&type[]=Chords\"))\n\n\n#print(find_next_pages(\"https://www.ultimate-guitar.com/explore?decade[]=1950&genres[]=4&order=hitstotal_desc&type[]=Chords\"))\n#path = 'f:/WebCrawlers/1960Rock.json'\n\n\n# \"artist_url\":\"https:\\/\\/www.ultimate-guitar.com\\/artist\\/elvis_presley_11125\",\n\n# [\n# {\n# \"artist\": \"whatever\",\n# \"artist_id\": \"some-unique-id\", #maybe\n# \"song_name\": \"wahtever\",\n# \"chords\": [\"a\", \"b\"]\n# },\n# ...\n# ]\n\n" }, { "alpha_fraction": 0.4444045126438141, "alphanum_fraction": 0.47747933864593506, "avg_line_length": 32.01188278198242, "blob_id": "acd47b72b7ea81f4d292f647900f1fd12936bda2", "content_id": "f676ef41c5db3afd8d0364209a2aff53bff04e34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19471, "license_type": "no_license", "max_line_length": 98, "num_lines": 589, "path": "/MarkovProject/chord_analysis.py", "repo_name": "mattwparas/ultimate-guitar-chain", "src_encoding": "UTF-8", "text": "# -*- 
coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 21 14:57:54 2018\n\n@author: Matthew Paras\n\"\"\"\nimport json\nimport os\nfrom sklearn.linear_model import LogisticRegression\nimport numpy as np\nfrom random import shuffle\n\n# Contains interval information for each mode, as well as a \"modes\" list\nclass Mode(object):\n harmonic_minor = [0, 2, 3, 5, 7, 8, 11, 12]\n jazz_minor = [0, 2, 3, 5, 7, 9, 11, 12]\n\n ionian = [0, 2, 4, 5, 7, 9, 11, 12]\n dorian = [2, 4, 5, 7, 9, 11, 12, 2]\n phrygian = [4, 5, 7, 9, 11, 12, 2, 4]\n lydian = [5, 7, 9, 11, 12, 2, 4, 5]\n mixolydian = [7, 9, 11, 12, 2, 4, 5, 7]\n aeolian = [9, 11, 12, 2, 4, 5, 7, 9]\n locrian = [11, 12, 2, 4, 5, 7, 9, 11]\n\n modes = [ionian, dorian, phrygian, lydian, mixolydian, aeolian, locrian]\n\n\n# basic keyboard information for creating chords and playback\nclass Notes(object):\n flats = ['A', 'Bb', 'B', 'C', 'Db', 'D', 'Eb', 'E', 'F', 'Gb', 'G', 'Ab']\n sharps = ['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#']\n\n\nclass Chord(object):\n def __init__(self, list_of_notes, name):\n self.name = name\n self.list_of_notes = list_of_notes\n self.root = list_of_notes[0]\n\n def add_note_to_beginning(self, note):\n self.list_of_notes.insert(0, note)\n\n def make_flats(self):\n for note in self.list_of_notes:\n if note in Notes.sharps:\n temp = Notes.sharps.index(note)\n note = Notes.flats[temp]\n\n\nclass CustomProgression(object):\n def __init__(self, list_of_names):\n self.names = list_of_names\n self.chords = self.generate_notes()\n self.modes = self.map_chords_modes()\n\n def generate_notes(self):\n chord_output = [give_notes(name, Notes.flats) for name in self.names]\n return chord_output\n\n def map_chords_modes(self):\n mode_output = [chord_to_mode(name for name in self.names)]\n return mode_output\n\n def display(self):\n for i in range(0, len(self.modes)):\n print(self.names[i], self.modes[i])\n\n\n# Progression Class:\nclass Progression(object):\n def __init__(self, key, style, notes, chords, triads=\"normal\"):\n self.key = key\n self.style = style\n self.notes = notes\n self.chords = chords # list\n self.modes = self.generate_modes()\n self.triads = self.generate_triads(triads)\n self.names = self.give_names()\n\n def add_chords_to_progression(self, chords):\n for number in chords:\n self.chords.append(number)\n # update chords\n\n def generate_modes(self):\n if self.style == \"minor\" or self.style == \"Minor\":\n scale = minor(self.key, self.notes)\n new_modes = []\n for chords in self.chords:\n new_modes.append(scale[chords - 1])\n return new_modes\n\n elif self.style == \"mixolydian\" or self.style == \"Mixolydian\":\n scale = give_mixolydian(self.key, self.notes)\n new_modes = []\n for chords in self.chords:\n new_modes.append(scale[chords - 1])\n return new_modes\n\n elif self.style == \"phrygian\" or self.style == \"Phrygian\":\n scale = give_phyrgian(self.key, self.notes)\n new_modes = []\n for chords in self.chords:\n new_modes.append(scale[chords - 1])\n return new_modes\n\n elif self.style == \"Jazz Minor\" or self.style == \"jazz minor\":\n scale = give_jazz_minor(self.key, self.notes)\n new_modes = []\n for chords in self.chords:\n new_modes.append(scale[chords - 1])\n return new_modes\n\n else:\n scale = major(self.key, self.notes)\n new_modes = []\n for chords in self.chords:\n new_modes.append(scale[chords - 1])\n return new_modes\n\n def generate_triads(self, style):\n new_triads = []\n if style == \"seventh\":\n for mode in self.modes:\n new_triads.append(jazz_triad_7th(mode))\n return 
new_triads\n else:\n for mode in self.modes:\n new_triads.append(simple_triad(mode))\n return new_triads\n\n def give_names(self):\n new_names = []\n for chord in self.triads:\n new_names.append(give_chord_name(chord, self.notes))\n return new_names\n\n def display(self):\n print(self.key + ' ' + self.style)\n for i in range(0, len(self.modes)):\n print(self.chords[i], self.names[i], self.triads[i])\n\n\n# Functions for creating scales and modes\ndef give_mode(name, notes, mode):\n i = notes.index(name)\n scale = [notes[((i + interval) % 12)] for interval in mode]\n return scale\n\n\ndef major(key, notes):\n prog = [give_mode(key, notes, mode) for mode in Mode.modes]\n return prog\n\n\ndef minor(key, notes):\n i = notes.index(key)\n newKey = notes[(i + 3) % 12]\n startIndex = Mode.modes.index(Mode.aeolian)\n rotatedModes = Mode.modes[startIndex:] + Mode.modes[:startIndex]\n prog = [give_mode(newKey, notes, mode) for mode in rotatedModes]\n return prog\n\n\ndef give_jazz_minor(key, notes):\n temp = Mode.modes\n temp[0] = Mode.jazz_minor\n for mode in temp:\n mode = [3 if x == 4 else x for x in mode]\n prog = [give_mode(key, notes, mode) for mode in temp]\n return prog\n\n\ndef give_mixolydian(key, notes):\n i = notes.index(key)\n newKey = notes[(i + 5) % 12]\n startIndex = Mode.modes.index(Mode.mixolydian)\n rotatedModes = Mode.modes[startIndex:] + Mode.modes[:startIndex]\n prog = [give_mode(newKey, notes, mode) for mode in rotatedModes]\n return prog\n\n\ndef give_phyrgian(key, notes):\n i = notes.index(key)\n newKey = notes[(i + 8) % 12]\n startIndex = Mode.modes.index(Mode.phrygian)\n rotatedModes = Mode.modes[startIndex:] + Mode.modes[:startIndex]\n prog = [give_mode(newKey, notes, mode) for mode in rotatedModes]\n return prog\n\n\ndef simple_triad(scale):\n chord = [scale[0],\n scale[2],\n scale[4]]\n return chord\n\n\ndef jazz_triad_7th(scale):\n chord = [scale[0],\n scale[2],\n scale[4],\n scale[6]]\n return chord\n\n\nclass ChordDict(object):\n '''Dictionaries for naming chords'''\n\n Name_Dictionary = {(0, 3, 7): \"Minor\",\n (0, 3, 6): \"Dim\",\n (0, 4, 7): \"Major\",\n (0, 4, 8): \"Aug\",\n (0, 4, 7, 10): \"Dominant Seven\",\n (0, 3, 7, 10): \"Minor Seven\",\n (0, 4, 7, 11): \"Major Seven\",\n (0, 4, 8, 10): \"Aug Minor Seven\",\n (0, 3, 6, 9): \"Diminished Seven\",\n (0, 3, 6, 10): \"Half-diminished Seven\",\n (0, 5, 7): \"Sus4\",\n (0, 2, 7): \"Sus2\",\n (0, 5, 10): \"Quartal\",\n (0, 5, 7, 10): \"7Sus4\",\n (0, 2, 7, 10): \"7Sus2\",\n (0, 5, 7, 10, 2): \"9Sus4\",\n (0, 4, 7, 9): \"Major Sixth\",\n (0, 4, 7, 8): \"Minor Sixth\",\n (0, 3, 7, 11): \"Major Minor Seventh\",\n (0, 4, 7, 14): \"Major Nine\",\n (0, 2, 4, 7): \"add9\",\n (0, 3, 7, 2): \"Minor Nine\",\n (0, 7): \"5\",\n (0, 3, 7, 10, 14, 17): \"Minor Eleventh\",\n (0, 4, 7, 11, 14, 17): \"Major Eleventh\",\n (0, 4, 7, 10, 14, 17): \"Dominant Eleventh\",\n (0, 4, 7, 10, 14, 17, 19): \"Dominant Thirteenth\",\n (0, 2, 7, 11): \"Major Seven Sus2\",\n (0, 5, 7, 11): \"Major Seven Sus4\",\n (0, 4, 7, 11, 18): \"Major Seven Sharp Eleven\",\n (0, 5, 7, 13): \"Sus4 Flat9\",\n (0, 2, 3, 4): \"add4\"}\n\n Reverse_Dictionary = {\"Minor\": (0, 3, 7),\n \"Dim\": (0, 3, 6),\n \"Major\": (0, 4, 7),\n \"Aug\": (0, 4, 8),\n \"Dominant Seven\": (0, 4, 7, 10),\n \"Minor Seven\": (0, 3, 7, 10),\n \"Major Seven\": (0, 4, 7, 11),\n \"Aug Minor Seven\": (0, 4, 8, 10),\n \"Diminished Seven\": (0, 3, 6, 9),\n \"Half-diminished Seven\": (0, 3, 6, 10),\n \"Sus4\": (0, 5, 7),\n \"Sus2\": (0, 2, 7),\n \"Quartal\": (0, 5, 10),\n \"7Sus4\": (0, 5, 7, 
10),\n \"7Sus2\": (0, 2, 7, 10),\n \"9Sus4\": (0, 5, 7, 10, 2),\n \"Major Sixth\": (0, 5, 7, 9),\n \"Minor Sixth\": (0, 5, 7, 8),\n \"Major Minor Seventh\": (0, 3, 7, 11),\n \"Major Nine\": (0, 4, 7, 14),\n \"add9\": (0, 2, 4, 7),\n \"Minor Nine\": (0, 3, 7, 14),\n \"5\": (0, 7),\n \"Minor Eleventh\": (0, 3, 7, 10, 14, 17),\n \"Major Eleventh\": (0, 4, 7, 11, 14, 17),\n \"Dominant Eleventh\": (0, 4, 7, 10, 14, 17),\n \"Dominant Thirteenth\": (0, 4, 7, 10, 14, 19),\n \"Major Seven Sus2\": (0, 2, 7, 11),\n \"Major Seven Sus4\": (0, 5, 7, 11),\n \"Major Seven Sharp Eleven\": (0, 4, 7, 11, 18),\n \"Sus4 Flat9\": (0, 5, 7, 13),\n \"add4\": (0, 2, 3, 4)}\n\n chord_mode_dictionary = {\"Minor\": Mode.aeolian,\n \"Dominant Seven\": Mode.mixolydian,\n \"Minor Seven\": Mode.dorian,\n \"Major Seven\": Mode.ionian,\n \"Major Seven Sharp Eleven\": Mode.locrian,\n \"Sus4 flat9\": Mode.phrygian,\n \"Half-diminished Seven\": Mode.locrian}\n\n Unique_Name_Dictionary = {\"m\": \"Minor\",\n \"dim\": \"Dim\",\n '': \"Major\",\n #(0, 4, 8): \"Aug\",\n \"7\": \"Dominant Seven\",\n \"m7\": \"Minor Seven\",\n #(0, 4, 7, 11): \"Major Seven\",\n #(0, 4, 8, 10): \"Aug Minor Seven\",\n #(0, 3, 6, 9): \"Diminished Seven\",\n #(0, 3, 6, 10): \"Half-diminshed Seven\",\n \"sus4\": \"Sus4\",\n \"sus2\": \"Sus2\",\n \"sus\": \"Sus4\",\n #(0, 5, 10): \"Quartal\",\n \"7sus4\": \"7Sus4\",\n \"7sus\": \"7Sus4\",\n \"9\": \"Major Nine\",\n \"maj9\": \"Major Nine\",\n \"m9\": \"Minor Nine\",\n \"add9\": \"add9\",\n \"maj7\": \"Major Seven\", \n #(0, 5, 7, 10, 2): \"9Sus4\",\n \"6\" : \"Major Sixth\",\n \"m6\": \"Minor Sixth\",\n \"mmaj7\": \"Major Minor Seventh\",\n \"M7\": \"Major Seven\",\n #\"7M\": \"Major Seven\",\n \"dim7\": \"Diminished Seven\",\n \"5\": \"5\",\n \"Maj9\": \"Nine\",\n \"2\": \"Sus2\",\n \"4\": \"Sus4\",\n \"madd9\": \"Minor Nine\",\n \"madd11\": \"Minor Eleventh\",\n \"add11\": \"Major Eleventh\",\n \"11\": \"Dominant Eleventh\",\n \"13\": \"Dominant Thirteenth\",\n \"m7add11\": \"Minor Eleventh\",\n \"maj7sus2\": \"Major Seven Sus2\",\n \"maj7sus4\": \"Major Seven Sus4\",\n \"7Sus2\": \"7Sus2\",\n \"7sus2\": \"7Sus2\",\n \"add4\": \"add4\"}\n\n\n\n\n\n# Give name of a chord based on input of notes\n# chord->[list of notes]\n# notes->[white and black keys (flat or sharp)]\n# Add something to deal with input of something that isnt a chord????\ndef give_chord_name(chord, notes):\n i = notes.index(chord[0])\n intervals = []\n for note in chord:\n j = i\n temp_interval = 0\n while(notes[j] != note):\n j = (j + 1) % 12\n temp_interval = temp_interval + 1\n intervals.append(temp_interval)\n tup_int = tuple(intervals)\n temp = Mode.Name_Dictionary[tup_int]\n return chord[0] + \" \" + temp\n\n\ndef give_chord_numbers(key, style, notes, list_of_chord_names):\n prog = Progression(key, style, notes, [1, 2, 3, 4, 5, 6, 7])\n chord_num = [prog.names.index(chord) + 1 for chord in list_of_chord_names]\n return chord_num\n\n\ndef give_notes(chord_name, notes):\n temp = chord_name.partition(\" \")\n key = temp[0]\n chord_type = temp[2]\n i = notes.index(key)\n temp2 = ChordDict.Reverse_Dictionary[chord_type]\n final = [notes[((i + interval) % 12)] for interval in temp2]\n return final\n\n\ndef chord_to_mode(chord_name):\n temp = chord_name.partition(\" \")\n key = temp[0]\n chord_type = temp[2]\n temp2 = ChordDict.chord_mode_dictionary[chord_type]\n return give_mode(key, Notes.flats, temp2)\n\n\ndef Special_Name(chord_name):\n slash = False\n if '/' in chord_name:\n slash = True\n split_chord = chord_name.partition('/')\n 
root = split_chord[-1]\n if root in Notes.sharps:\n new_index = Notes.sharps.index(root)\n root = Notes.flats[new_index]\n chord_name = split_chord[0]\n unique = []\n #########################\n modifiers = ['di', 'm', 'M', 'aj', 'add', '5', '6', '7', '9', '11', '13', 'sus', '2', '4']\n for modifier in modifiers:\n if modifier != '' and modifier in chord_name:\n if modifier == 'm' and 'mm' in chord_name:\n unique.append('mm')\n else:\n unique.append(modifier)\n new_name = ''.join(unique)\n chord_type = ChordDict.Unique_Name_Dictionary[new_name]\n key = chord_name.replace(new_name, '')\n if key in Notes.sharps:\n i = Notes.sharps.index(key)\n else:\n i = Notes.flats.index(key)\n temp2 = ChordDict.Reverse_Dictionary[chord_type]\n final = [Notes.flats[((i + interval) % 12)] for interval in temp2]\n chord_object = Chord(final, chord_name)\n\n\n # put key identifier function here\n # make note if it is slash on the object\n # when determining the key->check if the root is the root, if not\n\n if(slash):\n chord_object.add_note_to_beginning(root)\n\n chord_object.make_flats()\n\n return chord_object\n\n\ndef transpose_and_store(progression):\n #test_data = {name -> progression}\n key = progression.key\n key_note = key.partition(' ')[0]\n chord_list = []\n for chord in progression.chords:\n new_chord = chord\n new_chord.make_flats() # normalize sharps to flats before measuring intervals\n chord_list.append(new_chord)\n final = []\n i = Notes.flats.index(key_note)\n for chord in chord_list:\n intervals = []\n for note in chord.list_of_notes:\n if note not in Notes.flats:\n raise Exception('For some reason, this chord had notes that did not exist!')\n j = i\n temp_interval = 0\n while(Notes.flats[j] != note):\n j = (j + 1) % 12\n temp_interval = temp_interval + 1\n intervals.append(temp_interval)\n intervals = tuple(intervals)\n set_of_final = (intervals)\n final.append(set_of_final)\n return final\n\n\ndef open_file(path):\n json1_file = open(path)\n json1_str = json1_file.read()\n json1_data = json.loads(json1_str)\n return json1_data\n\n\ndef build_feature_vector(progression):\n reference = {}\n total_count = 1\n for n in Notes.flats:\n reference[n] = 0\n for chord in progression:\n for note in chord:\n reference[note] += 1\n total_count += 1\n feature = np.array([reference[key] for key in reference])\n return feature\n\n\ndef combine_files(path):\n data = []\n for file in os.listdir(path):\n data += open_file(path + \"/\" + file)\n return data\n\n\ndef combine_folders(path):\n data = []\n for folder in os.listdir(path):\n data += combine_files(path + \"/\" + folder)\n return data\n\n\ndef parse_prog(data):\n output = []\n labels = []\n error_count = 0\n minor_flats = [x + 'm' for x in Notes.flats]\n minor_sharps = [x + 'm' for x in Notes.sharps]\n flats = Notes.flats + minor_flats\n sharps = Notes.sharps + minor_sharps\n for i in range(len(data)):\n try:\n parsed = [Special_Name(chord).list_of_notes for chord in data[i]['chord_progression']]\n feature = build_feature_vector(parsed)\n try:\n if data[i]['tonality_name'] in flats:\n labels.append(flats.index(data[i]['tonality_name']))\n else:\n labels.append(sharps.index(data[i]['tonality_name']))\n output.append(feature)\n except:\n pass\n\n except (KeyError, ValueError) as e:\n #print(data[i]['chord_progression'])\n #print(\"error\")\n error_count += 1\n #pass\n \n print(error_count)\n return np.array(output), np.array(labels)\n\n\ndef songs_with_keys(data):\n return [x for x in data if x[\"tonality_name\"] != '']\n\n\ndef find_unique(data):\n unique_dict = {}\n output = []\n for song in data:\n if song['id'] not in unique_dict:\n unique_dict[song['id']] = True\n output.append(song)\n return output\n\ndef build_regression(data):\n x, y = parse_prog(data)\n logisticRegr = LogisticRegression(solver = \"newton-cg\", multi_class = \"multinomial\")\n print(y)\n logisticRegr.fit(x, y)\n print(logisticRegr.score(x, y))\n return logisticRegr\n\n\n\n\nif __name__ == '__main__':\n \n path = 'f:/WebCrawlers/RockMusic'\n \n data = combine_folders(path)\n \n\n no_duplicates = find_unique(data)\n \n \n keys = songs_with_keys(no_duplicates)\n \n build_regression(keys)\n \n #print(Special_Name('Ab').list_of_notes)\n \n \n \n #parse_prog(total_data)\n #print(data[0])\n \n #test = Special_Name('Dbm')\n #print(test.name, test.list_of_notes)\n \n #for chord in data[0]['chord_progression']:\n #test = Special_Name(chord)\n #print(test.name, test.list_of_notes)\n \n #total_data = data + data2 + data3\n\n \n\n \n #x, y = parse_prog(keys)\n \n #logisticRegr = LogisticRegression(solver = \"newton-cg\", multi_class = \"multinomial\")\n \n\n \n #print(y)\n \n #logisticRegr.fit(x, y)\n \n \n #print(logisticRegr.score(x, y))\n \n \n #print(build_feature_vector(test))\n \n # print(os.listdir())\n \n# for song in data3:\n# if song[\"tonality_name\"] != '':\n# print(song[\"song_name\"], song[\"tonality_name\"])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" } ]
6
zymos/nature_emulator
https://github.com/zymos/nature_emulator
8f3ddba64fafd8083231d3c54bcb0b77e5870a96
c79431ad63dc607f8c196ca26f9829b0fc82ed1c
c89ab83cebb52a5160faa2786120604b34e42de5
refs/heads/master
2021-01-20T15:37:05.525860
2015-07-05T16:15:25
2015-07-05T16:15:25
35,963,076
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.58685702085495, "alphanum_fraction": 0.6040034294128418, "avg_line_length": 31.793893814086914, "blob_id": "c54706a4900ca4ae753e36df045533d31b37a9a6", "content_id": "998f1696000bb9c8ba2334d6d987862fa6bb4a13", "detected_licenses": [ "LicenseRef-scancode-public-domain", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12889, "license_type": "permissive", "max_line_length": 590, "num_lines": 393, "path": "/11-full_day.py", "repo_name": "zymos/nature_emulator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport pygame\n# from pygame import mixer\nimport time\nimport threading\nimport sys\n\n##########################################################\n# Configure\n#\n\ndef configure():\n ##########################\n # Global Vars\n global AUDIO_CROSSFADE_TIME, STOPWATCH_INTERVAL, AUDIO_FADE_TIME, DEBUG, BACKGROUND_SOUND_FILENAME, BACKGROUND_SOUND_VOLUME, BACKGROUND_SOUND_ENABLE, ENABLE_FIXED_TIME_MODE, START_OF_THE_DAY_MODE, END_OF_THE_DAY_MODE, ENABLE_FIXED_LATENIGHT, FIXED_TIME_DAWN, FIXED_TIME_SUNRISE, FIXED_TIME_MIDMORNING, FIXED_TIME_MIDAFTERNOON, FIXED_TIME_SUNSET, FIXED_TIME_DUSK, FIXED_TIME_LATENIGHT, DAWN_AUDIO_FILENAME, SUNRISE_AUDIO_FILENAME, MIDMORNING_AUDIO_FILENAME, MIDAFTERNOON_AUDIO_FILENAME, SUNSET_AUDIO_FILENAME, DUSK_AUDIO_FILENAME, LATENIGHT_AUDIO_FILENAME, CITY_LOCATION, AUDIO_DIRECTORY\n\n #######################\n # Main Configs\n \n AUDIO_DIRECTORY = \"\"\n # location of audio files: full path for directory\n\n CITY_LOCATION = \"Denver\"\n # see: \n\n\n ####################\n # Background sound\n BACKGROUND_SOUND_ENABLE = True\n # Will play the following file 24h a day (True or False)\n BACKGROUND_SOUND_FILENAME = '13_Streamside_Songbirds.ogg'\n # recomended: rain, waves, wind\n BACKGROUND_SOUND_VOLUME = 0.7\n # Volume for the background sound (0.0 min - 1.0 max)\n\n######################################\n# Definition for times of the day\n#\n# Division of times of day\n# Beginning of day>>\n# dawn > morning > midday > evening > dusk > night\n# <<End of day\n# \n# dawn: twilight till sunrise example(05:00 to 05:40)\n# twilight = sun 18 degrees below the horizon\n# sunrise = sunrise\n# morning: sunrise till midmorning example(5:45 to 10:00)\n# sunrise = sunrise\n# midmorning = (12:00 - sunrise) / 2\n# midday: midmorning till midafternoon example(10:00 to 15:00\n# midmorning = (12:00 - sunrise) / 2\n# midafternoon = (sunset - 12:00) / 2\n# evening: midafternoon to sunset, example(15:00 to 20:15)\n# midafternoon: (sunset - 12:00) / 2\n# sunset = sunset\n# dusk: sunset till dusk, example(20:15 to 20:45)\n# sunset = sunset\n# dusk = sun 18 degrees below the horizon\n# night: dusk till latenight, example(20:45 to 22:00)\n# dusk = sun 18 degrees below the horizon\n# latenight = (24:00 - sunset) / 2\n#\n # Fixed times of the day\n\n ###############################\n # Time of day Modes\n # \n ENABLE_FIXED_TIME_MODE = True\n # if True \"start_of_day\" and \"end_of_day\" is set to \"fixed\"\n # True or False\n START_OF_THE_DAY_MODE = \"fixed\" # options \"fixed\", \"dawn\", \"sunrise\"\n # if start of day is \"fixed\", set \"dawn\" and \"sunrise\" times\n END_OF_THE_DAY_MODE = \"fixed\" \n # options \"fixed\", \"sunset\", \"dusk\", \"latenight\"\n # if end of day is \"fixed\", set \"dawn\" and \"sunrise\" times\n # if sunset, audio will end at sunset\n # if dusk, audio will end at dusk\n # if latenight, audio will end a latenight\n ENABLE_FIXED_LATENIGHT = True\n # if mode is \"fixed\" this allows 
you to disable audio after dusk\n # True or False\n\n ##################################\n # Times of day for \"fixed\" mode\n # Time format: HH:MM in 24h clock, example 22:00 is 10pm\n FIXED_TIME_DAWN = \"05:00\"\n FIXED_TIME_SUNRISE = \"05:45\"\n FIXED_TIME_MIDMORNING = \"10:00\"\n FIXED_TIME_MIDAFTERNOON = \"15:00\"\n FIXED_TIME_SUNSET = \"20:15\"\n FIXED_TIME_DUSK = \"20:45\"\n FIXED_TIME_LATENIGHT = \"22:00\"\n\n #####################\n # Audio Filenames\n # mp3, wav or ogg files\n DAWN_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n # recomended: light bird sounds\n SUNRISE_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n # recomended: moderate bird sounds\n MIDMORNING_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n # recomended: light bird sounds\n MIDAFTERNOON_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n # recomended: moderate birds sounds, rain, storms\n SUNSET_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n # recomended: crickets, frogs, \n DUSK_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n # recomended: crickets, frogs\n LATENIGHT_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n # recomended: owls, coyotes, wolves\n\n\n #########################\n # Optional\n #\n OVERLAP_TIME = 120 \n # overlap time between files, between times of day (seconds)\n\n AUDIO_FADE_TIME = 20 \n # fade in/out time for each audio file (seconds)\n\n DEBUG = True\n # DEBUG = False\n # enable more verbose output for debuging (True or False)\n\n\n # Old Configs\n AUDIO_CROSSFADE_TIME = 2000 # time for audio crossfade (ms)\n # init_start_time_sec = 1\n start_time = \"13:36\" # HH:MM\n stop_time = \"14:32\" # HH:MM\n \n\n\n # init_time = round(time.time()) # (s)\n # print init_time\n return\n\n\n\n\n#############################################################\n# Code\n#\n\n\n# Plays a constant background sound, in an infinite loop\ndef play_background_sound():\n if BACKGROUND_SOUND_ENABLE:\n print \"%s: Playing background sound\" % (time.strftime(\"%H:%M:%S\")) \n pygame.mixer.music.load(BACKGROUND_SOUND_FILENAME)\n pygame.mixer.music.set_volume(BACKGROUND_SOUND_VOLUME)\n pygame.mixer.music.play(-1)\n\n return\n\n\ndef audio_play_loop(filename, start_time_sec, stop_time_sec):\n # repeat playing a single file for a set durreation with crossfade\n # between\n\n # play_durration: total time file should be played (s)\n # global AUDIO_CROSSFADE_TIME # crossfade (ms)\n # global stopwatch_count\n # global STOPWATCH_INTERVAL # (s)\n # global init_time # (s)\n \n play_durration = stop_time_sec - start_time_sec # (s)\n print \"%s: Starting Audio PlayerAudio, interval [%ds]\" % (time.strftime(\"%H:%M:%S\"), play_durration)\n print \"%s: Loading sound file...\" % (time.strftime(\"%H:%M:%S\"))\n # trans = AUDIO_CROSSFADE_TIME\n sound = pygame.mixer.Sound(filename)\n file_length = sound.get_length() # file length (s)\n print \"%s: Loaded sound file [%ds]\" % (time.strftime(\"%H:%M:%S\"), file_length//60)\n \n # Calculate how many play loops\n if file_length >= play_durration:\n play_loops=0\n else:\n if play_durration % file_length == 0:\n # perfect division\n play_loops = play_durration // file_length;\n else:\n # has remaining time\n play_loops = (play_durration // file_length) + 1;\n\n\n # calculate sleep time\n sleep_time = play_durration - AUDIO_FADE_TIME\n\n if DEBUG:\n print \"\\t>>play_durration [%ds]\" % (play_durration)\n print \"\\t>>file_length [%ds]\" % (file_length)\n print \"\\t>>loops [%ds]\" % (play_loops)\n print \"\\t>>Operation: play(%dx) > sleep [%ds] > fadeout > sleep [%ds] > return\" % 
(play_loops, sleep_time, AUDIO_FADE_TIME) \n\n \n # Playing\n sound.play(loops=int(play_loops), fade_ms=int(AUDIO_FADE_TIME*1000))\n time.sleep(sleep_time)\n sound.fadeout(AUDIO_FADE_TIME*1000)\n time.sleep(AUDIO_FADE_TIME)\n\n return\n\n\n\n\ndef stopwatch(start_time_sec, stop_time_sec):\n # sets a global minute counter\n global stopwatch_count\n # global STOPWATCH_INTERVAL # (s)\n # global init_time # (s)\n # durration (s)\n durration = stop_time_sec - start_time_sec\n\n stopwatch_count = 0\n durration_minutes = durration / 60 # s to m\n\n print \"%s: Stopwatch started (max time = %dm)...\" % (time.strftime(\"%H:%M:%S\"), durration_minutes)\n # max_count = durration_minutes * 6\n max_count = durration / STOPWATCH_INTERVAL\n # print \"max_count=\", max_count\n while stopwatch_count < max_count:\n time.sleep(STOPWATCH_INTERVAL)\n stopwatch_count += 1\n # # print \"%s: Stopwatch: #%d - %ds\" % (time.strftime(\"%H:%M:%S\"), stopwatch_count, STOPWATCH_INTERVAL * stopwatch_count)\n return\n\n\n\n\n# def play_timer(filename, start_time_sec, stop_time_sec):\n # time to play\n # global init_time # (s)\n \n # debgugging code\n # if DEBUG:\n # print \"%s: Main function started... \\n\" % (time.strftime(\"%H:%M:%S\"))\n \n # play_durration = stop_time_sec - start_time_sec\n # continious_play_for_set_durration(filename, start_time_sec, stop_time_sec)\n # return\n\n\n\n# Initialize modules\ndef initialize():\n # initialize modules\n pygame.mixer.init()\n pygame.mixer.pre_init(44100, -16, 2, 2048)\n pygame.init()\n print \"%s: Audio module initialized...\" % (time.strftime(\"%H:%M:%S\"))\n return\n\n\n\n# def schedual_play():\n # time_delay = 1\n # play_event = threading.Timer(time_delay , play_timer, args=[filename, start_time_sec, stop_time_sec])\n # play_event.start()\n\n # return\n\ndef schedual_play(filename, start_time_hm, stop_time_hm):\n \n stop_time_s = hm_to_seconds(stop_time_hm) #(seconds)\n start_time_s = hm_to_seconds(start_time_hm) #(seconds)\n current_time_hm = time.strftime(\"%H:%M\") # HH:MM\n current_time_s = hm_to_seconds(current_time_hm) #(seconds)\n durration_s = stop_time_s - start_time_s #(seconds)\n wait_time_s = start_time_s - current_time_s #(seconds)\n\n if DEBUG:\n print \"%s: >>Current_time: %s [%ds]\" % (time.strftime(\"%H:%M:%S\"), current_time_hm, current_time_s)\n print \"\\t>>Start_time: %s [%ds]\" % (start_time_hm, start_time_s)\n print \"\\t>>Stop_time: %s [%ds]\" % (stop_time_hm, stop_time_s)\n print \"\\t>>Wait_time: [%ds]\" % (start_time_s - current_time_s)\n if start_time_s - current_time_s < 0:\n print \"\\t>>Warning wait_time is negative!: [%ds]\" % (start_time_s - current_time_s)\n print \"\\t>>Durration: [%ds]\" % (stop_time_s - start_time_s)\n if stop_time_s - start_time_s < 0:\n print \"\\t>>Warning durration is negative!: [%ds]\" % (stop_time_s - start_time_s)\n\n print \"%s: Scheduling play @ %s [%ds]\" % (time.strftime(\"%H:%M:%S\"), start_time_hm, durration_s)\n\n if DEBUG:\n print \"\\t>>Opening play thread in [%ds], durration=%ds\" % (wait_time_s, durration_s)\n\n\n # Play function\n # continious_play_for_set_durration(filename, start_time_sec, stop_time_sec)\n # event = threading.Timer(wait_time_s, continious_play_for_set_durration, [filename, start_time_s, stop_time_s])\n event = threading.Timer(wait_time_s, audio_play_loop, [filename, start_time_s, stop_time_s])\n event.daemon = True\n event.start()\n\n return\n\n\n\ndef generate_daily_schedule():\n time_now = time.strftime(\"%H:%M\")\n\n print \"%s: Generating scheduled events for the day [%s]\" % 
(time.strftime(\"%H:%M:%S\"), time.strftime(\"%a, %e %b %Y\"))\n\n if DEBUG:\n print \"\\t>>The time is now [%s]\" % (time_now)\n\n # Getting Events\n\n return\n\n\n\n\n# Converts HH:MM:SS to seconds\ndef hms_to_seconds(t):\n h, m, s = [int(i) for i in t.split(':')]\n return 3600*h + 60*m + s\n\n\n\n# Converts HH:MM:SS to seconds\ndef hm_to_seconds(t):\n h, m = [int(i) for i in t.split(':')]\n return 3600*h + 60*m \n\n\n\n\n# Main Function\ndef main():\n # main function\n\n print \"\\n\\n\\n\"\n \n # CONFIGURE\n print \"%s: Configuring...\" % (time.strftime(\"%H:%M:%S\"))\n # configure()\n import config_file\n\n # print \" > start_time_sec = %ds; stop_time_sec = %ds; play_durration = %ds;\" % (start_time_sec, stop_time_sec, (stop_time_sec - start_time_sec) )\n # print \" > main_fade_in_time = %ds;transition_time = %dms; STOPWATCH_INTERVAL = %ds;\" % (AUDIO_FADE_TIME, AUDIO_CROSSFADE_TIME, STOPWATCH_INTERVAL)\n \n # INTIALIZE\n initialize() # initialize modules\n\n generate_daily_schedule()\n\n # Start background sound\n if BACKGROUND_SOUND_ENABLE:\n play_background_sound()\n \n \n #PLAY\n # filename = '/home/zymos/Documents/docs/projects/pi/nature_emulator/250Hz_44100Hz_16bit_30sec.ogg'\n # filename = '/home/zymos/Documents/docs/projects/pi/nature_emulator/13_Streamside_Songbirds.ogg'\n # continious_play_for_set_durration(filename, total_play_time)\n # play_timer(filename, start_time_sec, stop_time_sec)\n \n # if DEBUG:\n # print \"%s: >>starting schedual_play function\" % (time.strftime(\"%H:%M:%S\"))\n # print \"\\t>>file=%s\" % (filename)\n # print \"\\t>>start_time=%s, stop_time=%s\" % (start_time, stop_time)\n\n\n # schedual_play(filename, start_time, stop_time)\n # schedual_play()\n \n \n while True:\n time.sleep(60)\n print \".\"\n # EXIT\n time.sleep(3)\n print\n print \"%s: Shutting down modules...\" % (time.strftime(\"%H:%M:%S\"))\n pygame.quit()\n print \"%s: Exiting...\\n\\n\"% (time.strftime(\"%H:%M:%S\"))\n return\n\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except (KeyboardInterrupt, SystemExit):\n print\n print \"%s: Shutting down modules...\" % (time.strftime(\"%H:%M:%S\"))\n pygame.quit()\n print \"%s: Exiting...\\n\\n\"% (time.strftime(\"%H:%M:%S\"))\n sys.exit(1)\n\n" }, { "alpha_fraction": 0.6161074042320251, "alphanum_fraction": 0.6214765310287476, "avg_line_length": 34.380950927734375, "blob_id": "884688b3adf0caa413f15e029f27f536ba592ce5", "content_id": "a518a18c683415df36be834298a7ecd96b7b0ac7", "detected_licenses": [ "LicenseRef-scancode-public-domain", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 745, "license_type": "permissive", "max_line_length": 165, "num_lines": 21, "path": "/steps/parseconfig.py", "repo_name": "zymos/nature_emulator", "src_encoding": "UTF-8", "text": "from ConfigParser import SafeConfigParser\n\n\ndef config_get_true_false(parameter, value, default):\n if value == 1 || value == \"1\" || value == \"true\" || value == \"True\" || value == \"TRUE\":\n return True\n elif value == 0 || value == \"0\" || value == \"false\" || value == \"False\" || value == \"FALSE\":\n return False\n else:\n print \"Error: Variable \\\"%s\\\" is set to \\\"%s\\\", it should be set to \\\"True\\\" or \\\"False\\\", by default setting %s=%s\" % (parameter, value, parameter, default)\n return default\n\n\nparser = SafeConfigParser()\nparser.read('config.ini')\n\nprint parser.get('Basic', 'location_city')\n\n\nprint parser.get('Play_Times', 'start_time_dawn')\nprint parser.getint('Advanced', 
'audio_clip_crossfade_time_ms')\n\n\n" }, { "alpha_fraction": 0.6763485670089722, "alphanum_fraction": 0.7074688673019409, "avg_line_length": 27.294116973876953, "blob_id": "c78782b299aac3c102eeec089ef6d3281f16612a", "content_id": "676cd1a49657f6ea7906aa62673abfc762b1e74b", "detected_licenses": [ "LicenseRef-scancode-public-domain", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 482, "license_type": "permissive", "max_line_length": 72, "num_lines": 17, "path": "/README.txt", "repo_name": "zymos/nature_emulator", "src_encoding": "UTF-8", "text": "\n#######################################\n# \n# Sources:\n\nUS Fish and Wildlife Service, \"Animal sounds from nature\", Public Domain\n\thttps://archive.org/details/animalsounds1\n\nTom Mansell, \"Tom Mansell's Nature Sounds\", July 2011,\nAttribution 3.0 \nhttps://archive.org/details/GrassLakeSanctuarySounds\n\nhttp://archive.org/search.php?query=subject%3A%22Nature+Sounds%22&page=2\n\n\nGaia, \"The Sounds of Nature Collection\"\nUsage Attribution 3.0\nhttps://archive.org/details/Sounds_of_Nature_Collection\n" }, { "alpha_fraction": 0.5701719522476196, "alphanum_fraction": 0.6065573692321777, "avg_line_length": 27.248586654663086, "blob_id": "c3ab58474c1221cec890ff02e084b4db16eb586d", "content_id": "9e2aabbff5eef7f49a325c31f5f4ae63de1c73f6", "detected_licenses": [ "LicenseRef-scancode-public-domain", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 5002, "license_type": "permissive", "max_line_length": 73, "num_lines": 177, "path": "/configure.ini", "repo_name": "zymos/nature_emulator", "src_encoding": "UTF-8", "text": "####################################################\n#\n# Nature Emulator: Default Config File\n#\n# it is not recomended to edit this file\n# to override, edit \"config_user.ini\" instead\n#\n####################################################\n\n\n########################################################################\n# General Configs\n#\n[General]\n# LOCAL_CONFIG_FILE is the config file you should edit\n# can be a filename or full path of filename\nLOCAL_CONFIG_FILE = \"config_user.ini\"\n\nPRIMARY_AUDIO_DIRECTORY = \"audio\"\nSECONDARY_AUDIO_DIRECTORY = \"\"\n# location of audio files: \n# can be relative path or absolute path\n\nCITY_LOCATION = \"Denver\"\n# see: \n\n########################################################################\n# Hardware\n#\nENABLE_WIFI = \"false\"\nWIFI_ACCESS_POINT = \"\"\nWIFI_PASSWORD = \"\"\n\n\n########################################################################\n# Background sound\n#\n[Background_audio]\n\nBACKGROUND_SOUND_ENABLE = True\n# Will play the following file 24h a day (True or False)\nBACKGROUND_SOUND_FILENAME = '13_Streamside_Songbirds.ogg'\n# must be mp3/ogg/wav\n# can be filename or filenames full path\n# recomended: rain, waves, wind\nBACKGROUND_SOUND_VOLUME = 70\n# Volume for the background sound (0 min - 100 max)\n# needs to be a number between 0 and 100\n\n#########################################################################\n# Definition for times of the day\n#\n# Division of times of day\n# Beginning of day>>\n# dawn > morning > midday > evening > dusk > night\n# <<End of day\n# \n# dawn: twilight till sunrise example(05:00 to 05:40)\n# twilight = sun 18 degrees below the horizon\n# sunrise = sunrise\n# morning: sunrise till midmorning example(5:45 to 10:00)\n# sunrise = sunrise\n# midmorning = (12:00 - sunrise) / 2\n# midday: midmorning till midafternoon 
example(10:00 to 15:00\n# midmorning = (12:00 - sunrise) / 2\n# midafternoon = (sunset - 12:00) / 2\n# evening: midafternoon to sunset, example(15:00 to 20:15)\n# midafternoon: (sunset - 12:00) / 2\n# sunset = sunset\n# dusk: sunset till dusk, example(20:15 to 20:45)\n# sunset = sunset\n# dusk = sun 18 degrees below the horizon\n# night: dusk till latenight, example(20:45 to 22:00)\n# dusk = sun 18 degrees below the horizon\n# latenight = (24:00 - sunset) / 2\n#\n# Fixed times of the day\n#\n########################################################################\n# Time of day Modes\n# \n[Time_of_day_modes]\nENABLE_FIXED_TIME_MODE = True\n# if True \"start_of_day\" and \"end_of_day\" is set to \"fixed\"\n# True or False\nSTART_OF_THE_DAY_MODE = \"fixed\" \n# options \"fixed\", \"dawn\", \"sunrise\"\n# if start of day is \"fixed\", set \"dawn\" and \"sunrise\" times\nEND_OF_THE_DAY_MODE = \"fixed\" \n# options \"fixed\", \"sunset\", \"dusk\", \"latenight\"\n# if end of day is \"fixed\", set \"dawn\" and \"sunrise\" times\n# if sunset, audio will end at sunset\n# if dusk, audio will end at dusk\n# if latenight, audio will end a latenight\nENABLE_DAWN_AUDIO = 'true'\n\nENABLE_MORNING_AUDIO = 'true'\n\nENABLE_MIDDAY_AUDIO = 'true'\n\nENABLE_DUSK_AUDIO = 'true'\n\nENABLE_NIGHT_AUDIO = 'true'\n\n# if mode is \"fixed\" this allows you to disable audio after dusk\n# True or False\n\n######################################################################\n# Times of day for \"fixed\" mode\n# time format: HH:MM in 24h clock, example 22:00 is 10pm\n# times must be in order\n[Fixed_times_of_day]\nFIXED_TIME_DAWN = \"05:00\"\nFIXED_TIME_SUNRISE = \"05:45\"\nFIXED_TIME_MIDMORNING = \"10:00\"\nFIXED_TIME_MIDAFTERNOON = \"15:00\"\nFIXED_TIME_SUNSET = \"20:15\"\nFIXED_TIME_DUSK = \"20:45\"\nFIXED_TIME_LATENIGHT = \"22:00\"\n\n####################################################################\n# Audio Filenames\n# must be mp3, wav or ogg files\n# can be filename or full path\n[Filenames]\nDAWN_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n# recomended: light bird sounds\nMORNING_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n# recomended: moderate bird sounds\nMIDDAY_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n# recomended: light bird sounds\nEVENING_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n# recomended: moderate birds sounds, rain, storms\nDUSK_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n# recomended: crickets, frogs, \nNIGHT_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n# recomended: owls, coyotes, wolves\n\n\n\n#########################\n# Volume\n#\n# Volume for the sound (0 min - 100 max)\nDAWN_AUDIO_VOLUME = 100\nMORNING_AUDIO_VOLUME = 100\nMIDDAY_AUDIO_VOLUME = 100\nEVENING_AUDIO_VOLUME = 100\nDUSK_AUDIO_VOLUME = 100\nNIGHT_AUDIO_VOLUME = 100\n\n\n\n#########################\n# Optional\n#\n[Optional]\nOVERLAP_TIME = 120 \n# overlap time between files, between times of day (seconds)\n\nAUDIO_FADE_TIME = 20 \n# fade in/out time for each audio file (seconds)\n\nDEBUG = True\n# DEBUG = False\n# enable more verbose output for debuging (True or False)\n\n\n# Old Configs\n[Old_config]\nAUDIO_CROSSFADE_TIME = 2000 \n# time for audio crossfade (ms)\n# init_start_time_sec = 1\nstart_time = \"13:36\" \n# HH:MM\nstop_time = \"14:32\" \n# HH:MM\n\n\n" }, { "alpha_fraction": 0.7129310369491577, "alphanum_fraction": 0.7413793206214905, "avg_line_length": 22.200000762939453, "blob_id": "c12caf793070df1fbd493fea7c53bcdba24ad97e", "content_id": "5d444fd0978cfaf16cfeafdcb0aff645c9dad514", 
"detected_licenses": [ "LicenseRef-scancode-public-domain", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 1160, "license_type": "permissive", "max_line_length": 120, "num_lines": 50, "path": "/steps/config.ini", "repo_name": "zymos/nature_emulator", "src_encoding": "UTF-8", "text": "# This is the config file for the Nature Emulator\n#\n[Basic]\n# Basic confiuration\n\n# Closest major city found in docs/list_of_cities.txt\nlocation_city=\"Denver\"\n\n# Using a fixed play time for dawn and dusk, or the actual time for dawn and dusk, based on Location and day of the year\nfixed_play_time=\"True\"\n\n[Sound_Files]\n# Directory that holds soundfiles\nsound_directory=\"\"\n\n# sound file played a Dawn\nsound_filename_dawn=\"\"\n\nsound_filename_dusk=\"\"\n\n[Play_Times]\n# Times audio is playing for each time of the day\n#\n# if fixed_play_time=True, the following times will be used.\n# if fixed_play_time=False, these will be ignored and actual dawn/dusk\n# times will be calculated based on location and day of year\n# \n# Time format is HH:MM, for a 24h cycle. i.e. 14:23 is 14 hours, \n# 23 minutes.\n\n# Dawn\nstart_time_dawn=\"08:00\"\nstop_time_dawn=\"09:00\"\n\n# Dusk\nstart_time_dusk=\"21:00\"\nstop_time_dusk=\"22:00\"\n\n[Advanced]\n# Fade time between audio clips (milliseconds)\naudio_clip_crossfade_time_ms=2000\n\n# Fade time for the beginning and end of audio (seconds)\nfade_time=20\n\n# interval used in counter (seconds)\nstopwatch_interval=2\n\n# Enable debugging output\ndebug=True\n" }, { "alpha_fraction": 0.6247478127479553, "alphanum_fraction": 0.6523200869560242, "avg_line_length": 26.03636360168457, "blob_id": "4f69d44935c743def96bce87e6fb6ef2ab6973c4", "content_id": "d36fed55d2099750d113c03a3ea17abe92ff72ab", "detected_licenses": [ "LicenseRef-scancode-public-domain", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1487, "license_type": "permissive", "max_line_length": 76, "num_lines": 55, "path": "/steps/calculated_merged_play.py", "repo_name": "zymos/nature_emulator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport pygame\nfrom time import sleep\nimport threading\n\ndef play_audio(filename, length):\n print \"playing sound\"\n global audio_transition_time\n\n sound= pygame.mixer.Sound(filename)\n fade_time = audio_transition_time\n sound.play(loops=0, maxtime=0, fade_ms=fade_time)\n sleep(length / 1000)\n sound.fadeout(fade_time)\n return\n\ndef continious_play_for_set_durration(filename, play_durration):\n # repeat playing a single file for a set durreation with crossfade\n # between\n global audio_transition_time\n trans = audio_transition_time\n # sound= pygame.mixer.Sound(filename)\n length = 9450 # sound.get_length()\n # pygame.mixer.quit()\n\n fudge = 1\n time = 0 #seconds\n n = 0\n while( time + trans < play_durration - fudge ):\n if (n+1) * (length - trans )> play_durration:\n cutoff = play_durration - time\n play_audio(filename, cutoff)\n else:\n play_audio(filename, length)\n n += 1\n time = n * ( length - trans )\n return\n\n##########################\n# Vars\n# \naudio_transition_time = 2 * 1000 # time for audio crossfade (seconds)\ninit_start_time = 1\ntotal_play_time = 2 * 60 * 1000 # play time needed (seconds)\n\npygame.mixer.init()\npygame.mixer.pre_init(44100, -16, 2, 2048)\npygame.init()\nprint \"Running Prog\"\n\nfilename = '/home/zymos/Documents/docs/projects/pi/nature_emulator/bird.ogg'\ncontinious_play_for_set_durration(filename, 
total_play_time)\n\n\nsleep(7)\n" }, { "alpha_fraction": 0.586904764175415, "alphanum_fraction": 0.5982142686843872, "avg_line_length": 29.99262046813965, "blob_id": "3dacbd34364dbb2713b4406b237c8b8513f4e4a2", "content_id": "c84b1fcdfeb6dea2fb34c779c6d83050754a6874", "detected_licenses": [ "LicenseRef-scancode-public-domain", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8400, "license_type": "permissive", "max_line_length": 206, "num_lines": 271, "path": "/10-schedualed-loop.py", "repo_name": "zymos/nature_emulator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport pygame\n# from pygame import mixer\nimport time\nimport threading\nimport sys\n\n##########################################################\n# Configure\n#\n\ndef configure():\n ##########################\n # Global Vars\n global AUDIO_CROSSFADE_TIME, start_time_sec, stop_time_sec, STOPWATCH_INTERVAL, AUDIO_FADE_TIME, DEBUG, start_time, stop_time, BACKGROUND_SOUND_FILENAME, BACKGROUND_SOUND_VOLUME, BACKGROUND_SOUND_ENABLE\n\n # Background sound\n BACKGROUND_SOUND_ENABLE = True\n BACKGROUND_SOUND_FILENAME = '/home/zymos/Documents/docs/projects/pi/nature_emulator/13_Streamside_Songbirds.ogg'\n BACKGROUND_SOUND_VOLUME = 0.7\n\n # Location\n CITY_LOCATION = \"Denver\"\n\n\n # Modes\n\n FIXED_TIME_MODE_ENABLED = True\n START_OF_THE_DAY_MODE = \"fixed\" # options \"fixed\", \"dawn\", \"sunrise\"\n\n # Optional Configs\n AUDIO_CROSSFADE_TIME = 2000 # time for audio crossfade (ms)\n # init_start_time_sec = 1\n start_time = \"13:36\" # HH:MM\n stop_time = \"14:32\" # HH:MM\n \n start_time_sec = 6 * 60 *60 # (s)\n stop_time_sec = 6 * 60 * 60 + 60 # (s)\n STOPWATCH_INTERVAL = 2 # (s)\n AUDIO_FADE_TIME = 20 # (s)\n\n DEBUG = True\n # DEBUG = False\n # init_time = round(time.time()) # (s)\n # print init_time\n return\n\n\n\n\n#############################################################\n# Code\n#\n\n\n\n\ndef audio_play_loop(filename, start_time_sec, stop_time_sec):\n # repeat playing a single file for a set durreation with crossfade\n # between\n\n # play_durration: total time file should be played (s)\n # global AUDIO_CROSSFADE_TIME # crossfade (ms)\n # global stopwatch_count\n # global STOPWATCH_INTERVAL # (s)\n # global init_time # (s)\n \n play_durration = stop_time_sec - start_time_sec # (s)\n print \"%s: Starting Audio PlayerAudio, interval [%ds]\" % (time.strftime(\"%H:%M:%S\"), play_durration)\n print \"%s: Loading sound file...\" % (time.strftime(\"%H:%M:%S\"))\n # trans = AUDIO_CROSSFADE_TIME\n sound = pygame.mixer.Sound(filename)\n file_length = sound.get_length() # file length (s)\n print \"%s: Loaded sound file [%ds]\" % (time.strftime(\"%H:%M:%S\"), file_length//60)\n \n # Calculate how many play loops\n if file_length >= play_durration:\n play_loops=0\n else:\n if play_durration % file_length == 0:\n # perfect division\n play_loops = play_durration // file_length;\n else:\n # has remaining time\n play_loops = (play_durration // file_length) + 1;\n\n\n # calculate sleep time\n sleep_time = play_durration - AUDIO_FADE_TIME\n\n if DEBUG:\n print \"\\t>>play_durration [%ds]\" % (play_durration)\n print \"\\t>>file_length [%ds]\" % (file_length)\n print \"\\t>>loops [%ds]\" % (play_loops)\n print \"\\t>>Operation: play(%dx) > sleep [%ds] > fadeout > sleep [%ds] > return\" % (play_loops, sleep_time, AUDIO_FADE_TIME) \n\n \n # Playing\n sound.play(loops=int(play_loops), fade_ms=int(AUDIO_FADE_TIME*1000))\n time.sleep(sleep_time)\n 
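    # All but the last AUDIO_FADE_TIME seconds of the interval have been
    # slept through at this point; fade the still-playing sound out over
    # that remainder, then sleep until the fade finishes before returning.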
sound.fadeout(AUDIO_FADE_TIME*1000)\n time.sleep(AUDIO_FADE_TIME)\n\n return\n\n\n\n\ndef stopwatch(start_time_sec, stop_time_sec):\n # sets a global minute counter\n global stopwatch_count\n # global STOPWATCH_INTERVAL # (s)\n # global init_time # (s)\n # durration (s)\n durration = stop_time_sec - start_time_sec\n\n stopwatch_count = 0\n durration_minutes = durration / 60 # s to m\n\n print \"%s: Stopwatch started (max time = %dm)...\" % (time.strftime(\"%H:%M:%S\"), durration_minutes)\n # max_count = durration_minutes * 6\n max_count = durration / STOPWATCH_INTERVAL\n # print \"max_count=\", max_count\n while stopwatch_count < max_count:\n time.sleep(STOPWATCH_INTERVAL)\n stopwatch_count += 1\n # # print \"%s: Stopwatch: #%d - %ds\" % (time.strftime(\"%H:%M:%S\"), stopwatch_count, STOPWATCH_INTERVAL * stopwatch_count)\n return\n\n\n\n\n# def play_timer(filename, start_time_sec, stop_time_sec):\n # time to play\n # global init_time # (s)\n \n # debgugging code\n # if DEBUG:\n # print \"%s: Main function started... \\n\" % (time.strftime(\"%H:%M:%S\"))\n \n # play_durration = stop_time_sec - start_time_sec\n # continious_play_for_set_durration(filename, start_time_sec, stop_time_sec)\n # return\n\n\n\n# Initialize modules\ndef initialize():\n # initialize modules\n pygame.mixer.init()\n pygame.mixer.pre_init(44100, -16, 2, 2048)\n pygame.init()\n print \"%s: Audio module initialized...\" % (time.strftime(\"%H:%M:%S\"))\n return\n\n\n\n# def schedual_play():\n # time_delay = 1\n # play_event = threading.Timer(time_delay , play_timer, args=[filename, start_time_sec, stop_time_sec])\n # play_event.start()\n\n # return\n\ndef schedual_play(filename, start_time_hm, stop_time_hm):\n \n stop_time_s = hm_to_seconds(stop_time_hm) #(seconds)\n start_time_s = hm_to_seconds(start_time_hm) #(seconds)\n current_time_hm = time.strftime(\"%H:%M\") # HH:MM\n current_time_s = hm_to_seconds(current_time_hm) #(seconds)\n durration_s = stop_time_s - start_time_s #(seconds)\n wait_time_s = start_time_s - current_time_s #(seconds)\n\n if DEBUG:\n print \"%s: >>Current_time: %s [%ds]\" % (time.strftime(\"%H:%M:%S\"), current_time_hm, current_time_s)\n print \"\\t>>Start_time: %s [%ds]\" % (start_time_hm, start_time_s)\n print \"\\t>>Stop_time: %s [%ds]\" % (stop_time_hm, stop_time_s)\n print \"\\t>>Wait_time: [%ds]\" % (start_time_s - current_time_s)\n if start_time_s - current_time_s < 0:\n print \"\\t>>Warning wait_time is negative!: [%ds]\" % (start_time_s - current_time_s)\n print \"\\t>>Durration: [%ds]\" % (stop_time_s - start_time_s)\n if stop_time_s - start_time_s < 0:\n print \"\\t>>Warning durration is negative!: [%ds]\" % (stop_time_s - start_time_s)\n\n print \"%s: Scheduling play @ %s [%ds]\" % (time.strftime(\"%H:%M:%S\"), start_time_hm, durration_s)\n\n if DEBUG:\n print \"\\t>>Opening play thread in [%ds], durration=%ds\" % (wait_time_s, durration_s)\n\n\n # Play function\n # continious_play_for_set_durration(filename, start_time_sec, stop_time_sec)\n # event = threading.Timer(wait_time_s, continious_play_for_set_durration, [filename, start_time_s, stop_time_s])\n event = threading.Timer(wait_time_s, audio_play_loop, [filename, start_time_s, stop_time_s])\n event.daemon = True\n event.start()\n\n return\n\n\n\n# Converts HH:MM:SS to seconds\ndef hms_to_seconds(t):\n h, m, s = [int(i) for i in t.split(':')]\n return 3600*h + 60*m + s\n\n\n\n# Converts HH:MM:SS to seconds\ndef hm_to_seconds(t):\n h, m = [int(i) for i in t.split(':')]\n return 3600*h + 60*m \n\n\n\n\n# Main Function\ndef main():\n # 
main function\n\n print \"\\n\\n\\n\"\n \n # CONFIGURE\n print \"%s: Configuring...\" % (time.strftime(\"%H:%M:%S\"))\n configure()\n\n # print \" > start_time_sec = %ds; stop_time_sec = %ds; play_durration = %ds;\" % (start_time_sec, stop_time_sec, (stop_time_sec - start_time_sec) )\n # print \" > main_fade_in_time = %ds;transition_time = %dms; STOPWATCH_INTERVAL = %ds;\" % (AUDIO_FADE_TIME, AUDIO_CROSSFADE_TIME, STOPWATCH_INTERVAL)\n \n # INTIALIZE\n initialize() # initialize modules\n\n\n #PLAY\n # filename = '/home/zymos/Documents/docs/projects/pi/nature_emulator/250Hz_44100Hz_16bit_30sec.ogg'\n filename = '/home/zymos/Documents/docs/projects/pi/nature_emulator/13_Streamside_Songbirds.ogg'\n # continious_play_for_set_durration(filename, total_play_time)\n # play_timer(filename, start_time_sec, stop_time_sec)\n \n if DEBUG:\n print \"%s: >>starting schedual_play function\" % (time.strftime(\"%H:%M:%S\"))\n print \"\\t>>file=%s\" % (filename)\n print \"\\t>>start_time=%s, stop_time=%s\" % (start_time, stop_time)\n\n\n schedual_play(filename, start_time, stop_time)\n # schedual_play()\n \n \n while True:\n time.sleep(60)\n print \".\"\n # EXIT\n time.sleep(3)\n print\n print \"%s: Shutting down modules...\" % (time.strftime(\"%H:%M:%S\"))\n pygame.quit()\n print \"%s: Exiting...\\n\\n\"% (time.strftime(\"%H:%M:%S\"))\n return\n\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except (KeyboardInterrupt, SystemExit):\n print\n print \"%s: Shutting down modules...\" % (time.strftime(\"%H:%M:%S\"))\n pygame.quit()\n print \"%s: Exiting...\\n\\n\"% (time.strftime(\"%H:%M:%S\"))\n sys.exit(1)\n\n" }, { "alpha_fraction": 0.5646212100982666, "alphanum_fraction": 0.5817620754241943, "avg_line_length": 25.509090423583984, "blob_id": "c9cccdd62abfd62e32a4f2974c83ce66eb2eb6c1", "content_id": "4ccf6c1235a1414e9f940db058af86734728f3d6", "detected_licenses": [ "LicenseRef-scancode-public-domain", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2917, "license_type": "permissive", "max_line_length": 223, "num_lines": 110, "path": "/08-background_sound.py", "repo_name": "zymos/nature_emulator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport pygame\n# from pygame import mixer\nimport time\nimport threading\nimport sys\n\n##########################################################\n# Configure\n#\n\ndef configure():\n ##########################\n # Global Vars\n global audio_transition_time, start_time_sec, stop_time_sec, stopwatch_count, stopwatch_interval, main_fade_time, debug, start_time, stop_time, BACKGROUND_SOUND_FILENAME, BACKGROUND_SOUND_VOLUME, BACKGROUND_SOUND_ENABLE\n\n\n BACKGROUND_SOUND_FILENAME = '/home/zymos/Documents/docs/projects/pi/nature_emulator/13_Streamside_Songbirds.ogg'\n BACKGROUND_SOUND_VOLUME = 0.7\n BACKGROUND_SOUND_ENABLE = True\n\n audio_transition_time = 2000 # time for audio crossfade (ms)\n # init_start_time_sec = 1\n start_time = \"03:37\" # HH:MM\n stop_time = \"03:44\" # HH:MM\n \n start_time_sec = 6 * 60 *60 # (s)\n stop_time_sec = 6 * 60 * 60 + 60 # (s)\n stopwatch_count = 0\n stopwatch_interval = 2 # (s)\n main_fade_time = 20 # (s)\n\n debug = True\n # debug = False\n # init_time = round(time.time()) # (s)\n # print init_time\n return\n\n\n\n\n#############################################################\n# Code\n#\n\n\n# Initialize modules\ndef initialize():\n # initialize modules\n pygame.mixer.init()\n pygame.mixer.pre_init(44100, -16, 2, 2048)\n pygame.init()\n print \"%s: Audio module 
initialized...\" % (time.strftime(\"%H:%M:%S\"))\n return\n\n\n# Plays a constant background sound, in an infinite loop\ndef play_background_sound():\n if BACKGROUND_SOUND_ENABLE:\n print \"%s: Playing background sound\" % (time.strftime(\"%H:%M:%S\")) \n pygame.mixer.music.load(BACKGROUND_SOUND_FILENAME)\n pygame.mixer.music.set_volume(BACKGROUND_SOUND_VOLUME)\n pygame.mixer.music.play(-1)\n\n return\n\n\n\n# Main Function\ndef main():\n # main function\n\n print \"\\n\\n\\n\"\n \n # CONFIGURE\n print \"%s: Configuring...\" % (time.strftime(\"%H:%M:%S\"))\n configure()\n\n # print \" > start_time_sec = %ds; stop_time_sec = %ds; play_durration = %ds;\" % (start_time_sec, stop_time_sec, (stop_time_sec - start_time_sec) )\n # print \" > main_fade_in_time = %ds;transition_time = %dms; stopwatch_interval = %ds;\" % (main_fade_time, audio_transition_time, stopwatch_interval)\n \n # INTIALIZE\n initialize() # initialize modules\n\n\n #PLAY\n play_background_sound()\n \n while True:\n time.sleep(60)\n print \".\"\n # EXIT\n time.sleep(3)\n print\n print \"%s: Shutting down modules...\" % (time.strftime(\"%H:%M:%S\"))\n pygame.quit()\n print \"%s: Exiting...\\n\\n\"% (time.strftime(\"%H:%M:%S\"))\n return\n\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except (KeyboardInterrupt, SystemExit):\n print\n print \"%s: Shutting down modules...\" % (time.strftime(\"%H:%M:%S\"))\n pygame.quit()\n print \"%s: Exiting...\\n\\n\"% (time.strftime(\"%H:%M:%S\"))\n sys.exit(1)\n\n" }, { "alpha_fraction": 0.5792591571807861, "alphanum_fraction": 0.5917737483978271, "avg_line_length": 38.5544548034668, "blob_id": "c409d146ba3e04f84fc93c3f3abebe0c79de9617", "content_id": "24fd33b98487f41768dcb3b45eb5ae6384eb6c16", "detected_licenses": [ "LicenseRef-scancode-public-domain", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11986, "license_type": "permissive", "max_line_length": 212, "num_lines": 303, "path": "/steps/06-play-durration-with-fade-in-out.py", "repo_name": "zymos/nature_emulator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport pygame\n# from pygame import mixer\nimport time\nimport threading\nimport sys\n\n##########################################################\n# Configure\n#\n\ndef configure():\n ##########################\n # Global Vars\n global audio_transition_time, start_time, stop_time, stopwatch_count, stopwatch_interval, main_fade_time, debug\n\n audio_transition_time = 2000 # time for audio crossfade (ms)\n # init_start_time = 1\n start_time = 6 * 60 *60 # (s)\n stop_time = 6 * 60 * 60 + 60 # (s)\n stopwatch_count = 0\n stopwatch_interval = 2 # (s)\n main_fade_time = 20 # (s)\n\n debug = True\n # debug = False\n # init_time = round(time.time()) # (s)\n # print init_time\n return\n\n\n\n\n#############################################################\n# Code\n#\n\ndef play_audio(sound, start_time, stop_time, length, file_length):\n global audio_transition_time # ms\n global stopwatch_count # count * interval = stopwatch time \n global stopwatch_interval # (s)\n global main_fade_time # (s)\n global init_time # (s)\n # length: playtime length (s)\n\n fudge_factor = 0 # crossfade isn't exactly audo_transition_time\n # sound = pygame.mixer.Sound(filename) # load file\n # print \"Loading file: \", filename\n new_length = length - audio_transition_time/1000 - fudge_factor # \n\n\n\n\n #dubugging code\n if debug:\n print \"%s: Playing audio clip [%ds]\" % (time.strftime(\"%H:%M:%S\"), new_length)\n print \"\\t\\ttime(%ds to %ds) 
interval[%ds of %ds] crossfade(%ds) \"% (stopwatch_count*stopwatch_interval, stopwatch_count*stopwatch_interval+length, length, stop_time-start_time, audio_transition_time/1000)\n print \"\\t\\tstopwatch_time=%ds; file_length=%ds; full_play_durration=%ds\" % (stopwatch_count*stopwatch_interval, file_length, stop_time-start_time)\n print \"\\t\\tstopwatch_time=%ds; clip_play_length=%ds; new_length=%ds\" % (stopwatch_count*stopwatch_interval, length, new_length)\n print \"\\t\\tstopwatch_time=%ds; main_fade_time=%ds; transition_time=%ds\" % (stopwatch_count*stopwatch_interval, main_fade_time, audio_transition_time)\n\n # @ time=0; Start Volume at 0\n # if stopwatch_count < 1: # start with volume=0\n # volume = 0\n # sound.set_volume(volume)\n \n # Playing, with fade in/out\n if file_length > stop_time - start_time:\n # when file is longer than play duration\n if debug: print \"\\tfile is longer than play duration\" \n print \"%s: Volume: Fade In [%ds]\" % (time.strftime(\"%H:%M:%S\"), main_fade_time)\n sound.play(loops=0, maxtime=0, fade_ms=main_fade_time*1000) #play\n time.sleep(length - main_fade_time)\n print \"%s: Volume: Fade Out [%ds]\" % (time.strftime(\"%H:%M:%S\"), main_fade_time)\n sound.fadeout(main_fade_time*1000)\n time.sleep(main_fade_time)\n elif file_length >= main_fade_time:\n # when file is long enough for a complete fade in/out\n # but file_length is less than the full play_durration\n if debug: print \"\\tfile_length < play duration, but still >main_fade_time\"\n if stopwatch_count < 2:\n # Initial: Fade In\n print \"%s: Volume: Fade In [%ds]\" % (time.strftime(\"%H:%M:%S\"), main_fade_time) \n sound.play(loops=0, maxtime=0, fade_ms=main_fade_time*1000) #play\n time.sleep(new_length)\n sound.fadeout(audio_transition_time)\n elif stopwatch_count * stopwatch_interval >= stop_time - start_time - main_fade_time:\n # End: Fade Out\n # may make play longer than play_durration by main_fade_time\n sound.play(loops=0, maxtime=0, fade_ms=audio_transition_time) #play\n time.sleep(length)\n sound.fadeout(main_fade_time*1000)\n print \"%s: Volume: Fade Out [added %ds to play time for fade]\" % (time.strftime(\"%H:%M:%S\"), main_fade_time) \n time.sleep(main_fade_time)\n else:\n # Middle: no fade-in/out\n if debug: print \"\\tplaying clip in middle, no fade needed\"\n sound.play(loops=0, maxtime=0, fade_ms=audio_transition_time) #play\n time.sleep(length)\n sound.fadeout(audio_transition_time)\n else:\n # when file is shorter than a complete fade, so it uses a \n # incremental volume increase, not as smooth\n sound.play(loops=0, maxtime=0, fade_ms=audio_transition_time) #play\n time_count = 0\n while time_count <= new_length:\n # if stopwatch_count == 0:\n # print \"%s: Volume: Fade In (%d%%)\" % (time.strftime(\"%H:%M:%S\"), volume*100)\n # sound.set_volume(0)\n if stopwatch_count * stopwatch_interval <= main_fade_time:\n volume = 1.0 * stopwatch_count * stopwatch_interval / main_fade_time\n sound.set_volume(volume)\n if volume == 0.1:\n print \"%s: Volume: Fade In\" % (time.strftime(\"%H:%M:%S\"))\n if debug:\n print \"%s: Volume: Fade In (%d%%)\" % (time.strftime(\"%H:%M:%S\"), volume*100)\n elif stop_time - start_time - stopwatch_interval*stopwatch_count <= main_fade_time:\n volume = (1.0 * stop_time - start_time - stopwatch_count * stopwatch_interval) / main_fade_time\n sound.set_volume(volume)\n if volume == 0.9:\n print \"%s: Volume: Fade Out\" % (time.strftime(\"%H:%M:%S\"))\n if debug:\n print \"%s: Volume: Fade Out (%d%%)\" % (time.strftime(\"%H:%M:%S\"), volume*100)\n 
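            # Fallback branch below: between the fade-in and fade-out
            # windows the clip plays at full volume; the ramp above
            # approximates a fade by stepping the volume once per
            # stopwatch tick, so very short files fade in coarse steps.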
else:\n volume = 1.0\n sound.set_volume(volume)\n if debug:\n print \"%s: Volume: is full (%d%%)\" % (time.strftime(\"%H:%M:%S\"), volume*100)\n \n # Debuging code\n if debug:\n print \"\\t time=%ds: interval_play_time=%ds\" % (stopwatch_count * stopwatch_interval, stop_time - start_time)\n print \"\\t time=%ds: stopwatch_count=%d; stopwatch_interval=%ds\" % (stopwatch_count * stopwatch_interval, stopwatch_count, stopwatch_interval)\n print \"\\t time=%ds: clip_time=%ds; clip_length=%ds\" % (stopwatch_count * stopwatch_interval, time_count, new_length)\n print \"\\t time=%ds: volume=%d\" % (stopwatch_count * stopwatch_interval, volume)\n\n time_count += stopwatch_interval\n time.sleep(stopwatch_interval)\n sound.fadeout(audio_transition_time)\n\n if debug:\n print \"%s: Clip crossfading out (%dms)\" % (time.strftime(\"%H:%M:%S\"), audio_transition_time) \n\n return\n\n\n\n\ndef continious_play_for_set_durration(filename, start_time, stop_time):\n # repeat playing a single file for a set durreation with crossfade\n # between\n\n # play_durration: total time file should be played (s)\n global audio_transition_time # crossfade (ms)\n global stopwatch_count\n global stopwatch_interval # (s)\n global init_time # (s)\n \n play_durration = stop_time - start_time # (s)\n print \"%s: Audio play interval [%ds]\" % (time.strftime(\"%H:%M:%S\"), play_durration)\n print \"%s: Loading sound file...\" % (time.strftime(\"%H:%M:%S\"))\n trans = audio_transition_time\n sound = pygame.mixer.Sound(filename)\n file_length = sound.get_length() # file length (s)\n print \"%s: Loaded sound file [%ds]\" % (time.strftime(\"%H:%M:%S\"), file_length//60)\n \n t = threading.Thread(target=stopwatch, args=[start_time, stop_time])\n t.setDaemon(True)\n t.start() \n\n time.sleep(0.1) # ensure events are never exactly on stopwatch change\n\n\n \n #debuging\n if debug:\n print \"%s: Queue started...\" % (time.strftime(\"%H:%M:%S\"))\n print \" Audio clip length: %dm; play interval: %dm\" % (file_length//60, play_durration//60)\n \n # fudge = 1\n # time = 0 #seconds\n # n = 0\n \n play_count = play_durration // (file_length - audio_transition_time/1000)\n playtime_leftover = play_durration - (play_count * (file_length - audio_transition_time/1000))\n\n # print \" > file_length=%d; trans_time=%d; play_durration=%d\" % (length, audio_transition_time, play_durration)\n # print \" > play_count=%d; playtime_leftover=%d\" % (play_count, playtime_leftover)\n\n\n \n if file_length > play_durration: \n #file length is longer that play durration\n print \"%s: Queue: playing #[1 of 1] truncated to %ds\" % (time.strftime(\"%H:%M:%S\"), play_durration)\n play_audio(sound, start_time, stop_time, play_durration, file_length)\n else: \n # file length shorter than play durration, played more than once\n x = 0\n # print \"%s: Queue: playing #[%d of %d], %ds\" % (time.strftime(\"%H:%M:%S\"), x+1, play_count+1, length)\n while x < play_count:\n print \"%s: Queue: playing #[%d of %d], %ds\" % (time.strftime(\"%H:%M:%S\"), x+1, play_count+1, file_length)\n play_audio(sound, start_time, stop_time, file_length, file_length)\n x += 1\n if playtime_leftover != 0: # play leftover time on play durration\n print \"%s: Queue: playing #[%d of %d], truncated to %ds\" % (time.strftime(\"%H:%M:%S\"), x+1, play_count+1, playtime_leftover)\n play_audio(sound, start_time, stop_time, playtime_leftover, file_length)\n\n return\n\n\n\n\ndef stopwatch(start_time, stop_time):\n # sets a global minute counter\n global stopwatch_count\n global stopwatch_interval # 
(s)\n global init_time # (s)\n # durration (s)\n durration = stop_time - start_time\n\n stopwatch_count = 0\n durration_minutes = durration / 60 # s to m\n\n print \"%s: Stopwatch started (max time = %dm)...\" % (time.strftime(\"%H:%M:%S\"), durration_minutes)\n # max_count = durration_minutes * 6\n max_count = durration / stopwatch_interval\n # print \"max_count=\", max_count\n while stopwatch_count < max_count:\n time.sleep(stopwatch_interval)\n stopwatch_count += 1\n # # print \"%s: Stopwatch: #%d - %ds\" % (time.strftime(\"%H:%M:%S\"), stopwatch_count, stopwatch_interval * stopwatch_count)\n return\n\n\n\n\ndef play_timer(filename, start_time, stop_time):\n # time to play\n global init_time # (s)\n \n # debgugging code\n if debug:\n print \"%s: Main function started... \\n\" % (time.strftime(\"%H:%M:%S\"))\n \n # play_durration = stop_time - start_time\n continious_play_for_set_durration(filename, start_time, stop_time)\n return\n\n\n\ndef initialize():\n # initialize modules\n pygame.mixer.init()\n pygame.mixer.pre_init(44100, -16, 2, 2048)\n pygame.init()\n print \"%s: Audio module initialized...\" % (time.strftime(\"%H:%M:%S\"))\n return\n\n\n\n\ndef main():\n # main function\n\n print \"\\n\\n\\n\"\n \n # CONFIGURE\n print \"%s: Configuring...\" % (time.strftime(\"%H:%M:%S\"))\n configure()\n\n # print \" > start_time = %ds; stop_time = %ds; play_durration = %ds;\" % (start_time, stop_time, (stop_time - start_time) )\n # print \" > main_fade_in_time = %ds;transition_time = %dms; stopwatch_interval = %ds;\" % (main_fade_time, audio_transition_time, stopwatch_interval)\n \n # INTIALIZE\n initialize() # initialize modules\n\n\n #PLAY\n filename = '/home/zymos/Documents/docs/projects/pi/nature_emulator/250Hz_44100Hz_16bit_30sec.ogg'\n # filename = '/home/zymos/Documents/docs/projects/pi/nature_emulator/13_Streamside_Songbirds.ogg'\n # continious_play_for_set_durration(filename, total_play_time)\n play_timer(filename, start_time, stop_time)\n\n \n # EXIT\n time.sleep(3)\n print\n print \"%s: Shutting down modules...\" % (time.strftime(\"%H:%M:%S\"))\n pygame.quit()\n print \"%s: Exiting...\\n\\n\"% (time.strftime(\"%H:%M:%S\"))\n return\n\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except (KeyboardInterrupt, SystemExit):\n print\n print \"%s: Shutting down modules...\" % (time.strftime(\"%H:%M:%S\"))\n pygame.quit()\n print \"%s: Exiting...\\n\\n\"% (time.strftime(\"%H:%M:%S\"))\n sys.exit(1)\n\n" }, { "alpha_fraction": 0.6081647872924805, "alphanum_fraction": 0.6208682656288147, "avg_line_length": 37.42654037475586, "blob_id": "efb4f00179ef0d22d9f45f814456098a8ed612c5", "content_id": "6bf899851fd68d969601e496c9c40458533fe1d0", "detected_licenses": [ "LicenseRef-scancode-public-domain", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8108, "license_type": "permissive", "max_line_length": 257, "num_lines": 211, "path": "/steps/05-fade_in_multiplay.py", "repo_name": "zymos/nature_emulator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport pygame\n# from pygame import mixer\nimport time\nimport threading\nimport sys\n\n\n\n\ndef play_audio(sound, start_time, stop_time, length):\n global audio_transition_time # ms\n global stopwatch_count # count * interval = stopwatch time \n global stopwatch_interval # (s)\n global main_fade_time # (s)\n global init_time # (s)\n # length: playtime length (s)\n\n fudge_factor = 1 # crossfade isn't exactly audo_transition_time\n # sound = pygame.mixer.Sound(filename) # load file\n # 
print \"Loading file: \", filename\n new_length = length - audio_transition_time/1000 - fudge_factor # \n\n # Start Volume at 0\n if stopwatch_count < 1: # start with volume=0\n volume = 0\n sound.set_volume(volume)\n \n # print \"playing start...\"\n # Start playing\n sound.play(loops=0, maxtime=0, fade_ms=audio_transition_time) #play\n print \"%s: Playing audio clip time(%ds to %ds) interval[%ds of %ds] crossfade(%ds)...\" % (time.strftime(\"%H:%M:%S\"), stopwatch_count*stopwatch_interval, stopwatch_count*stopwatch_interval+length, length, stop_time-start_time, audio_transition_time/1000)\n # print \"played\"\n # sleep until play is over, and generates main fade\n\n # print \" stopwatch_interval=%d; stopwatch_count=%d; main_fade_time=%d\" % (stopwatch_interval, stopwatch_count, main_fade_time)\n\n\n # Setting the volume(fade in/out) and sleeping until play is stopped\n if stopwatch_count * stopwatch_interval > stop_time - start_time - main_fade_time:\n # Fading Out\n print \"%s: Volume: Fading out...\" % (time.strftime(\"%H:%M:%S\"))\n elif stopwatch_count * stopwatch_interval >= main_fade_time : \n print \"%s: Volume: Full...\" % (time.strftime(\"%H:%M:%S\"))\n # Normal (Full) Volume\n # print \" Full Volume\"\n sound.set_volume(1)\n time.sleep(new_length)\n else: \n # Fading In\n print \"%s: Volume: Fading in...\" % (time.strftime(\"%H:%M:%S\"))\n time_left = new_length\n volume = 0\n while time_left > 0: # Main Fade\n # increment main fade in volume increase ever stopwatch \n # interval until time is up\n # print\" Volume=%f play%d%d]\", % (volume, time_left, new_length, (stopwatch_interval*stopwatch_count), main_fade_time)\n if stopwatch_count * stopwatch_interval < main_fade_time:\n time.sleep(stopwatch_interval)\n time_left -= stopwatch_interval\n volume = 1.0 * stopwatch_count * stopwatch_interval / main_fade_time\n print \"%s: Fade-In: Volume increased to: %d%%\" % (time.strftime(\"%H:%M:%S\"), volume*100) \n print \"%s: Playing at full volume\" % (time.strftime(\"%H:%M:%S\")) \n\n else:\n volume = 1.0\n time.sleep(time_left)\n time_left = 0\n\n # print\" Volume=%f play[%d of %d] fade[%d of %d]\" % (volume, time_left, new_length, (stopwatch_interval*stopwatch_count), main_fade_time)\n sound.set_volume(volume)\n\n # time.sleep( new_length )\n sound.fadeout(audio_transition_time)\n print \"%s: Play crossfading out (%ds)\" % (time.strftime(\"%H:%M:%S\"), audio_transition_time) \n\n return\n\n\n\n\ndef continious_play_for_set_durration(filename, start_time, stop_time):\n # repeat playing a single file for a set durreation with crossfade\n # between\n\n # play_durration: total time file should be played (s)\n global audio_transition_time # crossfade (ms)\n global stopwatch_count\n global stopwatch_interval # (s)\n global init_time # (s)\n \n play_durration = stop_time - start_time # (s)\n print \"%s: Loading sound file...\" % (time.strftime(\"%H:%M:%S\"))\n trans = audio_transition_time\n sound = pygame.mixer.Sound(filename)\n length = sound.get_length() # file length (s)\n print \"%s: Loaded sound file...\" % (time.strftime(\"%H:%M:%S\"))\n \n t = threading.Thread(target=stopwatch, args=[start_time, stop_time])\n t.setDaemon(True)\n t.start() \n print \"%s: Queue started...\" % (time.strftime(\"%H:%M:%S\"))\n print \" Audiofile length: %dm; Sound interval length: %dm\" % (length//60, play_durration//60)\n \n # fudge = 1\n # time = 0 #seconds\n # n = 0\n \n play_count = play_durration // (length - audio_transition_time/1000)\n playtime_leftover = play_durration - (play_count * (length 
- audio_transition_time/1000))\n\n # print \" > file_length=%d; trans_time=%d; play_durration=%d\" % (length, audio_transition_time, play_durration)\n # print \" > play_count=%d; playtime_leftover=%d\" % (play_count, playtime_leftover)\n\n\n \n if length > play_durration: \n #file length is longer that play durration\n print \"%s: Queue: File longer than interval, queuing single truncated instance\" % (time.strftime(\"%H:%M:%S\"))\n print \" Queue: playing [1 of 1] %ds\" % (play_durration)\n play_audio(sound, start_time, stop_time, play_durration)\n else: \n # file length shorter than play durration, played more than once\n x = 0\n print \"%s: Queue: File length shorter than play interval, playing multiple instances\" % (time.strftime(\"%H:%M:%S\"))\n while x < play_count:\n print \" Queue: playing #[%d of %d], [%d of %d] seconds\" % (x+1, play_count+1, length, play_durration)\n play_audio(sound, start_time, stop_time, length)\n x += 1\n if playtime_leftover != 0: # play leftover time on play durration\n print \"%s: Queue: playing last truncated instance, %ds\" % (time.strftime(\"%H:%M:%S\"), playtime_leftover)\n play_audio(sound, start_time, stop_time, playtime_leftover)\n\n return\n\n\n\n\ndef stopwatch(start_time, stop_time):\n # sets a global minute counter\n global stopwatch_count\n global stopwatch_interval # (s)\n global init_time # (s)\n # durration (s)\n durration = stop_time - start_time\n\n stopwatch_count = 0\n durration_minutes = durration / 60 # s to m\n\n print \"%s: Stopwatch started (max time = %dm)...\\n\" % (time.strftime(\"%H:%M:%S\"), durration_minutes)\n # max_count = durration_minutes * 6\n max_count = durration / stopwatch_interval\n # print \"max_count=\", max_count\n while stopwatch_count < max_count:\n time.sleep(stopwatch_interval)\n stopwatch_count += 1\n # # print \"%s: Stopwatch: #%d - %ds\" % (time.strftime(\"%H:%M:%S\"), stopwatch_count, stopwatch_interval * stopwatch_count)\n return\n\n\n\n\ndef play_timer(filename, start_time, stop_time):\n # time to play\n global init_time # (s)\n \n print \"%s: Main function started... 
\\n\" % (time.strftime(\"%H:%M:%S\"))\n # play_durration = stop_time - start_time\n continious_play_for_set_durration(filename, start_time, stop_time)\n\n \n\n\n##########################\n# Global Vars\n# \naudio_transition_time = 2000 # time for audio crossfade (ms)\n# init_start_time = 1\nstart_time = 6 * 60 *60 # (s)\nstop_time = 6 * 60 * 60 + 600 # (s)\nstopwatch_count = 0\nstopwatch_interval = 2 # (s)\nmain_fade_time = 20 # (s)\ninit_time = round(time.time()) # (s)\n# print init_time\n\ntry:\n print \"\\n\\n\\n\"\n print \"%s: Configuration...\" % (time.strftime(\"%H:%M:%S\"))\n # print \" > start_time = %ds; stop_time = %ds; play_durration = %ds;\" % (start_time, stop_time, (stop_time - start_time) )\n # print \" > main_fade_in_time = %ds;transition_time = %dms; stopwatch_interval = %ds;\" % (main_fade_time, audio_transition_time, stopwatch_interval)\n\n pygame.mixer.init()\n pygame.mixer.pre_init(44100, -16, 2, 2048)\n pygame.init()\n print \"%s: Pygame initialized...\\n\" % (time.strftime(\"%H:%M:%S\"))\n\n filename = '/home/zymos/Documents/docs/projects/pi/nature_emulator/250Hz_44100Hz_16bit_30sec.ogg'\n # filename = '/home/zymos/Documents/docs/projects/pi/nature_emulator/13_Streamside_Songbirds.ogg'\n # continious_play_for_set_durration(filename, total_play_time)\n play_timer(filename, start_time, stop_time)\n\n time.sleep(7)\n\n\nexcept (KeyboardInterrupt, SystemExit):\n print\n print \"Shutting down modules...\"\n pygame.quit()\n print \"Exiting...\\n\\n\"\n sys.exit(1)\n" }, { "alpha_fraction": 0.5596944689750671, "alphanum_fraction": 0.5638483166694641, "avg_line_length": 36.9603271484375, "blob_id": "7beda6c871858a925fa0d12073afb95fd4b03362", "content_id": "9ca4e95a23df945bd760946440047d4439dad366", "detected_licenses": [ "LicenseRef-scancode-public-domain", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37315, "license_type": "permissive", "max_line_length": 625, "num_lines": 983, "path": "/12-configparser.py", "repo_name": "zymos/nature_emulator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# from pygame import mixer\nimport time\nimport threading\nimport sys\nimport os.path\nimport re\n\nDEBUG = True\n\n\n\n\n####################################################################\n# DEFAULT CONFIG\n#\n# there will be no error checking on these values,\n# they are assume to be entered correctly\ndef get_default_config():\n global DEBUG\n\n config_default = {}\n\n config_default['GLOBAL_CONFIG_FILE'] = 'configure.ini'\n # config_default['LOCAL_CONFIG_FILE'] = 'config_local.ini'\n\n config_default['PRIMARY_AUDIO_DIRECTORY'] = 'audio'\n config_default['SECONDARY_AUDIO_DIRECTORY'] = ''\n\n config_default['CITY_LOCATION'] = 'Denver'\n\n config_default['ENABLE_WIFI'] = False\n config_default['WIFI_ACCESS_POINT'] = ''\n config_default['WIFI_PASSWORD'] = ''\n\n config_default['BACKGROUND_SOUND_ENABLE'] = True\n config_default['BACKGROUND_SOUND_FILENAME'] = '13_Streamside_Songbirds.ogg'\n config_default['BACKGROUND_SOUND_VOLUME'] = 70\n\n config_default['ENABLE_FIXED_TIME_MODE'] = True\n config_default['START_OF_THE_DAY_MODE'] = 'fixed'\n config_default['END_OF_THE_DAY_MODE'] = 'fixed'\n config_default['ENABLE_FIXED_LATENIGHT'] = True\n\n config_default['FIXED_TIME_DAWN'] = '05:00'\n config_default['FIXED_TIME_SUNRISE'] = '05:45'\n config_default['FIXED_TIME_MIDMORNING'] = '10:00'\n config_default['FIXED_TIME_MIDAFTERNOON'] = '15:00'\n config_default['FIXED_TIME_SUNSET'] = '20:15'\n config_default['FIXED_TIME_DUSK'] = 
'20:45'\n config_default['FIXED_TIME_LATENIGHT'] = '22:00'\n\n config_default['DAWN_AUDIO_FILENAME'] = '13_Streamside_Songbirds.ogg'\n config_default['SUNRISE_AUDIO_FILENAME'] = '13_Streamside_Songbirds.ogg'\n config_default['MIDMORNING_AUDIO_FILENAME'] = '13_Streamside_Songbirds.ogg'\n config_default['MIDAFTERNOON_AUDIO_FILENAME'] = '13_Streamside_Songbirds.ogg'\n config_default['SUNSET_AUDIO_FILENAME'] = '13_Streamside_Songbirds.ogg'\n config_default['DUSK_AUDIO_FILENAME'] = '13_Streamside_Songbirds.ogg'\n config_default['LATENIGHT_AUDIO_FILENAME'] = '13_Streamside_Songbirds.ogg'\n\n config_default['OVERLAP_TIME'] = 120\n config_default['AUDIO_FADE_TIME'] = 20\n config_default['DEBUG'] = True\n config_default['AUDIO_CROSSFADE_TIME'] = 2000\n\n DEBUG = config_default['DEBUG']\n\n return config_default\n\n\n####################################################################\n# A boolean checker with some Debug code\ndef truth_or_dare2(config_type, param_name, value):\n\n if value == \"1\" or ( re.search('true|on|set|yes', value, re.IGNORECASE) is not None):\n if DEBUG:\n print \"\\t\\t>> Config: %s file [%s] is true\" % (config_type, param_name)\n return True\n elif value == \"0\" or ( re.search('false|off|unset|no', value, re.IGNORECASE) is not None):\n if DEBUG:\n print \"\\t>> Config: %s file [%s] is false\" % (config_type, param_name)\n return False\n else:\n # print \"\\t>> and is not 0/1, off/on, false/true, unset/set\" \n if DEBUG:\n print \"\\t>> Error: %s file [%s] is not true or false\" % (config_type, param_name) \n return None\n# truth_or_dare2 (end)\n\n\n\n\n####################################################################\n# A boolean checker with some Debug code\ndef truth_or_dare(param_name, param_global, param_user):\n\n #global\n if DEBUG:\n print \"\\t>> Config: GLOBAL file [%s]\" % (param_name) \n if param_global == \"1\" or ( re.search('true|on|set|yes', param_global, re.IGNORECASE) is not None):\n global_true = 1\n if DEBUG:\n print \"\\t\\t>> Config: GLOBAL file [%s] is true\" % (param_name)\n elif param_global == \"0\" or ( re.search('false|off|unset|no', param_global, re.IGNORECASE) is not None):\n global_true = 0\n if DEBUG:\n print \"\\t>> Config: GLOBAL file [%s] is false\" % (param_name)\n else:\n # print \"\\t>> and is not 0/1, off/on, false/true, unset/set\" \n global_true = 3\n if DEBUG:\n print \"\\t>> Config error: GLOBAL file [%s] is not true or false\" % (param_name) \n\n # user\n if DEBUG:\n print \"\\t>> Config: USER file [%s]\" % (param_name) \n if param_user == \"1\" or ( re.search('true|on|set|yes', param_user, re.IGNORECASE) is not None):\n user_true = 1\n if DEBUG:\n print \"\\t\\t>> Config: USER file [%s] is true\" % (param_name) \n elif param_global == \"0\" or ( re.search('false|off|unset|no', param_global, re.IGNORECASE) is not None):\n user_true = 0\n if DEBUG:\n print \"\\t>> Config: USER file [%s] is false\" % (param_name) \n else:\n # print \"\\t>> and is not 0/1, off/on, false/true, unset/set\" \n user_true = 3\n if DEBUG:\n print \"\\t>> Config error: USER file [%s] is not true or false\" % (param_name) \n\n if user_true == 3 and global_true == 3:\n print \"%s: Fatal error: config variable, [%s] is not a directory\" % (time.strftime(\"%H:%M:%S\"), param)\n print \" >> %s\" % (config[param])\n sys.exit(1)\n elif not global_true == 3:\n if global_true == 0:\n print \" >> Config: [%s] is False\" % (param_name) \n return False\n else:\n print \" >> Config: [%s] is True\" % (param_name) \n return True\n else:\n if user_true == 0:\n print 
\" >> Config: [%s] is False\" % (param_name) \n return False\n else:\n print \" >> Config: [%s] is True\" % (param_name) \n return True\n# truth_or_dare (end)\n\n\n\n\n\n# if param = 1, true, on, set, or yes\ndef is_true(param):\n if param == \"1\" or ( re.search('true|on|set|yes', param, re.IGNORECASE) is not None):\n return True\n else:\n return False\n# is_true (END)\n\n\n\n\n# if param = 0, false, off, unset, or no\ndef is_false(param):\n if param == \"0\" or (re.search('false|off|unset|no', param, re.IGNORECASE) is not None):\n return True\n else:\n return False\n# is_false (END)\n#\n\n\n \n# check if a string is a float:\ndef is_float(value):\n try:\n float(value)\n return True\n except ValueError:\n return False\n\n\n\n\n# check time format, 24-hour clock, HH:MM\ndef is_time_format(value):\n if DEBUG:\n print \"\\t\\t>> is_time_format(%s)\" % value\n try:\n time.strptime(value, '%H:%M')\n return True\n except ValueError:\n return False\n\n\n\n\n\n####################################################################\n# Get the volume from the config string\ndef get_volume(param, value):\n if is_float(value):\n # is a valid float\n value = float(value) \n if value > 100: # volume > 100\n if DEBUG:\n print \"\\t\\t>> Config error: %s greater than 100%%, setting to 100%%\" % (param)\n return 100.0\n elif value < 0: # volume is negative\n if DEBUG:\n print \"\\t\\t>> Config error: %s is negative, setting to 0%%\" % (param)\n return 0.0\n else: # volume is good\n # if DEBUG:\n # print \"\\t\\t>> Config: %s is set to %s%%\" % (param, value)\n return value\n else:\n # not a number\n if DEBUG:\n print \"\\t\\t>> Config error: %s is not a number, ignoring\" % (param)\n return None\n\n\n\ndef hm_to_seconds(t):\n h, m = [int(i) for i in t.split(':')]\n return 3600*h + 60*m\n\n\n\n##################################################################\n# This checks values of configs and corrects them\n#\ndef correct_family_values(config_type, config_in):\n\n params = [ \"LOCAL_CONFIG_FILE\", \n \"PRIMARY_AUDIO_DIRECTORY\",\n \"SECONDARY_AUDIO_DIRECTORY\",\n \"CITY_LOCATION\",\n \"ENABLE_WIFI\",\n \"WIFI_ACCESS_POINT\",\n \"WIFI_PASSWORD\",\n \"BACKGROUND_SOUND_ENABLE\",\n \"BACKGROUND_SOUND_FILENAME\",\n \"BACKGROUND_SOUND_VOLUME\",\n \"ENABLE_FIXED_TIME_MODE\",\n \"START_OF_THE_DAY_MODE\",\n \"END_OF_THE_DAY_MODE\",\n \"ENABLE_DAWN_AUDIO\",\n \"ENABLE_MORNING_AUDIO\",\n \"ENABLE_MIDDAY_AUDIO\",\n \"ENABLE_DUSK_AUDIO\",\n \"ENABLE_NIGHT_AUDIO\",\n \"FIXED_TIME_DAWN\",\n \"FIXED_TIME_SUNRISE\",\n \"FIXED_TIME_MIDMORNING\",\n \"FIXED_TIME_MIDAFTERNOON\",\n \"FIXED_TIME_SUNSET\",\n \"FIXED_TIME_DUSK\",\n \"FIXED_TIME_LATENIGHT\",\n \"DAWN_AUDIO_FILENAME\",\n \"MORNING_AUDIO_FILENAME\",\n \"MIDDAY_AUDIO_FILENAME\",\n \"EVENING_AUDIO_FILENAME\",\n \"DUSK_AUDIO_FILENAME\",\n \"NIGHT_AUDIO_FILENAME\",\n \"DAWN_AUDIO_VOLUME\",\n \"MORNING_AUDIO_VOLUME\",\n \"MIDDAY_AUDIO_VOLUME\",\n \"EVENING_AUDIO_VOLUME\",\n \"DUSK_AUDIO_VOLUME\",\n \"NIGHT_AUDIO_VOLUME\",\n \"OVERLAP_TIME\",\n \"AUDIO_FADE_TIME\",\n \"DEBUG\",\n \"AUDIO_CROSSFADE_TIME\",\n \"start_time\",\n \"stop_time\"]\n\n #####################################################\n # Process each parameter for configs\n # \n \n config_out = {}\n for param in params:\n # global config\n # config_source[param] = 'none'\n # config[param] = ''\n if not config_in.has_key(param):\n config_out[param] = None\n if DEBUG:\n print \"\\t>> %s config [%s] is missing\" % (config_type, param)\n elif config_in[param] == \"\" or config_in[param] == \"''\" or 
config_in[param] == \"\\\"\\\"\" :\n config_out[param] = None\n if DEBUG:\n print \"\\t>> %s config [%s] is blank\" % (config_type, param) \n else:\n config_out[param] = config_in[param].translate(None, '\\\"\\'')\n if DEBUG:\n print \"\\t>> %s config [%s]=[%s]\" % (config_type, param, config_out[param]) \n # e\n \n\n if DEBUG:\n print \"\\t>> Scripts location: [%s]\" % (get_script_path())\n\n\n\n\n ###############################################\n # LOCAL_CONFIG_FILE\n #\n param = 'LOCAL_CONFIG_FILE'\n \n if config_type == \"LOCAL\":\n config_out[param] = None\n #ignoring\n elif config_out[param] is None:\n if DEBUG:\n print \"\\t>> %s config: %s is missing\" % (config_type, param)\n # checking relative path\n elif os.path.isfile(os.path.normpath(os.path.join(get_script_path(), config_out[param]))):\n config_out[param] = os.path.normpath(os.path.join(get_script_path(), config_out[param]))\n if DEBUG:\n print \"\\t>> %s config: %s is a relative path\" % (config_type, param)\n print \"\\t>> %s\" % (config_out[param])\n #checking absolute path\n elif os.path.isfile(os.path.normpath(config_out[param])):\n config_out[param] = os.path.normpath(config_out[param])\n if DEBUG:\n print \"\\t>> %s config: %s is an absolute path\" % (config_type, param)\n print \"\\t>> %s\" % (config_out[param])\n else:\n config_out[param] = None\n if DEBUG:\n print \"\\t>> %s config: %s does not exist\" % (config_type, param)\n\n\n\n\n ###############################################\n # Audio directories\n #\n # Process AUDIO_DIR\n params = ['PRIMARY_AUDIO_DIRECTORY', 'SECONDARY_AUDIO_DIRECTORY']\n for param in params:\n # config[param] = config[param]\n # config[param] = config[param].translate(None, '\\\"\\'')\n if DEBUG:\n print \"\\t>> config: [%s]\" % (param)\n if config_out[param] is None:\n if DEBUG:\n print \"\\t>> %s config: %s is missing\" % (config_type, param)\n # checking relative path\n elif os.path.isdir(os.path.normpath(os.path.join(get_script_path(), config_out[param]))):\n config_out[param] = os.path.normpath(os.path.join(get_script_path(), config_out[param]))\n if DEBUG:\n print \"\\t>> %s config: %s is a relative path\" % (config_type, param)\n print \"\\t>> %s\" % (config_out[param])\n #checking absolute path\n elif os.path.isdir(os.path.normpath(config_out[param])):\n config_out[param] = os.path.normpath(config_out[param])\n if DEBUG:\n print \"\\t>> %s config: %s is an absolute path\" % (config_type, param)\n print \"\\t>> %s\" % (config_out[param])\n\n\n\n\n ##############################################\n # Background sound\n #\n param = 'BACKGROUND_SOUND_ENABLE'\n config_out[param] = truth_or_dare2(config_type, param, config_out[param])\n # Background sound enable\n if config_out['BACKGROUND_SOUND_ENABLE']:\n # background sound filename\n param = 'BACKGROUND_SOUND_FILENAME'\n # Dont check file location yet\n # if not get_audiofile_location2(config_type, param, config_out[param], config_out['PRIMARY_AUDIO_DIRECTORY'], config_out['SECONDARY_AUDIO_DIRECTORY']) == None:\n # background sound filename is valid\n # config_out[param] = get_audiofile_location2(config_type, param, config_out[param], config_out['PRIMARY_AUDIO_DIRECTORY'], config_out['SECONDARY_AUDIO_DIRECTORY'])\n # print \"\\t>> %s config: BACKGROUND_SOUND is enabled\" % (config_type)\n # print \"\\t>> %s Config: BACKGROUND_SOUND_FILENAME is \" % (config_type)\n # print \"\\t %s\" % (config_out['BACKGROUND_SOUND_FILENAME'])\n if is_audiofile(config_out[param]):\n # Setting BACKGROUND_SOUND_VOLUME\n if DEBUG:\n print \"\\t>> %s 
config, BACKGROUND_SOUND_FILENAME is [%s]\" % (config_type, config_out[param])\n param = 'BACKGROUND_SOUND_VOLUME'\n if is_float(config_out[param]):\n # USER config's VOLUME is a float\n config_out[param] = get_volume(param, config_out[param])\n if DEBUG:\n print \"\\t>> %s config, BACKGROUND_SOUND_VOUME is [%s%%]\" % (config_type, config_out[param])\n else:\n # BACKGROUND_SOUND_VOLUME is missing or invalid\n if DEBUG:\n print \"\\t>> %s config: error: BACKGROUND_SOUND_VOLUME is invalid, BACKGROUND_SOUND is disabled\" % (config_type)\n config_out['BACKGROUND_SOUND_ENABLE'] = False\n config_out['BACKGROUND_SOUND_FILENAME'] = None\n config_out['BACKGROUND_SOUND_VOLUME'] = None\n else: # file not mp3/wav/ogg\n if DEBUG:\n print \"\\t>> %s config: error: BACKGROUND_SOUND_VOLUME is not wav/ogg/mp3, BACKGROUND_SOUND is disabled\" % (config_type)\n config_out['BACKGROUND_SOUND_ENABLE'] = False\n config_out['BACKGROUND_SOUND_FILENAME'] = None\n config_out['BACKGROUND_SOUND_VOLUME'] = None\n else: # BACKGROUND_SOUND_ENABLE is False\n if DEBUG:\n print \"\\t>> %s config: error: BACKGROUND_SOUND_VOLUME is not wav/ogg/mp3, BACKGROUND_SOUND is disabled\" % (config_type)\n config_out['BACKGROUND_SOUND_ENABLE'] = False\n config_out['BACKGROUND_SOUND_FILENAME'] = None\n config_out['BACKGROUND_SOUND_VOLUME'] = None\n\n\n\n #################################################################\n # TIMES_OF_DAY modes\n #\n\n # MODES: checking TIME_OF_DAY - MODEs\n param = 'ENABLE_FIXED_TIME_MODE'\n config_out[param] = truth_or_dare2(config_type, param, config_out[param])\n\n param = 'ENABLE_DAWN_AUDIO'\n config_out[param] = truth_or_dare2(config_type, param, config_out[param])\n\n param = 'ENABLE_MORNING_AUDIO'\n config_out[param] = truth_or_dare2(config_type, param, config_out[param]) \n\n param = 'ENABLE_MIDDAY_AUDIO'\n config_out[param] = truth_or_dare2(config_type, param, config_out[param])\n\n param = 'ENABLE_DUSK_AUDIO'\n config_out[param] = truth_or_dare2(config_type, param, config_out[param])\n\n param = 'ENABLE_NIGHT_AUDIO'\n config_out[param] = truth_or_dare2(config_type, param, config_out[param]) \n\n check_times = False\n check_time_error = False\n param = 'START_OF_THE_DAY_MODE'\n if config_out[param] is not None:\n if re.search('fixed', config_out[param], re.IGNORECASE) is not None: \n config_out[param] = 'fixed'\n check_times = True\n elif re.search('dawn', config_out[param], re.IGNORECASE) is not None: \n config_out[param] = 'dawn'\n elif re.search('sunrise', config_out[param], re.IGNORECASE) is not None: \n config_out[param] = 'sunrise'\n else:\n config_out[param] = None\n\n param = 'END_OF_THE_DAY_MODE'\n if config_out[param] is not None:\n if re.search('fixed', config_out[param], re.IGNORECASE) is not None: \n config_out[param] = 'fixed'\n check_times = True\n elif re.search('dawn', config_out[param], re.IGNORECASE) is not None: \n config_out[param] = 'dusk'\n elif re.search('sunrise', config_out[param], re.IGNORECASE) is not None: \n config_out[param] = 'sunset'\n elif re.search('latenight', config_out[param], re.IGNORECASE) is not None: \n config_out[param] = 'latenight'\n\n else:\n config_out[param] = None\n\n\n # Checking FIXED modes with TIMES\n # check_times = False\n if config_out['ENABLE_FIXED_TIME_MODE'] is None:\n # ENABLE_FIXED_TIME_MODE is not true or false so diabling all \n # time mode valiables\n if DEBUG:\n print \"\\t>> %s config, ENABLE_FIXED_TIME_MODE is invalid\" % (config_type)\n print \"\\t>> %s config, disabling START_OF_THE_DAY_MODE, END_OF_THE_DAY_MODE, 
ENABLE_FIXED_LATENIGHT, FIXED_TIME_DAWN, FIXED_TIME_SUNRISE, FIXED_TIME_MIDMORNING, FIXED_TIME_MIDAFTERNOON, FIXED_TIME_SUNSET, FIXED_TIME_DUSK, FIXED_TIME_LATENIGHT\" % (config_type)\n config_out['START_OF_THE_DAY_MODE'] = None\n config_out['END_OF_THE_DAY_MODE'] = None\n config_out['ENABLE_FIXED_LATENIGHT'] = None\n config_out['FIXED_TIME_DAWN'] = None\n config_out['FIXED_TIME_SUNRISE'] = None\n config_out['FIXED_TIME_MIDMORNING'] = None\n config_out['FIXED_TIME_MIDAFTERNOON'] = None\n config_out['FIXED_TIME_SUNSET'] = None\n config_out['FIXED_TIME_DUSK'] = None\n config_out['FIXED_TIME_LATENIGHT'] = None\n # disable = True\n check_times = False\n elif config_out['ENABLE_FIXED_TIME_MODE']:\n # ENABLE_FIXED_TIME_MODE is true, checking fixed times_of_day\n config_out['START_OF_THE_DAY_MODE'] = None\n config_out['END_OF_THE_DAY_MODE'] = None\n config_out['ENABLE_FIXED_LATENIGHT'] = None\n # disable = False\n check_times = True\n # Check is each of the FIXED_TIME_* are valid formats\n else: # FIXED_TIME_MODE is false\n if config_out['START_OF_THE_DAY_MODE'] == \"fixed\":\n # disable = False\n check_times = True\n if config_out['END_OF_THE_DAY_MODE'] == \"fixed\":\n # disable = False\n check_times = True\n # config_out['ENABLE_FIXED_TIME_MODE'] = False\n \n\n ############################## \n # TIMES: checking FIXED_TIME_*\n # check_times_error = False\n if check_times: # checking times is needed\n check_times_error = False\n param = 'FIXED_TIME_DAWN'\n if not is_time_format(config_out[param]):\n check_times_error = True\n if DEBUG:\n print \"\\t>> %s config, %s time format is invalid\" % (config_type, param)\n param = 'FIXED_TIME_SUNRISE'\n if not is_time_format(config_out[param]):\n check_times_error = True\n if DEBUG:\n print \"\\t>> %s config, %s time format is invalid\" % (config_type, param)\n param = 'FIXED_TIME_MIDMORNING'\n if not is_time_format(config_out[param]):\n check_times_error = True\n if DEBUG:\n print \"\\t>> %s config, %s time format is invalid\" % (config_type, param)\n param = 'FIXED_TIME_MIDAFTERNOON'\n if not is_time_format(config_out[param]):\n check_times_error = True\n if DEBUG:\n print \"\\t>> %s config, %s time format is invalid\" % (config_type, param)\n param = 'FIXED_TIME_SUNSET'\n if not is_time_format(config_out[param]):\n check_times_error = True\n if DEBUG:\n print \"\\t>> %s config, %s time format is invalid\" % (config_type, param)\n param = 'FIXED_TIME_DUSK'\n if not is_time_format(config_out[param]):\n check_times_error = True\n if DEBUG:\n print \"\\t>> %s config, %s time format is invalid\" % (config_type, param)\n param = 'FIXED_TIME_LATENIGHT'\n if not is_time_format(config_out[param]):\n check_times_error = True\n if DEBUG:\n print \"\\t>> %s config, %s time format is invalid\" % (config_type, param)\n # Check if FIXED_TIME_*, times_of_day, are in the correct order\n # and are not the same\n if not check_times_error:\n if hm_to_seconds(config_out['FIXED_TIME_DAWN']) > hm_to_seconds(config_out['FIXED_TIME_SUNRISE']) or hm_to_seconds(config_out['FIXED_TIME_SUNRISE']) > hm_to_seconds(config_out['FIXED_TIME_MIDMORNING']) or hm_to_seconds(config_out['FIXED_TIME_MIDMORNING']) > hm_to_seconds(config_out['FIXED_TIME_MIDAFTERNOON']) or hm_to_seconds(config_out['FIXED_TIME_MIDAFTERNOON']) > hm_to_seconds(config_out['FIXED_TIME_SUNSET']) or hm_to_seconds(config_out['FIXED_TIME_SUNSET']) > hm_to_seconds(config_out['FIXED_TIME_DUSK']) or hm_to_seconds(config_out['FIXED_TIME_DUSK']) > hm_to_seconds(config_out['FIXED_TIME_LATENIGHT']):\n check_times_error = 
True\n                if DEBUG:\n                    print \"\\t>> %s config: error times of day are out of order\" % (config_type)\n\n        if check_times_error: # there is an error in time, call Dr. Who\n            config_out['ENABLE_FIXED_TIME_MODE'] = None\n            config_out['START_OF_THE_DAY_MODE'] = None\n            config_out['END_OF_THE_DAY_MODE'] = None\n            config_out['FIXED_TIME_DAWN'] = None\n            config_out['FIXED_TIME_SUNRISE'] = None\n            config_out['FIXED_TIME_MIDMORNING'] = None\n            config_out['FIXED_TIME_MIDAFTERNOON'] = None\n            config_out['FIXED_TIME_SUNSET'] = None\n            config_out['FIXED_TIME_DUSK'] = None\n            config_out['FIXED_TIME_LATENIGHT'] = None\n            if DEBUG:\n                print \"\\t>> %s config: error found, ignoring ENABLE_FIXED_TIME_MODE, START_OF_THE_DAY_MODE, END_OF_THE_DAY_MODE, FIXED_TIME_DAWN, FIXED_TIME_SUNRISE, FIXED_TIME_MIDMORNING, FIXED_TIME_MIDAFTERNOON, FIXED_TIME_SUNSET, FIXED_TIME_DUSK, and FIXED_TIME_LATENIGHT for %s config\" % (config_type, config_type)\n\n        else: # no time errors found, format ok, order ok\n            if DEBUG:\n                print \"\\t>> %s config: %s OK [%s]\" % (config_type, 'FIXED_TIME_DAWN', config_out['FIXED_TIME_DAWN'])\n                print \"\\t>> %s config: %s OK [%s]\" % (config_type, 'FIXED_TIME_SUNRISE', config_out['FIXED_TIME_SUNRISE'])\n                print \"\\t>> %s config: %s OK [%s]\" % (config_type, 'FIXED_TIME_MIDMORNING', config_out['FIXED_TIME_MIDMORNING'])\n                print \"\\t>> %s config: %s OK [%s]\" % (config_type, 'FIXED_TIME_MIDAFTERNOON', config_out['FIXED_TIME_MIDAFTERNOON'])\n                print \"\\t>> %s config: %s OK [%s]\" % (config_type, 'FIXED_TIME_SUNSET', config_out['FIXED_TIME_SUNSET'])\n                print \"\\t>> %s config: %s OK [%s]\" % (config_type, 'FIXED_TIME_DUSK', config_out['FIXED_TIME_DUSK'])\n                print \"\\t>> %s config: %s OK [%s]\" % (config_type, 'FIXED_TIME_LATENIGHT', config_out['FIXED_TIME_LATENIGHT'])\n\n    ########################\n    # Checking [Filenames]\n\n    if not is_audiofile(config_out['DAWN_AUDIO_FILENAME']):\n        config_out['DAWN_AUDIO_FILENAME'] = None\n        if DEBUG:\n            print \"\\t>> %s config: error %s is not an mp3, ogg or wav\" % (config_type, 'DAWN_AUDIO_FILENAME')\n    if not is_audiofile(config_out['MORNING_AUDIO_FILENAME']):\n        config_out['MORNING_AUDIO_FILENAME'] = None\n        if DEBUG:\n            print \"\\t>> %s config: error %s is not an mp3, ogg or wav\" % (config_type, 'MORNING_AUDIO_FILENAME')\n    if not is_audiofile(config_out['MIDDAY_AUDIO_FILENAME']):\n        config_out['MIDDAY_AUDIO_FILENAME'] = None\n        if DEBUG:\n            print \"\\t>> %s config: error %s is not an mp3, ogg or wav\" % (config_type, 'MIDDAY_AUDIO_FILENAME')\n    if not is_audiofile(config_out['EVENING_AUDIO_FILENAME']):\n        config_out['EVENING_AUDIO_FILENAME'] = None\n        if DEBUG:\n            print \"\\t>> %s config: error %s is not an mp3, ogg or wav\" % (config_type, 'EVENING_AUDIO_FILENAME')\n    if not is_audiofile(config_out['DUSK_AUDIO_FILENAME']):\n        config_out['DUSK_AUDIO_FILENAME'] = None\n        if DEBUG:\n            print \"\\t>> %s config: error %s is not an mp3, ogg or wav\" % (config_type, 'DUSK_AUDIO_FILENAME')\n    if not is_audiofile(config_out['NIGHT_AUDIO_FILENAME']):\n        config_out['NIGHT_AUDIO_FILENAME'] = None\n        if DEBUG:\n            print \"\\t>> %s config: error %s is not an mp3, ogg or wav\" % (config_type, 'NIGHT_AUDIO_FILENAME')\n\n\n    #############################\n    # Check individual volumes\n    param = 'DAWN_AUDIO_VOLUME'\n    config_out[param] = get_volume(param, config_out[param])\n    if DEBUG:\n        if config_out[param] is None:\n            print \"\\t>> %s config, error %s invalid\" % (config_type, param)\n        else:\n            print \"\\t>> %s config, %s [%s%%]\" % (config_type, param, config_out[param])\n
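    # (Note: the volume check below repeats verbatim for each time of day;
    # a hypothetical cleanup could loop over the six *_AUDIO_VOLUME keys
    # rather than repeating the block six times.)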
    param = 'MORNING_AUDIO_VOLUME'\n    config_out[param] = get_volume(param, config_out[param])\n    if DEBUG:\n        if config_out[param] is None:\n            print \"\\t>> %s config, error %s invalid\" % (config_type, param)\n        else:\n            print \"\\t>> %s config, %s [%s%%]\" % (config_type, param, config_out[param])\n    param = 'MIDDAY_AUDIO_VOLUME'\n    config_out[param] = get_volume(param, config_out[param])\n    if DEBUG:\n        if config_out[param] is None:\n            print \"\\t>> %s config, error %s invalid\" % (config_type, param)\n        else:\n            print \"\\t>> %s config, %s [%s%%]\" % (config_type, param, config_out[param])\n    param = 'EVENING_AUDIO_VOLUME'\n    config_out[param] = get_volume(param, config_out[param])\n    if DEBUG:\n        if config_out[param] is None:\n            print \"\\t>> %s config, error %s invalid\" % (config_type, param)\n        else:\n            print \"\\t>> %s config, %s [%s%%]\" % (config_type, param, config_out[param])\n    param = 'DUSK_AUDIO_VOLUME'\n    config_out[param] = get_volume(param, config_out[param])\n    if DEBUG:\n        if config_out[param] is None:\n            print \"\\t>> %s config, error %s invalid\" % (config_type, param)\n        else:\n            print \"\\t>> %s config, %s [%s%%]\" % (config_type, param, config_out[param])\n    param = 'NIGHT_AUDIO_VOLUME'\n    config_out[param] = get_volume(param, config_out[param])\n    if DEBUG:\n        if config_out[param] is None:\n            print \"\\t>> %s config, error %s invalid\" % (config_type, param)\n        else:\n            print \"\\t>> %s config, %s [%s%%]\" % (config_type, param, config_out[param])\n\n\n    return config_out\n# correct_family_values (END)\n\n\n\n\n\n\n\ndef is_audiofile(filename):\n    if filename is None: # guard: missing config values are not audio files\n        return False\n    if re.search('mp3|wav|ogg', os.path.splitext(filename)[1], re.IGNORECASE) is not None:\n        return True\n    else:\n        return False\n# def is_audiofile (END)\n\n\n\n\n\ndef check_audiofile_existance(file_name, primary_dir, secondary_dir):\n    if DEBUG:\n        print \"\\t:>>file_name %s\" % (file_name)\n        print \"\\t:>> primary_dir %s\" % (primary_dir)\n        print \"\\t:>> secondary_dir %s\" % (secondary_dir)\n        # 'or' guards below keep os.path.join from crashing on unset (None) dirs\n        print \"\\t:>> prime+filename %s\" % (os.path.join(primary_dir or '', file_name))\n        print \"\\t:>> secondary+filename %s\" % (os.path.join(secondary_dir or '', file_name))\n    if os.path.isabs(file_name) and os.path.isfile(file_name):\n        return file_name\n    elif primary_dir is not None and os.path.isfile(os.path.join(primary_dir, file_name)):\n        return os.path.join(primary_dir, file_name)\n    elif secondary_dir is not None and os.path.isfile(os.path.join(secondary_dir, file_name)):\n        return os.path.join(secondary_dir, file_name)\n    else:\n        return None\n# def check_audiofile_existance (END)\n\n\n\n#####################################################\n# Checks an audiofile's existence, if it's an mp3/ogg/wav,\n# and returns its full path\ndef get_audiofile_location2(config_type, param, value, primary_audio_directory, secondary_audio_directory):\n\n\n    if value is not None: # it's not blank\n        if is_audiofile(value): # check if actually mp3/wav/ogg\n            if DEBUG:\n                print \"\\t\\t>> %s config, [%s] is an mp3, wav or ogg\" % (config_type, param)\n            if check_audiofile_existance(value, primary_audio_directory, secondary_audio_directory) is not None: # check if file exists\n                audiofile_location = check_audiofile_existance(value, primary_audio_directory, secondary_audio_directory)\n                if DEBUG:\n                    print \"\\t\\t>> %s config, [%s] is: \" % (config_type, param)\n                    print \"\\t\\t>> : %s\" % (audiofile_location)\n                return audiofile_location # you are a winner\n            else: # file DNE\n                if DEBUG:\n                    print \"\\t\\t>> %s config, [%s] not found\" % (config_type, param)\n                return None\n
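        # Search order used above (see check_audiofile_existance): an
        # absolute path is tried first, then the primary audio directory,
        # then the secondary one; None means all three missed.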
else: # not an mp3/wav/ogg\n if DEBUG:\n print \"\\t\\t>> %s config, [%s] is not a wav/mp3/ogg file\" % (config_type, param)\n return None\n else: # value is blank or missing\n if DEBUG:\n print \"\\t\\t\\t>> %s config, [%s] is blank or missing\" % (config_type, param)\n\n return None\n# get_audiofile_location2 (END)\n\n\n\n\n\n\n# returns a full file link to an audiofile\ndef get_audiofile_location(param, config_user_missing, config_global_missing, config_user, config_global, primary_audio_directory, secondary_audio_directory):\n\n if DEBUG:\n print \"\\t\\t>> Checking [%s] file location\" % (param)\n print \"\\t\\t>> config_user_missing %s\" % (config_user_missing)\n print \"\\t\\t>> config_global_missing %s\" % (config_global_missing)\n print \"\\t\\t>> config_user %s\" % (config_user)\n print \"\\t\\t>> config_global %s\" % (config_global)\n print \"\\t\\t>> primary_audio_directory %s\" % (primary_audio_directory)\n print \"\\t\\t>> secondary_audio_directory %s\" % (secondary_audio_directory)\n\n if not config_user_missing: # it's not blank\n if DEBUG:\n print \"\\t\\t>> Checking for USER config filename\"\n if is_audiofile(config_user): # check if actually mp3/wav/ogg\n if DEBUG:\n print \"\\t\\t\\t>> file is an mp3, wav or ogg\"\n if check_audiofile_existance(config_user, primary_audio_directory, secondary_audio_directory) is not None: # check if file exists\n audiofile_location = check_audiofile_existance(config_user, primary_audio_directory, secondary_audio_directory)\n if DEBUG:\n print \"\\t\\t\\t>> Using audiofile from USER config: \"\n print \"\\t: %s\" % (audiofile_location)\n return audiofile_location # you are a winner\n else: # file does not exist\n if DEBUG:\n print \"\\t\\t\\t>> file not found, trying GLOBAL config\"\n else: # not an mp3/wav/ogg\n if DEBUG:\n print \"\\t\\t\\t>> is not a wav, mp3, or ogg file, trying GLOBAL config\"\n # Trying GLOBAL config, USER config failed or was unusable\n if not config_global_missing: \n if DEBUG:\n print \"\\t\\t>> Checking for GLOBAL config filename\"\n if is_audiofile(config_global): # check if actually mp3/wav/ogg\n if check_audiofile_existance(config_global, primary_audio_directory, secondary_audio_directory) is not None: # check if file exists\n audiofile_location = check_audiofile_existance(config_global, primary_audio_directory, secondary_audio_directory)\n if DEBUG:\n print \"\\t\\t\\t>> Using audiofile from GLOBAL config: \"\n print \"\\t: %s\" % (audiofile_location)\n return audiofile_location\n else: # file does not exist\n if DEBUG:\n print \"\\t\\t\\t>> file not found\"\n else: # not an mp3/wav/ogg\n if DEBUG:\n print \"\\t\\t\\t>> is not a wav, mp3, or ogg file\"\n\n # all has failed\n if DEBUG:\n print \"\\t>> No valid audiofile found\"\n return None\n\n# get_audiofile_location (END)\n\n\n\n\n\n#########################################\n# returns the script's location\ndef get_script_path():\n return os.path.dirname(os.path.realpath(sys.argv[0]))\n# def get_script_path (END)\n\n\n\n\n\n##############################################################\n# read config file, return in a dictionary\n#\ndef parse_config_file(config_type, config_filename):\n from ConfigParser import ConfigParser\n \n config_dic = {}\n \n if not os.path.exists(config_filename):\n print \"%s: Error: %s config file does not exist [%s]\" % (time.strftime(\"%H:%M:%S\"), config_type, config_filename)\n config_dic['CONFIG_EXISTS'] = False\n else:\n config_dic['CONFIG_EXISTS'] = True\n\n # get global config values\n parser_dic = ConfigParser()\n parser_dic.optionxform = str\n parser_dic.read(config_filename)\n \n if DEBUG:\n print \"\\t>> %s Config file: %s\" % (config_type, config_filename)\n for section_name in parser_dic.sections():\n if DEBUG:\n print '\\t >> %s Section: %s' % (config_type, section_name)\n for name, value in parser_dic.items(section_name):\n if DEBUG:\n print '\\t >> %s Value: %s = %s' % (config_type, name, value)\n config_dic[name] = value\n\n return 
config_dic\n\n\n\n#########################################\n# main configure\ndef configure():\n\n \n ###################################\n # get DEFAULT config\n #\n config_default = {}\n config_default = get_default_config()\n\n\n ###################################\n # get GLOBAL config\n #\n config_global = {}\n # get filename\n config_global_filename = config_default['GLOBAL_CONFIG_FILE']\n # get config\n config_global = parse_config_file('GLOBAL', config_global_filename)\n \n\n\n ##################################\n # get USER config\n #\n config_local = {}\n # get config\n if config_global.has_key('LOCAL_CONFIG_FILE'):\n config_local_filename = config_global['LOCAL_CONFIG_FILE']\n config_local = parse_config_file('LOCAL', config_local_filename)\n else:\n config_local['CONFIG_EXISTS'] = False\n\n\n ##################################\n # Check Values\n #\n # defaults are not checked\n config_global = correct_family_values('GLOBAL', config_global)\n # config = get_family_values(config_global,config_user)\n\n 
return\n# def configure (END)\n\n\n\n\n\n\n#############################################################\n# Code\n#\n\n# Main Function\ndef main():\n # main function\n\n print \"\\n\\n\\n\"\n \n # CONFIGURE\n print \"%s: Configuring...\" % (time.strftime(\"%H:%M:%S\"))\n configure()\n print \"done.\"\n \n # while True:\n # print \".\"\n # time.sleep(60)\n \n # EXIT\n time.sleep(1)\n print \"%s: Exiting...\\n\\n\"% (time.strftime(\"%H:%M:%S\"))\n return\n# def main (END)\n\n\n\n\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except (KeyboardInterrupt, SystemExit):\n print\n print \"%s: Exiting...\\n\\n\"% (time.strftime(\"%H:%M:%S\"))\n sys.exit(1)\n\n\n# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4\n" }, { "alpha_fraction": 0.7501044869422913, "alphanum_fraction": 0.7576264142990112, "avg_line_length": 30.077922821044922, "blob_id": "dde5b43dede65026c9a30ba962fd2c24e64bae52", "content_id": "80649e01477fb5344b21f1d6834b90427721327c", "detected_licenses": [ "LicenseRef-scancode-public-domain", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2395, "license_type": "permissive", "max_line_length": 497, "num_lines": 77, "path": "/README.md", "repo_name": "zymos/nature_emulator", "src_encoding": "UTF-8", "text": "# nature\\_emulator\n\n\n# Components\n* Raspberry Pi\n* Real time clock - saves time on power off\n* Class-D amplifier\n* 2 Speakers\n* USB drive - stores config, audiofiles and nature emulator software\n* LCD screen - for time display and basic config\n* Scrolling buttons - for setting time and basic config\n* Error LED - displays basic errors that aren't shown on screen\n\n\n# How it works\nThe Raspberry Pi runs a basic Linux OS, with a few scripts for basic operation. A USB drive stores a configuration file, audio files, and the nature\\_emulator software. Software on the USB drive can configure the nature\\_emulator, or the configuration file can be edited manually. Audio files can be added to the drive and set in the config. The nature\\_emulator software runs off the drive. The configuration file, audio files, and the nature\\_emulator software can be modified, added, and upgraded by removing the drive.\n\n# Detailed operation\n1. Raspberry Pi boots\n2. init scripts\n\t* check for errors, display on LED\n\t* Loads screen\n\t* Detect USB drive and run nature_emulator\n3. nature\\_emulator\n\t1. Read config\n\t2. Detect errors, override if possible or indicate error\n\t3. Check if init scripts need to be updated\n\t4. Select mode\n\t\t* Single play mode\n\t\t* Fixed time mode\n\t\t* Dynamic sunset/sunrise mode\n\t\t* Combination Fixed/Dynamic mode\n\t5. Detect current time\n\t6. Queue files to play for the day\n\t7. Wait for midnight\n\t8. 
Return to step 6.\n\t\t\n\n# USB drive file structure\n\tconfigure.ini\n\tsetup_windows.exe\n\tsetup_linux\n\tsetup_osx\n\taudiofiles/\n\tnature_emulator/\n\tnature_emulator/nature_emulator.py\n\tnature_emulator/audiofiles/\n\n\n# TODO\n* finish nature\\_emulator software\n* create setup software\n* create init scripts\n\t* LCD display\n\t* error LED\n\t* detect USB\n* Add RTC hardware\n* Add LCD hardware\n* Create 3D model of container\n* Test, test, test\n\n\n# Sounds of Nature sources\n\nhttp://justme.land/5-sources-for-fascinating-sounds-of-nature/\n\n\nThe Sounds of Nature Collection by Gaia, https://archive.org/details/Sounds\\_of\\_Nature\\_Collection - Creative Commons - Attribution 3.0\n\nBritish library – Listen to the sounds of nature - http://www.bl.uk/listentonature/main.html - Copyrighted\n\n## Animal sounds\nhttp://macaulaylibrary.org/ - copyrighted (huge library, short clips)\nUS Fish and Wildlife Service - https://archive.org/details/animalsounds1 - Public Domain (short clips)\n\n## Bird Calls\nhttp://www.xeno-canto.org/ - various licenses\n" }, { "alpha_fraction": 0.6167512536048889, "alphanum_fraction": 0.6713197827339172, "avg_line_length": 18.11494255065918, "blob_id": "5e113f874acd63e9fcefae3b84ddb6787063685b", "content_id": "2338b1148035965b5e4200ade8036ae945f87694", "detected_licenses": [ "LicenseRef-scancode-public-domain", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1576, "license_type": "permissive", "max_line_length": 94, "num_lines": 87, "path": "/steps/multiplay.py", "repo_name": "zymos/nature_emulator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport pygame\nfrom time import sleep\nimport threading\n\ndef play_audio(file_name):\n    print \"playing sound\"\n    sounda= pygame.mixer.Sound(file_name)\n    print \"play 1\"\n    sounda.play(loops=0, maxtime=0)\n    sleep(0.5)\n    print \"play 1\"\n    sounda.play(loops=0, maxtime=0)\n    sleep(0.5)\n    print \"play 1\"\n    sounda.play(loops=0, maxtime=0)\n    sleep(0.5)\n    sounda= pygame.mixer.Sound(file_name)\n    print \"play 1\"\n    sounda.play(loops=0, maxtime=0)\n    sleep(0.5)\n    print \"play 1\"\n    sounda.play(loops=0, maxtime=0)\n    sleep(0.5)\n    print \"play 1\"\n    sounda.play(loops=0, maxtime=0)\n    sleep(0.5)\n    return\n\npygame.mixer.init()\npygame.mixer.pre_init(44100, -16, 2, 2048)\npygame.init()\nprint \"Running Prog\"\n\nfilename = '/home/zymos/Documents/docs/projects/pi/nature_emulator/blackbird.wav'\nevent1 = threading.Timer(5, play_audio, [filename])\nevent1.start()\n\nsleep(1)\n# pygame.time.wait(1000)\nprint \"1\"\nsleep(1)\nprint \"2\"\n\nsleep(1)\nprint \"3\"\n\nsleep(1)\nprint \"4\"\n\nsleep(1)\nprint \"5\"\n\nsleep(1)\nprint \"6\"\n\nsleep(1)\nprint \"7\"\n\nsleep(1)\nprint \"8\"\n\nsleep(1)\nprint \"9\"\n\nsleep(1)\nprint \"10\"\n\nsleep(1)\nprint \"11\"\n\n# soundb= pygame.mixer.Sound('/home/zymos/Documents/docs/projects/pi/nature_emulator/271.ogg')\n# soundc= pygame.mixer.Sound('/home/zymos/Documents/docs/projects/pi/nature_emulator/271.ogg')\n# soundd= pygame.mixer.Sound('/home/zymos/Documents/docs/projects/pi/nature_emulator/271.ogg')\n\n# print \"one\"\n# sleep(1)\n# print \"two\"\n# soundb.play()\n# sleep(1)\n# print \"three\"\n# soundc.play()\n# sleep(1)\n# print \"four\"\n# soundd.play()\n\nsleep(7)\n" }, { "alpha_fraction": 0.5627883076667786, "alphanum_fraction": 0.5996924638748169, "avg_line_length": 19.97849464416504, "blob_id": "6641f228a62d86959823b1d76e6dc3ab7212b6f4", "content_id": "fe9280be432168043a66379336f21848fa5aee3d", "detected_licenses": [ 
"LicenseRef-scancode-public-domain", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1951, "license_type": "permissive", "max_line_length": 96, "num_lines": 93, "path": "/nature_emulator.py", "repo_name": "zymos/nature_emulator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7 \n\n#########################################################################\n#\tNature Emulator\n#\tAuthor: ZyMOS\n#\tDate: 16 April 2015\n#\t\n#\tDescription: This \n#\n#\tFiles:\tnature_emulator-init.sh: \n#\t\t\tinit script runs python script\n#\t\tnature_emulator.py: \n#\t\t\tpython script that does the work\n#\n# Requirements: pygame, pymad, astral, pytz\n#\n# References:\n# http://michelanders.blogspot.ru/2010/12/calulating-sunrise-and-sunset-in-python.html\n# https://en.wikipedia.org/wiki/List_of_tz_database_time_zones\n#\n##########################################################################\n\n\n################################\n# Config\n#\n\ncity_name = 'Denver'\naudio_dir = '/pub/audio/nature/Sounds_of_Nature_Collection'\n\naudiofile_dawn = '44_Predawn.mp3'\naudiofile_morning = ''\naudiofile_late_morning = ''\n\naudiofile_noon = ''\naudiofile_early_afternoon = ''\naudiofile_afternoon = ''\naudiofile_late_afternoon = ''\n\naudiofile_early_dusk = ''\naudiofile_dusk = ''\naudiofile_night = ''\n\n\naudiofile_00h = ''\naudiofile_01h = ''\naudiofile_02h = ''\naudiofile_03h = ''\naudiofile_04h = ''\naudiofile_05h = ''\naudiofile_06h = ''\naudiofile_07h = ''\naudiofile_08h = ''\naudiofile_09h = ''\naudiofile_11h = ''\naudiofile_12h = ''\naudiofile_13h = ''\naudiofile_14h = ''\naudiofile_15h = ''\naudiofile_16h = ''\naudiofile_17h = ''\naudiofile_18h = ''\naudiofile_19h = ''\naudiofile_20h = ''\naudiofile_21h = ''\naudiofile_22h = ''\naudiofile_23h = ''\n\n\n\n###############################\n# Code\n#\nimport time\nimport os\nimport mad # pymad\nimport pygame\nimport datetime\nfrom astral import Astral\n\n\ndef get_mp3_length( mp3_file ):\n # This returns the length in mili-seconds of the mp3 file\n audio = mad.MadFile(mp3_file)\n mp3_length = audio.total_time() \n print \"mp3 length: %d\" % mp3_length\n return mp3_length\n\naudio_file = audio_dir + \"/\" + audiofile_dawn\n\nprint audio_file\nlengthy = get_mp3_length(audio_file)\nprint lengthy\n" }, { "alpha_fraction": 0.5656270980834961, "alphanum_fraction": 0.6008526086807251, "avg_line_length": 29.724138259887695, "blob_id": "29806833a92cf7489abb4cb59b0cca85e164dfc1", "content_id": "56c5b17073c51b1c548a5840d57279314088e487", "detected_licenses": [ "LicenseRef-scancode-public-domain", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 4457, "license_type": "permissive", "max_line_length": 75, "num_lines": 145, "path": "/config_user.ini", "repo_name": "zymos/nature_emulator", "src_encoding": "UTF-8", "text": "####################################################\n#\n# Nature Emulator: Default Config File\n#\n# it is not recomended to edit this file\n# to override, edit \"config_user.ini\" instead\n#\n####################################################\n\n\n########################################################################\n# General Configs\n#\n[General]\nPRIMARY_AUDIO_DIRECTORY = \"\"\nSECONDARY_AUDIO_DIRECTORY = \"/pub/audio/nature/Sounds_of_Nature_Collection\"\n# location of audio files: full path for directory\n\nCITY_LOCATION = \"Denver\"\n# see: \n\n########################################################################\n# 
Hardware\n#\nENABLE_WIFI = \"true\"\nWIFI_ACCESS_POINT = \"\"\nWIFI_PASSWORD = \"\"\n\n\n########################################################################\n# Background sound\n#\n[Background_audio]\n\nBACKGROUND_SOUND_ENABLE = True\n# Will play the following file 24h a day (True or False)\nBACKGROUND_SOUND_FILENAME = '13_Streamside_Songbirds.ogg'\n# recomended: rain, waves, wind\nBACKGROUND_SOUND_VOLUME = 18\n# Volume for the background sound (0.0 min - 1.0 max)\n\n#########################################################################\n# Definition for times of the day\n#\n# Division of times of day\n# Beginning of day>>\n# dawn > morning > midday > evening > dusk > night\n# <<End of day\n# \n# dawn: twilight till sunrise example(05:00 to 05:40)\n# twilight = sun 18 degrees below the horizon\n# sunrise = sunrise\n# morning: sunrise till midmorning example(5:45 to 10:00)\n# sunrise = sunrise\n# midmorning = (12:00 - sunrise) / 2\n# midday: midmorning till midafternoon example(10:00 to 15:00\n# midmorning = (12:00 - sunrise) / 2\n# midafternoon = (sunset - 12:00) / 2\n# evening: midafternoon to sunset, example(15:00 to 20:15)\n# midafternoon: (sunset - 12:00) / 2\n# sunset = sunset\n# dusk: sunset till dusk, example(20:15 to 20:45)\n# sunset = sunset\n# dusk = sun 18 degrees below the horizon\n# night: dusk till latenight, example(20:45 to 22:00)\n# dusk = sun 18 degrees below the horizon\n# latenight = (24:00 - sunset) / 2\n#\n# Fixed times of the day\n#\n########################################################################\n# Time of day Modes\n# \n[Time_of_day_modes]\nENABLE_FIXED_TIME_MODE = True\n# if True \"start_of_day\" and \"end_of_day\" is set to \"fixed\"\n# True or False\nSTART_OF_THE_DAY_MODE = \"fixed\" # options \"fixed\", \"dawn\", \"sunrise\"\n# if start of day is \"fixed\", set \"dawn\" and \"sunrise\" times\nEND_OF_THE_DAY_MODE = \"fixed\" \n# options \"fixed\", \"sunset\", \"dusk\", \"latenight\"\n# if end of day is \"fixed\", set \"dawn\" and \"sunrise\" times\n# if sunset, audio will end at sunset\n# if dusk, audio will end at dusk\n# if latenight, audio will end a latenight\nENABLE_FIXED_LATENIGHT = True\n# if mode is \"fixed\" this allows you to disable audio after dusk\n# True or False\n\n######################################################################\n# Times of day for \"fixed\" mode\n# Time format: HH:MM in 24h clock, example 22:00 is 10pm\n[Fixed_times_of_day]\nFIXED_TIME_DAWN = \"05:00\"\nFIXED_TIME_SUNRISE = \"05:45\"\nFIXED_TIME_MIDMORNING = \"10:00\"\nFIXED_TIME_MIDAFTERNOON = \"15:00\"\nFIXED_TIME_SUNSET = \"20:15\"\nFIXED_TIME_DUSK = \"20:45\"\nFIXED_TIME_LATENIGHT = \"22:00\"\n\n####################################################################\n# Audio Filenames\n# mp3, wav or ogg files\n[Filenames]\nDAWN_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n# recomended: light bird sounds\nSUNRISE_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n# recomended: moderate bird sounds\nMIDMORNING_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n# recomended: light bird sounds\nMIDAFTERNOON_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n# recomended: moderate birds sounds, rain, storms\nSUNSET_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n# recomended: crickets, frogs, \nDUSK_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n# recomended: crickets, frogs\nLATENIGHT_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n# recomended: owls, coyotes, wolves\n\n\n#########################\n# Optional\n#\n[Optional]\nOVERLAP_TIME = 120 \n# overlap time 
between files, between times of day (seconds)\n\nAUDIO_FADE_TIME = 20 \n# fade in/out time for each audio file (seconds)\n\nDEBUG = True\n# DEBUG = False\n# enable more verbose output for debuging (True or False)\n\n\n# Old Configs\n[Old_config]\nAUDIO_CROSSFADE_TIME = 2000 \n# time for audio crossfade (ms)\n# init_start_time_sec = 1\nstart_time = \"13:36\" \n# HH:MM\nstop_time = \"14:32\" \n# HH:MM\n\n\n" }, { "alpha_fraction": 0.6034436225891113, "alphanum_fraction": 0.6456819772720337, "avg_line_length": 30.483051300048828, "blob_id": "914de7c95b720fe4b9c049454bcf21be776b103e", "content_id": "e00107adf87390b059871c306a4a425a2e45710c", "detected_licenses": [ "LicenseRef-scancode-public-domain", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3717, "license_type": "permissive", "max_line_length": 69, "num_lines": 118, "path": "/config_file.py", "repo_name": "zymos/nature_emulator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n\n\n# Main Configs\n\nAUDIO_DIRECTORY = \"\"\n # location of audio files: full path for directory\n\nCITY_LOCATION = \"Denver\"\n # see: \n\n\n####################\n# Background sound\nBACKGROUND_SOUND_ENABLE = True\n # Will play the following file 24h a day (True or False)\nBACKGROUND_SOUND_FILENAME = '13_Streamside_Songbirds.ogg'\n # recomended: rain, waves, wind\nBACKGROUND_SOUND_VOLUME = 0.7\n # Volume for the background sound (0.0 min - 1.0 max)\n\n######################################\n# Definition for times of the day\n#\n# Division of times of day\n# Beginning of day>>\n# dawn > morning > midday > evening > dusk > night\n# <<End of day\n# \n# dawn: twilight till sunrise example(05:00 to 05:40)\n# twilight = sun 18 degrees below the horizon\n# sunrise = sunrise\n# morning: sunrise till midmorning example(5:45 to 10:00)\n# sunrise = sunrise\n# midmorning = (12:00 - sunrise) / 2\n# midday: midmorning till midafternoon example(10:00 to 15:00\n# midmorning = (12:00 - sunrise) / 2\n# midafternoon = (sunset - 12:00) / 2\n# evening: midafternoon to sunset, example(15:00 to 20:15)\n# midafternoon: (sunset - 12:00) / 2\n# sunset = sunset\n# dusk: sunset till dusk, example(20:15 to 20:45)\n# sunset = sunset\n# dusk = sun 18 degrees below the horizon\n# night: dusk till latenight, example(20:45 to 22:00)\n# dusk = sun 18 degrees below the horizon\n# latenight = (24:00 - sunset) / 2\n#\n# Fixed times of the day\n\n###############################\n# Time of day Modes\n# \nENABLE_FIXED_TIME_MODE = True\n # if True \"start_of_day\" and \"end_of_day\" is set to \"fixed\"\n # True or False\nSTART_OF_THE_DAY_MODE = \"fixed\" # options \"fixed\", \"dawn\", \"sunrise\"\n # if start of day is \"fixed\", set \"dawn\" and \"sunrise\" times\nEND_OF_THE_DAY_MODE = \"fixed\" \n # options \"fixed\", \"sunset\", \"dusk\", \"latenight\"\n # if end of day is \"fixed\", set \"dawn\" and \"sunrise\" times\n # if sunset, audio will end at sunset\n # if dusk, audio will end at dusk\n # if latenight, audio will end a latenight\nENABLE_FIXED_LATENIGHT = True\n # if mode is \"fixed\" this allows you to disable audio after dusk\n # True or False\n\n##################################\n# Times of day for \"fixed\" mode\n# Time format: HH:MM in 24h clock, example 22:00 is 10pm\nFIXED_TIME_DAWN = \"05:00\"\nFIXED_TIME_SUNRISE = \"05:45\"\nFIXED_TIME_MIDMORNING = \"10:00\"\nFIXED_TIME_MIDAFTERNOON = \"15:00\"\nFIXED_TIME_SUNSET = \"20:15\"\nFIXED_TIME_DUSK = \"20:45\"\nFIXED_TIME_LATENIGHT = \"22:00\"\n\n#####################\n# Audio 
Filenames\n# mp3, wav or ogg files\nDAWN_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n    # recommended: light bird sounds\nSUNRISE_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n    # recommended: moderate bird sounds\nMIDMORNING_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n    # recommended: light bird sounds\nMIDAFTERNOON_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n    # recommended: moderate birds sounds, rain, storms\nSUNSET_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n    # recommended: crickets, frogs, \nDUSK_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n    # recommended: crickets, frogs\nLATENIGHT_AUDIO_FILENAME = '13_Streamside_Songbirds.ogg'\n    # recommended: owls, coyotes, wolves\n\n\n#########################\n# Optional\n#\nOVERLAP_TIME = 120 \n    # overlap time between files, between times of day (seconds)\n\nAUDIO_FADE_TIME = 20 \n    # fade in/out time for each audio file (seconds)\n\nDEBUG = True\n    # DEBUG = False\n    # enable more verbose output for debugging (True or False)\n\n\n# Old Configs\nAUDIO_CROSSFADE_TIME = 2000 # time for audio crossfade (ms)\n# init_start_time_sec = 1\nstart_time = \"13:36\" # HH:MM\nstop_time = \"14:32\" # HH:MM\n\n\n" }, { "alpha_fraction": 0.6039854288101196, "alphanum_fraction": 0.6359809041023254, "avg_line_length": 23.91608428955078, "blob_id": "990e205837fcaccc51f61f32e1d767a93acce62a", "content_id": "10d025627f2e924419a9a8935a034796d3beedbb", "detected_licenses": [ "LicenseRef-scancode-public-domain", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3563, "license_type": "permissive", "max_line_length": 118, "num_lines": 143, "path": "/steps/07-schedualed_play-at-time.py", "repo_name": "zymos/nature_emulator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport pygame\nimport time \nimport threading\n\ndef play_audio(filename, start_time_s, stop_time_s):\n\n    duration_s = stop_time_s - start_time_s\n\n    print \"playing sound\"\n    sounda= pygame.mixer.Sound(filename)\n    print \"play 1\"\n    sounda.play(loops=0, maxtime=0, fade_ms=5000)\n    time.sleep(duration_s)\n    sounda.fadeout(5000)\n    \n    return\n\n# Converts HH:MM:SS to seconds\ndef hms_to_seconds(t):\n    h, m, s = [int(i) for i in t.split(':')]\n    return 3600*h + 60*m + s\n\n# Converts HH:MM to seconds\ndef hm_to_seconds(t):\n    h, m = [int(i) for i in t.split(':')]\n    return 3600*h + 60*m\n\n\ndef hello(a, b, c):\n    print \"a=%s, b=%s, c=%s\" % (a, b, c)\n    return\n\n\n# Play file at start_time and stop at stop_time in a separate thread\n# start_time and stop_time is in HH:MM format\ndef schedual_play(filename, start_time_hm, stop_time_hm):\n    \n    stop_time_s = hm_to_seconds(stop_time_hm) #(seconds)\n    start_time_s = hm_to_seconds(start_time_hm) #(seconds)\n    current_time_hm = time.strftime(\"%H:%M\")\n    current_time_s = hm_to_seconds(current_time_hm) #(seconds)\n\n    print \"%s: Current_time: %s [%ds]\" % (time.strftime(\"%H:%M:%S\"), current_time_hm, current_time_s)\n    print \"%s: Start_time: %s [%ds]\" % (time.strftime(\"%H:%M:%S\"), start_time_hm, start_time_s)\n    print \"%s: Stop_time: %s [%ds]\" % (time.strftime(\"%H:%M:%S\"), stop_time_hm, stop_time_s)\n    print \"%s: Wait_time: [%ds]\" % (time.strftime(\"%H:%M:%S\"), start_time_s - current_time_s)\n    if start_time_s - current_time_s < 0:\n        print \"%s: Warning wait_time is negative!: [%ds]\" % (time.strftime(\"%H:%M:%S\"), start_time_s - current_time_s)\n    print \"%s: Duration: [%ds]\" % (time.strftime(\"%H:%M:%S\"), stop_time_s - start_time_s)\n    if stop_time_s - start_time_s < 0:\n        print \"%s: Warning 
duration is negative!: [%ds]\" % (time.strftime(\"%H:%M:%S\"), stop_time_s - start_time_s)\n\n    print \"%s: queuing\" % (time.strftime(\"%H:%M:%S\"))\n\n    # event2 = threading.Timer(2, hello, [\"a\", \"b\", \"c\"])\n    # event2.daemon = True\n    # event2.start()\n\n\n\n    event = threading.Timer(start_time_s - current_time_s, play_audio, [filename, start_time_s, stop_time_s])\n    event.daemon = True\n    event.start()\n\n    return\n\n\n\npygame.mixer.init()\npygame.mixer.pre_init(44100, -16, 2, 2048)\npygame.init()\nprint \"Running Prog\"\n\nfilename = '/home/zymos/Documents/docs/projects/pi/nature_emulator/250Hz_44100Hz_16bit_30sec.ogg'\n\nstart_time = \"22:45\"\nstop_time = \"22:46\"\n\n# print \"playing sound\"\n# sound= pygame.mixer.Sound(filename)\n# print \"play 1\"\n# sound.play(loops=0, maxtime=0, fade_ms=5000)\n# time.sleep(5)\n# sound.fadeout(5000)\n\n\n\nschedual_play(filename, start_time, stop_time)\n\n# event1 = threading.Timer(5, play_audio, [filename])\n# event1.start()\nprint \"1s\"\ntime.sleep(1)\nprint \"2s\"\n\ntime.sleep(1)\nprint \"3s\"\n\ntime.sleep(1)\nprint \"4s\"\n\ntime.sleep(1)\nprint \"5s\"\n\ntime.sleep(1)\nprint \"6s\"\n\ntime.sleep(1)\nprint \"7s\"\n\ntime.sleep(1)\nprint \"8s\"\n\ntime.sleep(1)\nprint \"9s\"\n\ntime.sleep(1)\nprint \"10s\"\n\nx=1\nwhile True:\n    print \"%s: sleeping for : 1m [%d]\" % (time.strftime(\"%H:%M:%S\"), x)\n    time.sleep(60)\n    x += 1\n\n\n# soundb= pygame.mixer.Sound('/home/zymos/Documents/docs/projects/pi/nature_emulator/271.ogg')\n# soundc= pygame.mixer.Sound('/home/zymos/Documents/docs/projects/pi/nature_emulator/271.ogg')\n# soundd= pygame.mixer.Sound('/home/zymos/Documents/docs/projects/pi/nature_emulator/271.ogg')\n\n# print \"one\"\n# sleep(1)\n# print \"two\"\n# soundb.play()\n# time.sleep(1)\n# print \"three\"\n# soundc.play()\n# time.sleep(1)\n# print \"four\"\n# soundd.play()\n\ntime.sleep(7)\n" }, { "alpha_fraction": 0.6295081973075867, "alphanum_fraction": 0.6944262385368347, "avg_line_length": 17.827159881591797, "blob_id": "06455781f32c4d2cbf5f7160565115ce73a2ba93", "content_id": "2ccef9569732482f3ec5156a1a8150bc3ed770c6", "detected_licenses": [ 
\"9\"\n\nsleep(1)\nprint \"10\"\n\nsleep(1)\nprint \"11\"\n\n# soundb= pygame.mixer.Sound('/home/zymos/Documents/docs/projects/pi/nature_emulator/271.ogg')\n# soundc= pygame.mixer.Sound('/home/zymos/Documents/docs/projects/pi/nature_emulator/271.ogg')\n# soundd= pygame.mixer.Sound('/home/zymos/Documents/docs/projects/pi/nature_emulator/271.ogg')\n\n# print \"one\"\n# leep(1)\n# print \"two\"\n# soundb.play()\n# sleep(1)\n# print \"three\"\n# soundc.play()\n# sleep(1)\n# print \"four\"\n# soundd.play()\n\nsleep(7)\n" }, { "alpha_fraction": 0.6339285969734192, "alphanum_fraction": 0.6741071343421936, "avg_line_length": 14.857142448425293, "blob_id": "0e0ec88bdadd99025b29015f86e2591683877b13", "content_id": "b72ab167d381a71dc763b62df1b61c2c95124ea2", "detected_licenses": [ "LicenseRef-scancode-public-domain", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 448, "license_type": "permissive", "max_line_length": 104, "num_lines": 28, "path": "/docs/manual.md", "repo_name": "zymos/nature_emulator", "src_encoding": "UTF-8", "text": "# Manual\n\n## Display\n\nDefault \n> 10:30:01\n> \n\nSet time:\n> Set time: \n> 10:30:*1*0\n\nTime mode:\n> Time mode:\n> 24 hour\nOptions:\n - 12 hour\n - 24 hour\n\nSound mode:\n> Sound mode:\n> actual sunset/sunrise\nOptions: \"mute\", \"actual sunset/sunrise\", \"fixed sunset/sunrise\", \"config settings\", \"continuious sound\"\n\nContiunious sound mode:\n> Contiunious sound mode:\n> rain\nOptions: \"ocean\", \"rain\", \"stream\", \"white noise\", \"static\", forest, thunderstorm\n\n\n\n\n" }, { "alpha_fraction": 0.7104591727256775, "alphanum_fraction": 0.7589285969734192, "avg_line_length": 26.034482955932617, "blob_id": "1baf329da30930fc9613408d985742c0163e5da3", "content_id": "4c7fac5f7f09684aba79bb623ccb1cea196e72c8", "detected_licenses": [ "LicenseRef-scancode-public-domain", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 784, "license_type": "permissive", "max_line_length": 95, "num_lines": 29, "path": "/steps/multitrack_play.py", "repo_name": "zymos/nature_emulator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport pygame\nfrom time import sleep\n\npygame.mixer.init()\npygame.mixer.pre_init(44100, -16, 2, 2048)\npygame.init()\nprint \"hey I finaly got this working!\"\nsounda= pygame.mixer.Sound('/home/zymos/Documents/docs/projects/pi/nature_emulator/tone01.wav')\nsoundb= pygame.mixer.Sound('/home/zymos/Documents/docs/projects/pi/nature_emulator/271.ogg')\nsoundc= pygame.mixer.Sound('/home/zymos/Documents/docs/projects/pi/nature_emulator/271.ogg')\nsoundd= pygame.mixer.Sound('/home/zymos/Documents/docs/projects/pi/nature_emulator/271.ogg')\n\nprint \"one\"\nprint sounda.get_length()\nsounda.play(loops=0, maxtime=0, fade_ms=5000)\nsleep(3)\nsounda.fadeout(4000)\nsleep(1)\nprint \"two\"\nsoundb.play()\nsleep(1)\nprint \"three\"\nsoundc.play()\nsleep(1)\nprint \"four\"\nsoundd.play()\n\nsleep(7)\n" }, { "alpha_fraction": 0.6133333444595337, "alphanum_fraction": 0.6316666603088379, "avg_line_length": 24, "blob_id": "eb459c8e91549ba00f0f796e205302cfde7e0cb2", "content_id": "1d464866025d643440a33d18d4bf98bd2780104b", "detected_licenses": [ "LicenseRef-scancode-public-domain", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 600, "license_type": "permissive", "max_line_length": 59, "num_lines": 24, "path": "/test.py", "repo_name": "zymos/nature_emulator", "src_encoding": "UTF-8", "text": "import 
datetime\nfrom astral import Astral\n\ncity_name = 'Denver'\n\na = Astral()\na.solar_depression = 'civil'\n\ncity = a[city_name]\n\nprint('Information for %s/%s\\n' % (city_name, city.region))\n\ntimezone = city.timezone\nprint('Timezone: %s' % timezone)\n\nprint('Latitude: %.02f; Longitude: %.02f\\n' % \\\n (city.latitude, city.longitude))\n\nsun = city.sun(date=datetime.date(2009, 4, 22), local=True)\nprint('Dawn: %s' % str(sun['dawn']))\nprint('Sunrise: %s' % str(sun['sunrise']))\nprint('Noon: %s' % str(sun['noon']))\nprint('Sunset: %s' % str(sun['sunset']))\nprint('Dusk: %s' % str(sun['dusk']))\n" } ]
21
iaiamomo/Graphs
https://github.com/iaiamomo/Graphs
4fe1f595bb6abb945d44a3f3c652bbeb40add4ef
ff479b53153ae0b7b34150cdc0e5ba14e54115f4
f5e4be7f2c342b53bd7d92de2041fe0baba4dff6
refs/heads/master
2023-02-27T11:02:29.236161
2021-02-07T11:12:43
2021-02-07T11:12:43
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5127822756767273, "alphanum_fraction": 0.6376224756240845, "avg_line_length": 31.157533645629883, "blob_id": "ef82b71fc088025e67cca3d6cd3bd5c20d125b1e", "content_id": "fc5336fb42b3febf54d5dbfb6068463a40a31015", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4694, "license_type": "no_license", "max_line_length": 70, "num_lines": 146, "path": "/codes/watts.py", "repo_name": "iaiamomo/Graphs", "src_encoding": "UTF-8", "text": "import random\nfrom numpy.random import choice\nimport networkx as nx\nfrom itertools import combinations\nimport matplotlib.pyplot as plt\nimport collections\n\ndef ws_graph(n, k, beta):\n\tg = nx.Graph()\n\n\tif k == 0:\n\t\tg.add_nodes_from(range(n))\n\t\treturn g\n\t\n\tfor i in range(n):\n\t\tfor j in range(i+1, i+k//2+1):\n\t\t\tg.add_edge(i, j % n)\n\n\t#considerin only k/2 neighbors on the right\n\tif beta > 0 and beta <= 1:\n\t\tfor i in range(n):\n\t\t\tneighbors = range(i+1, i+k//2+1)\n\t\t\tfor j in range(k//2):\n\t\t\t\tif neighbors[j] >= n:\n\t\t\t\t\tneighbors[j] = neighbors[j]%n\n\t\t\tnode = [i]*(k//2)\n\t\t\tfor n1,n2 in zip(node, neighbors):\n\t\t\t\tif random.random() < beta:\n\t\t\t\t\tnew_end = random.randint(0, n-1)\n\t\t\t\t\twhile new_end == n1 or g.has_edge(n1, new_end):\n\t\t\t\t\t\tnew_end = random.randint(0, n-1)\n\t\t\t\t\tg.remove_edge(n1, n2)\n\t\t\t\t\tg.add_edge(n1, new_end)\n\n\treturn g\n\ndef info_graph(g, info):\n\tprint(\"Watts-Strogatz graph with \" + info)\n\t# connectivity\n\tif nx.is_connected(g):\n\t\tprint(\"The graph is connected\")\n\telse:\n\t\tprint(\"The graph is not connected\")\n\n\t# clustering coefficient\n\tcc = nx.average_clustering(g)\n\tprint(\"Clustering coefficient: \" + str(cc))\n\t\n\t# diameter\n\td = nx.diameter(g)\n\tprint(\"Diameter: \" + str(d))\n\n\t# size biggest connected component\n\tlargest_cc = max(nx.connected_component_subgraphs(g), key=len)\n\tsize_cc = largest_cc.size()\n\tprint(\"Size largest component: \" + str(size_cc))\n\ndef print_degree(g, name, i):\n\tplt.figure(i)\n\n\tdegree_sequence = sorted([d for n,d in g.degree()], reverse=True)\n\tdegreeCount = collections.Counter(degree_sequence)\n\tdeg, cnt = zip(*degreeCount.items())\n\n\tfig, ax = plt.subplots()\n\tplt.bar(deg, cnt, width = 0.80, color = '#468BFB', alpha = 0.5)\n\n\tplt.title(\"Degree Histogram \" + name)\n\tplt.ylabel(\"Count\")\n\tplt.xlabel(\"Degree\")\n\tax.set_xticks([d + 0.4 for d in deg])\n\tax.set_xticklabels(deg)\n\ndef print_graph(g, info, i):\n\tplt.figure(i)\n\tplt.title(info)\n\tpos = nx.spring_layout(g)\n\tnx.draw_networkx_nodes(g, pos, node_size = 5, node_color = '#FF7F00')\n\tnx.draw_networkx_edges(g, pos, edge_color = '#4F4F4F', alpha = 0.2)\n\ndef main():\n\t#n = 15k\n\tg1 = ws_graph(15000, 25, 0.0002)\n\tg2 = ws_graph(15000, 25, 0.003)\n\tg3 = ws_graph(15000, 25, 0.05)\n\tg4 = ws_graph(15000, 25, 0.1)\n\tg5 = ws_graph(15000, 25, 0.5)\n\tg6 = ws_graph(15000, 25, 0.8)\n\tinfo_graph(g1, \"n=15k, k=25, beta=0.0002\")\n\tinfo_graph(g2, \"n=15k, k=25, beta=0.003\")\n\tinfo_graph(g3, \"n=15k, k=25, beta=0.05\")\n\tinfo_graph(g4, \"n=15k, k=25, beta=0.1\")\n\tinfo_graph(g5, \"n=15k, k=25, beta=0.5\")\n\tinfo_graph(g6, \"n=15k, k=25, beta=0.8\")\n\tprint_degree(g1, \"Watts-Strogatz n=15k, k=25, beta=0.0002\", 0)\n\tprint_degree(g2, \"Watts-Strogatz n=15k, k=25, beta=0.003\", 1)\n\tprint_degree(g3, \"Watts-Strogatz n=15k, k=25, beta=0.05\", 2)\n\tprint_degree(g4, \"Watts-Strogatz n=15k, k=25, beta=0.1\", 3)\n\tprint_degree(g5, 
\"Watts-Strogatz n=15k, k=25, beta=0.5\", 4)\n\tprint_degree(g6, \"Watts-Strogatz n=15k, k=25, beta=0.8\", 5)\n\n\t#n = 10k\n\tg7 = ws_graph(10000, 25, 0.0002)\n\tg8 = ws_graph(10000, 25, 0.003)\n\tg9 = ws_graph(10000, 25, 0.05)\n\tg10 = ws_graph(10000, 25, 0.1)\n\tg11 = ws_graph(10000, 25, 0.5)\n\tg12 = ws_graph(10000, 25, 0.8)\n\tinfo_graph(g7, \"n=10k, k=25, beta=0.0002\")\n\tinfo_graph(g8, \"n=10k, k=25, beta=0.003\")\n\tinfo_graph(g9, \"n=10k, k=25, beta=0.05\")\n\tinfo_graph(g10, \"n=10k, k=25, beta=0.1\")\n\tinfo_graph(g11, \"n=10k, k=25, beta=0.5\")\n\tinfo_graph(g12, \"n=10k, k=25, beta=0.8\")\n\tprint_degree(g7, \"Watts-Strogatz n=10k, k=25, beta=0.0002\", 6)\n\tprint_degree(g8, \"Watts-Strogatz n=10k, k=25, beta=0.003\", 7)\n\tprint_degree(g9, \"Watts-Strogatz n=10k, k=25, beta=0.05\", 8)\n\tprint_degree(g10, \"Watts-Strogatz n=10k, k=25, beta=0.1\", 9)\n\tprint_degree(g11, \"Watts-Strogatz n=10k, k=25, beta=0.5\", 10)\n\tprint_degree(g12, \"Watts-Strogatz n=10k, k=25, beta=0.8\", 11)\n\t#print_graph(g9, \"Watts-Strogatz n=10k, k=25, beta=0.05\", 0)\n\t#print_graph(g12, \"Watts-Strogatz n=10k, k=25, beta=0.8\", 0)\n\n\t#n = 5k\n\tg13 = ws_graph(5000, 25, 0.0002)\n\tg14 = ws_graph(5000, 25, 0.003)\n\tg15 = ws_graph(5000, 25, 0.05)\n\tg16 = ws_graph(5000, 25, 0.1)\n\tg17 = ws_graph(5000, 25, 0.5)\n\tg18 = ws_graph(5000, 25, 0.8)\n\tinfo_graph(g13, \"n=5k, k=25, beta=0.0002\")\n\tinfo_graph(g14, \"n=5k, k=25, beta=0.003\")\n\tinfo_graph(g15, \"n=5k, k=25, beta=0.05\")\n\tinfo_graph(g16, \"n=5k, k=25, beta=0.1\")\n\tinfo_graph(g17, \"n=5k, k=25, beta=0.5\")\n\tinfo_graph(g18, \"n=5k, k=25, beta=0.8\")\n\tprint_degree(g13, \"Watts-Strogatz n=5k, k=25, beta=0.0002\", 12)\n\tprint_degree(g14, \"Watts-Strogatz n=5k, k=25, beta=0.003\", 13)\n\tprint_degree(g15, \"Watts-Strogatz n=5k, k=25, beta=0.05\", 14)\n\tprint_degree(g16, \"Watts-Strogatz n=5k, k=25, beta=0.1\", 15)\n\tprint_degree(g17, \"Watts-Strogatz n=5k, k=25, beta=0.5\", 16)\n\tprint_degree(g18, \"Watts-Strogatz n=5k, k=25, beta=0.8\", 17)\n\n\tplt.show()\n\nmain()" }, { "alpha_fraction": 0.7440476417541504, "alphanum_fraction": 0.75, "avg_line_length": 20, "blob_id": "b03a3e67b3e98318bfb5949ccd9ac404de2a6f59", "content_id": "d26f274dc449348b748afeb7f50e5baa622f79d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 168, "license_type": "no_license", "max_line_length": 75, "num_lines": 8, "path": "/README.md", "repo_name": "iaiamomo/Graphs", "src_encoding": "UTF-8", "text": "# Graphs models\nHomework 1 of Social Network and Online Markets course at La Sapienza, Rome\n\n**Erdos-Renyi graph**\n\n**Watts-Strogatz graph**\n\n**Barabasi-Albert graph**\n" }, { "alpha_fraction": 0.6052975654602051, "alphanum_fraction": 0.6530411839485168, "avg_line_length": 25.145299911499023, "blob_id": "6bdda2366e79ecd24fe15cebec1c0cd8c7f2b82a", "content_id": "9170ab1154648b9f8daf1a2d25d13e6902cdfb27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3058, "license_type": "no_license", "max_line_length": 99, "num_lines": 117, "path": "/codes/barabasi.py", "repo_name": "iaiamomo/Graphs", "src_encoding": "UTF-8", "text": "import random\nfrom numpy.random import choice\nimport networkx as nx\nfrom itertools import combinations\nimport matplotlib.pyplot as plt\nimport collections\n\ndef ba_end(g):\n\tprob_degree = []\n\tnodes = g.nodes()\n\tn_edges = len(g.edges())\n\tfor n in nodes:\n\t\tprob_n = float(g.degree(n)) / float((2 * 
n_edges))\n\t\tprob_degree.append(prob_n)\n\tendpoint = choice(nodes, p = prob_degree)\n\treturn endpoint\n\ndef ba_graph(n, l):\n\tg = nx.Graph()\n\tg.add_node(0)\n\n\tif n > l+1:\n\t\tg.add_nodes_from(range(1, l+1))\n\t\tnodes = g.nodes()\n\t\tfor n1, n2 in combinations(nodes, 2):\n\t\t\tg.add_edge(n1, n2)\n\n\t\tfor i in range(l+1, n):\t\t\t\t\t\t\t\t\t\t\t# adding the other nodes\n\t\t\tg.add_node(i)\n\t\t\tfor j in range(l):\t\t\t\t\t\t\t\t\t\t\t# choose endpoint randomly proportional to Pv=degree(v)/sum(degree)\n\t\t\t\tendpoint = ba_end(g)\n\t\t\t\tedges = g.edges()\n\t\t\t\twhile (i, endpoint) in edges:\n\t\t\t\t\tendpoint = ba_end(g)\n\t\t\t\tg.add_edge(i, endpoint)\n\n\telse:\t\t\t\t\t\t\t\t\t\t\t# n <= l+1: build the complete seed graph\n\t\tg.add_nodes_from(range(1, l+1))\n\t\tnodes = g.nodes()\n\t\tfor n1, n2 in combinations(nodes, 2):\n\t\t\tg.add_edge(n1, n2)\n\n\treturn g\n\ndef info_graph(g, info):\n\tprint(info)\n\t# connectivity\n\tif nx.is_connected(g):\n\t\tprint(\"The graph is connected\")\n\telse:\n\t\tprint(\"The graph is not connected\")\n\n\t# clustering coefficient\n\tcc = nx.average_clustering(g)\n\tprint(\"Clustering coefficient: \" + str(cc))\n\t\n\t# diameter\n\td = nx.diameter(g)\n\tprint(\"Diameter: \" + str(d))\n\n\t# size biggest connected component\n\tlargest_cc = max(nx.connected_component_subgraphs(g), key=len)\n\tsize_cc = largest_cc.size()\n\tprint(\"Size largest component: \" + str(size_cc))\n\ndef print_degree(g, name, i):\n\tplt.figure(i)\n\n\tdegree_sequence = sorted([d for n,d in g.degree()], reverse=True)\n\tdegreeCount = collections.Counter(degree_sequence)\n\tdeg, cnt = zip(*degreeCount.items())\n\n\tfig, ax = plt.subplots()\n\tplt.bar(deg, cnt, width = 0.80, color = '#468BFB', alpha = 0.5)\n\n\tplt.title(\"Degree Histogram \" + name)\n\tplt.ylabel(\"Count\")\n\tplt.xlabel(\"Degree\")\n\tax.set_xticks([d + 0.4 for d in deg])\n\tax.set_xticklabels(deg)\n\ndef print_graph(g, info, i):\n\tplt.figure(i)\n\tplt.title(info)\n\tpos = nx.spring_layout(g)\n\tnx.draw_networkx_nodes(g, pos, node_size = 5, node_color = '#FF7F00')\n\tnx.draw_networkx_edges(g, pos, edge_color = '#4F4F4F', alpha = 0.2)\n\ndef main():\n\t#n = 15k\n\tg1 = ba_graph(15000, 4)\n\tg2 = ba_graph(15000, 10)\n\tinfo_graph(g1, \"Barabasi-Albert n=15k, l=4\")\n\tinfo_graph(g2, \"Barabasi-Albert n=15k, l=10\")\n\tprint_degree(g1, \"Barabasi-Albert n=15k, l=4\", 0)\n\tprint_degree(g2, \"Barabasi-Albert n=15k, l=10\", 1)\n\t\n\t#n = 10k\n\tg3 = ba_graph(10000, 4)\n\tg4 = ba_graph(10000, 10)\n\tinfo_graph(g3, \"Barabasi-Albert n=10k, l=4\")\n\tinfo_graph(g4, \"Barabasi-Albert n=10k, l=10\")\n\tprint_degree(g3, \"Barabasi-Albert n=10k, l=4\", 2)\n\tprint_degree(g4, \"Barabasi-Albert n=10k, l=10\", 3)\n\t#print_graph(g3, \"Barabasi-Albert n=10k, l=4\", 0)\n\t\n\t#n = 5k\n\tg5 = ba_graph(5000, 4)\n\tg6 = ba_graph(5000, 10)\n\tinfo_graph(g5, \"Barabasi-Albert n=5k, l=4\")\n\tinfo_graph(g6, \"Barabasi-Albert n=5k, l=10\")\n\tprint_degree(g5, \"Barabasi-Albert n=5k, l=4\", 4)\n\tprint_degree(g6, \"Barabasi-Albert n=5k, l=10\", 5)\n\t\n\tplt.show()\n\nmain()" }, { "alpha_fraction": 0.5346238017082214, "alphanum_fraction": 0.6474918127059937, "avg_line_length": 27.007633209228516, "blob_id": "6002b2b2e9c1e235c4aaf559f7f72b856cf19306", "content_id": "ca17ea14330b6a8892fbd0111a8534e79cdd96bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3668, "license_type": "no_license", "max_line_length": 79, "num_lines": 131, "path": "/codes/erdos.py", "repo_name": "iaiamomo/Graphs", "src_encoding": "UTF-8", 
"text": "import random\nfrom numpy.random import choice\nimport networkx as nx\nfrom itertools import combinations\nimport matplotlib.pyplot as plt\nimport collections\n\ndef er_graph(n,p):\n\tg = nx.Graph()\n\n\tnodes = range(n)\n\tg.add_nodes_from(nodes)\n\n\tif p <= 0:\n\t\treturn g\n\n\tfor n1, n2 in combinations(nodes, 2):\n\t if random.random() < p:\n\t g.add_edge(n1, n2)\n\n\treturn g\n\ndef info_graph(g, info):\n\tprint(\"Erdos-Renyi graph with \" + info)\n\t# connectivity\n\tif nx.is_connected(g):\n\t\tprint(\"The graph is connected\")\n\telse:\n\t\tprint(\"The graph is not connected\")\n\n\t# clustering coefficient\n\tcc = nx.average_clustering(g)\n\tprint(\"Clustering coefficient: \" + str(cc))\n\t\n\t# diameter\n\td = 0\n\tlargest_cc = max(nx.connected_component_subgraphs(g), key=len)\n\tif nx.is_connected(g) == False:\n\t\td_max = 0\n\t\tlist_conn_comp = [g.subgraph(c).copy() for c in nx.connected_components(g)]\n\t\tfor sg in list_conn_comp:\n\t\t\td_conn_comp = nx.diameter(sg)\n\t\t\tif d_conn_comp > d_max:\n\t\t\t\td_max = d_conn_comp\n\t\td = d_max\n\telse:\n\t\td = nx.diameter(g)\n\tprint(\"Diameter: \" + str(d))\n\n\t# size biggest connected component\n\tlargest_cc = max(nx.connected_component_subgraphs(g), key=len)\n\tsize_cc = largest_cc.size()\n\tprint(\"Size largest component: \" + str(size_cc))\n\ndef print_degree(g, name, i):\n\tplt.figure(i)\n\tdegree_sequence = sorted([d for n,d in g.degree()], reverse=True)\n\tdegreeCount = collections.Counter(degree_sequence)\n\tdeg, cnt = zip(*degreeCount.items())\n\n\tfig, ax = plt.subplots()\n\tplt.bar(deg, cnt, width = 0.80, color = '#468BFB', alpha = 0.5)\n\n\tplt.title(\"Degree Histogram \" + name)\n\tplt.ylabel(\"Count\")\n\tplt.xlabel(\"Degree\")\n\tax.set_xticks([d + 0.4 for d in deg])\n\tax.set_xticklabels(deg)\n\ndef print_graph(g, info, i):\n\tplt.figure(i)\n\tplt.title(info)\n\tpos = nx.spring_layout(g)\n\tnx.draw(g, pos, node_size = 5, node_color = '#FF7F00', edge_color = '#4F4F4F')\n\t'''\n\tnx.draw_networkx_nodes(g, pos, node_size = 5, node_color = '#FF7F00')\n\tnx.draw_networkx_edges(g, pos, edge_color = '#4F4F4F', alpha = 0.2)\n\t'''\n\ndef main():\n\t\n\t#n = 15k\n\tg0 = er_graph(15000, 0.00005)\n\tg1 = er_graph(15000, 0.00009)\n\tg2 = er_graph(15000, 0.0007)\n\tg3 = er_graph(15000, 0.002)\n\tinfo_graph(g0, \"n=15k, p=0.00005\")\n\tinfo_graph(g1, \"n=15k, p=0.00009\")\n\tinfo_graph(g2, \"n=15k, p=0.0007\")\n\tinfo_graph(g3, \"n=15k, p=0.002\")\n\tprint_degree(g0, \"Erdos-Renyi n=15k, p=0.00005\", 0)\n\tprint_degree(g1, \"Erdos-Renyi n=15k, p=0.00009\", 1)\n\tprint_degree(g2, \"Erdos-Renyi n=15k, p=0.0007\", 2)\n\tprint_degree(g3, \"Erdos-Renyi n=15k, p=0.002\", 3)\n\t\n\t#n = 10k\n\tg4 = er_graph(10000, 0.00008)\n\tg5 = er_graph(10000, 0.0004)\n\tg6 = er_graph(10000, 0.0009)\n\tg7 = er_graph(10000, 0.002)\n\tg8 = er_graph(10000, 0.005)\n\tinfo_graph(g4, \"n=10k, p=0.00008\")\n\tinfo_graph(g5, \"n=10k, p=0.0004\")\n\tinfo_graph(g6, \"n=10k, p=0.0009\")\n\tinfo_graph(g7, \"n=10k, p=0.002\")\n\tinfo_graph(g8, \"n=10k, p=0.005\")\n\tprint_degree(g4, \"Erdos-Renyi n=10k, p=0.00008\", 4)\n\tprint_degree(g5, \"Erdos-Renyi n=10k, p=0.0004\", 5)\n\tprint_degree(g6, \"Erdos-Renyi n=10k, p=0.0009\", 6)\n\tprint_degree(g7, \"Erdos-Renyi n=10k, p=0.002\", 7)\n\tprint_degree(g8, \"Erdos-Renyi n=10k, p=0.005\", 8)\n\t#print_graph(g6, \"Erdos-Renyi n=10k, p=0.0009\", 0)\n\t\n\t#n = 5k\n\tg9 = er_graph(5000, 0.0002)\n\tg10 = er_graph(5000, 0.0008)\n\tg11 = er_graph(5000, 0.002)\n\tg12 = er_graph(5000, 0.005)\n\tinfo_graph(g9, 
\"n=5k, p=0.0002\")\n\tinfo_graph(g10, \"n=5k, p=0.0008\")\n\tinfo_graph(g11, \"n=5k, p=0.002\")\n\tinfo_graph(g12, \"n=5k, p=0.005\")\n\tprint_degree(g9, \"Erdos-Renyi n=5k, p=0.0002\", 9)\n\tprint_degree(g10, \"Erdos-Renyi n=5k, p=0.0008\", 10)\n\tprint_degree(g11, \"Erdos-Renyi n=5k, p=0.002\", 11)\n\tprint_degree(g12, \"Erdos-Renyi n=5k, p=0.005\", 12)\n\t#print_graph(g11, \"Erdos-Renyi n=5k, p=0.002\", 0)\n\t\n\tplt.show()\n\t\nmain()" } ]
4
pa-tiq/leankeep
https://github.com/pa-tiq/leankeep
6310ed54d544e9ceff63ad51d7a6486232e52cb4
fac236359f87879b0caf140d859328a717a9c240
36fc9f803777b583dcee074d4bd943e3790752d6
refs/heads/master
2020-12-27T00:16:47.652635
2020-02-05T14:34:14
2020-02-05T14:34:14
237,702,487
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7226061224937439, "alphanum_fraction": 0.7324777841567993, "avg_line_length": 23.14285659790039, "blob_id": "605322b3a610544da65e6f2a55097063e1acecdc", "content_id": "96df83cc0c3c27c0da8f605256fafe3e53a6f326", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1013, "license_type": "no_license", "max_line_length": 74, "num_lines": 42, "path": "/leankeep.py", "repo_name": "pa-tiq/leankeep", "src_encoding": "UTF-8", "text": "# -*- coding: utf8 -*-\n\nimport key\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport selenium\nimport time\n\ndef leankeep_a():\n\n\tsecs = 1\n\tusername_xpath = '//*[@id=\"ctl00_PageContent_UserName1\"]'\n\tpassword_xpath = '//*[@id=\"ctl00_PageContent_Password\"]'\n\tbutton_id = 'ctl00_PageContent_OKButton__Button'\n\n\tdriver = webdriver.Firefox()\n\tdriver.get(\"https://seguro.leankeep.com/leankeepX4/Security/SignIn.aspx\")\n\ttime.sleep(secs)\n\n\tuser_field = driver.find_element_by_xpath(username_xpath)\n\ttime.sleep(secs)\n\tuser_field.send_keys(Keys.CONTROL + \"a\")\n\ttime.sleep(secs)\n\tuser_field.send_keys(Keys.DELETE)\n\ttime.sleep(secs)\n\tuser_field.send_keys(key.username)\n\n\tpass_field = driver.find_element_by_xpath(password_xpath)\n\ttime.sleep(secs)\n\tpass_field.send_keys(Keys.CONTROL + \"a\")\n\ttime.sleep(secs)\n\tpass_field.send_keys(Keys.DELETE)\n\ttime.sleep(secs)\n\tpass_field.send_keys(key.password)\n\ttime.sleep(secs)\n\n\tdriver.find_element_by_id(button_id).click()\n\n\tprint(\"eita\")\n\t\n\nleankeep_a()" }, { "alpha_fraction": 0.7083333134651184, "alphanum_fraction": 0.7083333134651184, "avg_line_length": 23.5, "blob_id": "8129387416bab433845add018aa9182d03a45787", "content_id": "7e6e034f5f286ab6ac029f74eb30f4fd13a5b314", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 24, "num_lines": 2, "path": "/key.py", "repo_name": "pa-tiq/leankeep", "src_encoding": "UTF-8", "text": "username = 'andrade.mep'\npassword = 'Gripen2014'" }, { "alpha_fraction": 0.8181818127632141, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 10.333333015441895, "blob_id": "88d075f524005d6c677ca065777ad22c23a221d2", "content_id": "5c4a4873f0d58f01bc7a6b1ec5b442e1e2b0f837", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 33, "license_type": "no_license", "max_line_length": 21, "num_lines": 3, "path": "/README.md", "repo_name": "pa-tiq/leankeep", "src_encoding": "UTF-8", "text": "# leankeep\n\nSelenium web scraping" } ]
3
leetim13/PCA-and-Naive-Bayes-Classifier
https://github.com/leetim13/PCA-and-Naive-Bayes-Classifier
0370febc8069b11b21a02f98ea92779c37dfdbf8
0ce6cc59b01ac371e84a8aaef2cc528edc0e26c5
b9e2c71792ff43f9f1615857803705120c1ef7b6
refs/heads/master
2020-12-07T04:30:09.710259
2020-01-08T18:33:58
2020-01-08T18:33:58
232,631,258
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6378772258758545, "alphanum_fraction": 0.6722164154052734, "avg_line_length": 32.69512176513672, "blob_id": "0b2a8db8b17243636891c9b9526a6118508eb0c7", "content_id": "eeb43fc33f7412321da3b12f5496d1ba2de66edd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2883, "license_type": "no_license", "max_line_length": 83, "num_lines": 82, "path": "/sol.py", "repo_name": "leetim13/PCA-and-Naive-Bayes-Classifier", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport data\r\nimport math\r\nimport random\r\nimport scipy\r\nfrom scipy import optimize\r\n\r\ndef save_images(images, filename, **kwargs):\r\n fig = plt.figure(1)\r\n fig.clf()\r\n ax = fig.add_subplot(111)\r\n plot_images(images, ax, **kwargs)\r\n fig.patch.set_visible(False)\r\n ax.patch.set_visible(False)\r\n plt.savefig(filename)\r\n\r\ndef load_mnist():\r\n partial_flatten = lambda x: np.reshape(x, (x.shape[0], np.prod(x.shape[1:])))\r\n one_hot = lambda x, k: np.array(x[:, None] == np.arange(k)[None, :], dtype=int)\r\n train_images, train_labels, test_images, test_labels = mnist()\r\n train_images = (partial_flatten(train_images) / 255.0 > .5).astype(float)\r\n test_images = (partial_flatten(test_images) / 255.0 > .5).astype(float)\r\n train_labels = one_hot(train_labels, 10)\r\n test_labels = one_hot(test_labels, 10)\r\n N_data = train_images.shape[0]\r\n\r\n return N_data, train_images, train_labels, test_images, test_labels\r\n\r\n# Load data\r\nN_data, train_images, train_labels, test_images, test_labels = load_mnist()\r\n\r\nN = 10000 # Number of data points in training set\r\ntrain_images = train_images[:N,:] # 10k x 784 array\r\ntrain_labels = train_labels[:N,:] # 10k x 10 array\r\ntrain_images = np.ndarray.round(train_images) # Binarize the data\r\n\r\n# Fit theta\r\nNcd = np.matmul(np.transpose(train_images),train_labels) # 784 x 10 array\r\nNc = train_labels.sum(axis=0)\r\n#print(Nc)\r\n#print(train_labels[0])\r\n#print(train_labels[1])\r\n\r\n#print(\"Ncd\")\r\n#print(Ncd)\r\n#print(\"Nc\")\r\n#print(Nc)\r\nthetaHat = (1+Ncd)/(2+Nc) # 784 x 10 array\r\nsave_images(np.transpose(thetaHat),'q1') # Plot thetaHat\r\n\r\nlogPtrain = np.matmul(train_images,np.log(thetaHat)) + \\\r\nnp.matmul(1-train_images,np.log(1-thetaHat)) # 10k x 10 array\r\navLtrain = np.mean(np.sum(logPtrain*train_labels,axis=1))\r\nlogPtest = np.matmul(test_images,np.log(thetaHat)) + \\\r\nnp.matmul(1-test_images,np.log(1-thetaHat)) # 10k x 10 array\r\navLtest = np.mean(np.sum(logPtest*test_labels,axis=1))\r\n\r\nprint(round(avLtrain,2), round(avLtest,2))\r\nprint(\"logpTrain\") \r\nprint(logPtrain)\r\nprint(\"npmeanptest\")\r\nprint(np.mean(logPtest))\r\nprint(\"theta\")\r\nprint(np.sum(thetaHat))\r\n\r\n# Predictive accuracy\r\nM = len(test_images) # Number of data points in test set\r\n# 10k x 1 vector indicating whether a prediction was correct (1) or not (0):\r\naccsTrain = train_labels[np.arange(N),logPtrain.argmax(1)]\r\naccTrain = sum(accsTrain)/N\r\naccsTest = test_labels[np.arange(M),logPtest.argmax(1)]\r\naccTest = sum(accsTest)/M\r\n\r\nprint (round(accTrain*100,2),round(accTest*100,2))\r\n\r\n\r\nc = (np.floor(np.random.rand(10)*10)).astype(int) # Pick the classes\r\n\r\nxt = np.random.rand(10,784) # Prepare to sample 10 images\r\nthresh = np.asmatrix(thetaHat[:,c].T) # Set thresholds\r\nsample10 = 1*(thresh > np.asmatrix(xt)).T # Complete the 
sampling\r\nsave_images(np.transpose(sample10),'q2')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.7920354008674622, "alphanum_fraction": 0.8053097128868103, "avg_line_length": 112, "blob_id": "9487a033082932b4ac5880f92e25d066ced79f77", "content_id": "a4b02c45469ee0b3ec5513eb0fb021021f0d6070", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 238, "license_type": "no_license", "max_line_length": 192, "num_lines": 2, "path": "/README.md", "repo_name": "leetim13/PCA-and-Naive-Bayes-Classifier", "src_encoding": "UTF-8", "text": "# PCA-and-Naive-Bayes-Classifier\nProjected training data of ’2’s’ and ’3’s’ to lower dimensions and used a 1-NN classifier on the first K principal components. Repeated the same classification with a Naive Bayes classifier.\n" }, { "alpha_fraction": 0.5981276631355286, "alphanum_fraction": 0.6198196411132812, "avg_line_length": 42.45771026611328, "blob_id": "4e389811aabd70ba81dc722852970a5a5602fb7e", "content_id": "2c759899e32478cc0d23f7160565115857803705", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8759, "license_type": "no_license", "max_line_length": 110, "num_lines": 201, "path": "/utils.py", "repo_name": "leetim13/PCA-and-Naive-Bayes-Classifier", "src_encoding": "UTF-8", 
components = np.eye(len(coefficients), len(x))\n mean = np.zeros_like(x) + mean\n fig = plt.figure(figsize=(1.2 * (5 + n_components), 1.2 * 2))\n g = plt.GridSpec(2, 4 + bool(show_mean) + n_components, hspace=0.3)\n show(g, imshape, slice(2), slice(2), x, \"True Original Image\")\n approx = mean.copy()\n counter = 2\n if show_mean:\n show(g, imshape, 0, 2, np.zeros_like(x) + mean, r'$mean$')\n show(g, imshape,1, 2, approx)\n counter += 1\n for i in range(n_components):\n approx = approx + coefficients[i] * components[i]\n show(g, imshape,0, i + counter, components[i], r'$k_{0}$'.format(i + 1))\n show(g, imshape,1, i + counter, approx,\n r\"${0:.2f} \\cdot c_{1}$\".format(coefficients[i], i + 1))\n if show_mean or i > 0:\n plt.gca().text(0, 1.05, '$+$', ha='right', va='bottom',\n transform=plt.gca().transAxes, fontsize=fontsize)\n show(g, imshape, slice(2), slice(-2, None), approx, \"Approx\")\n return fig\n\ndef plot_digits(data):\n fig, axes = plt.subplots(10, 10, figsize=(10, 4),\n subplot_kw={'xticks':[], 'yticks':[]},\n gridspec_kw=dict(hspace=0.1, wspace=0.1))\n for i, ax in enumerate(axes.flat):\n ax.imshow(data[i].reshape(16, 16), #reshape into 16x16 in order to be displayed\n cmap='binary', interpolation='nearest')\n\ndef first_k_components(training_data, k):\n '''\n Plot of first k_components vs eigen values\n '''\n mean = np.mean(training_data, axis =0)\n num_repeated = training_data.shape[0], 1\n centered_data = training_data - np.tile(mean, num_repeated) #subtract the mean of training data\n data_T = centered_data.T\n covariance_matrix = np.cov(data_T)\n eigen_values_cov , eigen_vectors_cov = np.linalg.eig(covariance_matrix )\n eigen_values_cov = eigen_values_cov[:: -1]\n eigen_vectors_cov = eigen_vectors_cov[:: -1]\n plt.figure()\n length = np.arange(0.0 , len(eigen_values_cov ) , 1) #[0,1,...,256]\n plt.plot(length, eigen_values_cov )\n plt.xlabel (\"number of eigenvectors\")\n plt.ylabel (\"accuracy\")\n plt.title (\"plot of eigenvalues of covarience\")\n plt.grid ( True )\n eigen_values = eigen_values_cov[:k]\n eigen_vectors = eigen_vectors_cov[:k ,:]\n# print(eigen_values.shape) #(10,)\n# print(eigen_vectors.shape) #(10, 256)\n return eigen_values, eigen_vectors , mean\n\ndef one_nn_classifier(train_data, train_labels, valid_data, k=1) :\n N = len(valid_data)\n shape = N, 1\n valid_labels = np.zeros(shape) #initialize empty\n train_data_N = len(train_data)\n \n for i in range (N):\n min_index = -1\n min_value = np.inf\n for j in range (train_data_N):\n euclidean_distance = np.linalg.norm(valid_data[i]- train_data[j])\n if euclidean_distance < min_value :\n min_value = euclidean_distance\n min_index = j\n valid_labels[i] = train_labels[min_index]\n return valid_labels\n\ndef extract_eigen_features(training_data):\n mean = np.mean(training_data , axis =0)\n num_repeated = training_data.shape[0], 1\n centered_data = training_data - np.tile(mean, num_repeated)\n data_T = centered_data.T\n covariance_matrix = np.cov(data_T)\n eigen_values_cov , eigen_vectors_cov = np.linalg.eig(covariance_matrix )\n # print(eigen_values_cov.shape) #(10,)\n# print(eigen_vectors_cov.shape) #(10, 256)\n sorted_eigen_values = eigen_values_cov.argsort()[:: -1] #vector of sorted eigen values asc\n eigen_values = eigen_values_cov[sorted_eigen_values]\n eigen_vectors = eigen_vectors_cov [:,sorted_eigen_values]\n return eigen_values , eigen_vectors , mean\n\ndef accuracy (prediction_value, target_value):\n return np.mean(target_value==prediction_value)\n\ndef train_model_pca(given_K , inputs_train , 
inputs_valid , target_train , target_valid):\n accuracy_list = []\n eigen_values , eigen_vectors , mean = extract_eigen_features(inputs_train)\n for k in given_K :\n code_vectors = eigen_vectors[: ,: k]\n# print(top_k_vector)\n# top_k_value = value [: k]\n num_repeated_training = (inputs_train.shape[0] , 1)\n# print(num_repeated_training.shape) #600\n centered_training_data = inputs_train - np.tile(mean, num_repeated_training )\n num_repeated_valid = (inputs_valid.shape[0] , 1)\n# print(num_repeated_valid.shape) #200\n centered_valid_data = inputs_valid - np .tile(mean, num_repeated_valid)\n \n #projection onto the low-dimensional space\n low_dim_space_valid = np.dot(centered_valid_data, code_vectors)\n low_dim_space_train = np.dot(centered_training_data, code_vectors)\n \n #using 1-NN classifier on K dimensional features\n low_dim_space_target = one_nn_classifier(low_dim_space_train , target_train , low_dim_space_valid )\n accuracy_ = accuracy(low_dim_space_target, target_valid)\n error = 1 - accuracy_\n accuracy_list.append(error)\n plt.figure()\n plt.grid(True)\n plt.plot(given_K , accuracy_list)\n plt.xlabel('first K principal components')\n plt.ylabel('Classification Error rates')\n plt.title ('plot of accuracy vs. #of eigen vectors')\n return accuracy_list\n\n\nif __name__ == '__main__':\n inputs_train, inputs_valid, inputs_test, target_train, target_valid, target_test = load_data(\"digits.npz\")\n# print(inputs_train.shape)\n# print(inputs_valid.shape)\n# print(target_train.shape)\n# print(target_valid.shape)\n \n given_K = [2 , 5, 10 , 20 , 30]\n# view_eig_vector_images (10 , top_k_vector , mean )\n accuracy_list = train_model_pca(given_K, inputs_train , inputs_valid , target_train , target_valid)\n \n# print (accuracy_k)\n best_K = 20 #after selection\n accuracy_list = train_model_pca([best_K], inputs_train , inputs_test , target_train , target_test)\n print (\"Error of K=\" + str(best_K) + \" = \" + str(accuracy_list[0]))\n\n pca = PCA().fit(inputs_train) #only used to build intuition \n plt.plot(np.cumsum(pca.explained_variance_ratio_))\n plt.xlabel('number of PCA components')\n plt.ylabel('amount of variance explained')\n pca = PCA(n_components=20)\n Xproj = pca.fit_transform(inputs_train)\n fig = plot_pca_components(inputs_train[155], Xproj[155],\n pca.mean_, pca.components_)\n plot_digits(inputs_train)\n plt.show\n \n \n \n \n " }, { "alpha_fraction": 0.5641973614692688, "alphanum_fraction": 0.5936043858528137, "avg_line_length": 35.16860580444336, "blob_id": "4a4d9426cc184196f2b179eb4bc494d28d059df5", "content_id": "6bd3edf0fba24b5c0fbcb60586ceacbfbbe05ce7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6223, "license_type": "no_license", "max_line_length": 126, "num_lines": 172, "path": "/model.py", "repo_name": "leetim13/PCA-and-Naive-Bayes-Classifier", "src_encoding": "UTF-8", "text": "from loadMNIST import *\nfrom scipy.special import logsumexp \nimport autograd as ag\nimport time\n# np.random.seed(1)\n\n\nCOLORS = [\"indianred\", \"palegoldenrod\", \"black\", \"gray\"]\n\n\ndef get_images_by_label(images, labels, query_label):\n\t\t\"\"\"\n\t\tHelper function to return all images in the provided array which match the query label.\n\t\t\"\"\"\n\t\tassert images.shape[0] == labels.shape[0]\n\t\tmatching_indices = labels == query_label\n\t\treturn images[matching_indices]\n\n\nclass NaiveBayes:\n \n def avg_log_likelihood(self, X, y, theta):\n ll = 0\n for c in range(10):\n X_c = get_images_by_label(X, y, c)\n 
log_p_x = logsumexp(np.log(0.1) + np.dot(X_c, np.log(theta.T)) + np.dot((1. - X_c), np.log(1. - theta.T)), axis=1)\n ll += np.sum(np.dot(X_c, np.log(theta[c])) + np.dot((1. - X_c), np.log(1. - theta[c])) + np.log(0.1) - log_p_x)\n return ll / X.shape[0]\n \n def log_likelihood(self, X, y, theta):\n# print()\n ll = np.zeros((X.shape[0], 10))\n# print(ll.shape)\n log_p_x = logsumexp(np.log(0.1) + np.dot(X, np.log(theta.T)) + np.dot((1. - X), np.log(1. - theta.T)), axis=1)\n# print(\"log_p_x\")\n# print(log_p_x.shape)\n# print(\"log\")\n# print(np.log(0.1).shape)\n for c in range(10):\n ll[:, c] = np.dot(X, np.log(theta[c])) #+ np.dot((1. - X), np.log(1. - theta[c])) + np.log(0.1) - log_p_x\n return ll\n \n def __init__(self, train_images, train_labels):\n self.train_images = train_images\n self.train_labels = train_labels\n \n def predict(self, X, y, theta, train=False, test=False):\n ll = self.log_likelihood(X, y, theta)\n pred = np.argmax(ll, axis=1)\n avg_ll = self.avg_log_likelihood(X, y, theta)\n accuracy = np.mean(pred == y)\n name = \"test\" if test else \"train\"\n print(\"average log-likelihood of naive bayes model on the {} set: \".format(name) + str(avg_ll))\n#\t\tprint(\"accuracy of naive bayes model on the {} set: \".format(name) + str(accuracy)) \n \n def map_naive_bayes(self, plot=False):\n theta = np.zeros((10, 784))\n for c in range(10):\n images = get_images_by_label(self.train_images, self.train_labels, c)\n theta[c] = np.divide(np.sum(images, axis=0) + 1., images.shape[0] + 2.)\n#\t\tif plot:\n#\t\t\tsave_images(theta, \"theta_map.png\")\n return theta\n\n\n\n\n\n\n\n\n\nclass GenerativeNaiveBayes:\n def __init__(self, theta):\n self.theta = theta\n \n\n \n def sample_plot(self):\n# c= np.random.multinomial(10, [0.1]*10)\n# c = np.random.choice(9,10)\n# images = np.zeros((10,784))\n# count = 0\n# for i in range(10):\n# for j in range((c[i])):\n# images[count] = np.random.binomial(1, self.theta[i]).reshape((1, 784))\n# count +=1\n# save_images(images, \"samples.png\")\n \n c = np.random.choice(9,10)\n images = np.zeros((10,784))\n count = 0\n print(self.theta.shape)\n for i in range(10):\n images[count] = np.random.binomial(1, self.theta[i]).reshape((1, 784))\n count +=1\n save_images(images, \"samples.png\")\n# c = (np.floor(np.random.rand(10)*10)).astype(int) # Pick the classes\n# xt = np.random.rand(10,784) # Prepare to sample 10 images\n# thresh = np.asmatrix(self.theta[:,c].T) # Set thresholds\n# sample10 = 1*(thresh > np.asmatrix(xt)).T # Complete the sampling\n# data.save_images(np.transpose(sample10),'ques2c')\n#\tdef sample_plot(self):\n#\t\t\"\"\"\n#\t\trandomly sample and plot 10 binary images from the marginal distribution, p(x|theta, pi)\n#\t\t\"\"\"\n# \n#\t\tc = np.random.multinomial(10, [0.1]*10)\n## images = np.zeros((10, 784))\n# images = np.zeros((10,784))\n#\t\tfor i in range(10):\n# for j in range((c[i])):\n#\t\t\t\timages[count] = np.random.binomial(1, self.theta[i]).reshape((1, 784))\n#\t\t\t\tcount += 1\n#\t\tsave_images(images, \"samples.png\")\n\n#\tdef predict_half(self, X_top):\n#\t\t\"\"\"\n#\t\tplot the top half the image concatenated with the marginal distribution over each pixel in the bottom half.\n#\t\t\"\"\"\n#\t\tX_bot = np.zeros((X_top.shape[0], X_top.shape[1]))\n#\t\ttheta_top, theta_bot = self.theta[:, :392].T, self.theta[:, 392:].T\n#\t\tfor i in range(392):\n#\t\t\tconstant = np.dot(X_top, np.log(theta_top)) + np.dot(1 - X_top, np.log(1 - theta_top))\n#\t\t\tX_bot[:, i] = logsumexp(np.add(constant, np.log(theta_bot[i])), axis=1) - 
logsumexp(constant, axis=1) \n#\t\tsave_images(np.concatenate((X_top, np.exp(X_bot)), axis=1), \"predict_half.png\")\n\n\n\nif __name__ == '__main__':\n\tstart = time.time()\n\tprint(\"loading data...\")\n\tN_data, train_images, train_labels, test_images, test_labels = load_mnist()\n\ttrain_labels = np.argmax(train_labels, axis=1)\n\ttest_labels = np.argmax(test_labels, axis=1)\n\n\tprint(\"trainning a Naive Bayes model...\")\n\tnb_model = NaiveBayes(train_images, train_labels)\n\ttheta_map = nb_model.map_naive_bayes(plot=True)\n\tnb_model.predict(train_images, train_labels, theta_map, train=True)\n\tnb_model.predict(test_images, test_labels, theta_map, test=True)\n\n\tprint(\"training a generative Naive Bayes model...\")\n\tgnb = GenerativeNaiveBayes(theta_map)\n\tgnb.sample_plot()\n \n#\tgnb.predict_half(train_images[:20,:392])\n\n#\tprint(\"training a softmax model...\")\n#\tlr_model = LogisticRegression(train_images, train_labels)\n#\tlr_model.predict(train_images, train_labels, train=True)\n#\tlr_model.predict(test_images, test_labels, test=True)\n#\n#\tprint(\"training K mean and GMM algorithms...\")\n#\tinitials = {'Nk': 200,\n#\t\t\t\t'MIU1': np.array([0.1, 0.1]),\n#\t\t\t\t'MIU2': np.array([6., 0.1]),\n#\t\t\t\t'COV': np.array([[10., 7.], [7., 10.]]),\n#\t\t\t\t'MIU1_HAT': np.array([0., 0.]),\n#\t\t\t\t'MIU2_HAT': np.array([1., 1.])\n#\t\t\t\t}\n#\t# Sampling data from a multivariate guassian distribution\n#\tc1 = np.random.multivariate_normal(initials['MIU1'], initials['COV'], initials['Nk'])\n#\tc2 = np.random.multivariate_normal(initials['MIU2'], initials['COV'], initials['Nk'])\n#\tkmean = KMean(initials, c1, c2)\n#\tkmean.plot_clusters()\n#\tkmean.train()\n#\tgmm = GaussianMixtures(initials, c1, c2)\n#\tgmm.train()\n#\tend = time.time()\n#\tprint(\"running time: {}s\".format(round(end - start, 2)))\n\tplt.show()\n\n\n" } ]
4
datmemerboi/PyQt-Image-Display
https://github.com/datmemerboi/PyQt-Image-Display
256a0b829c643b346d0c181c8a14ae5239b0b08c
25abb69004e0a2691a89a8ff52f801c89db7a3c3
443172b701de85d310f2e0942a6dd74d3a23a5f4
refs/heads/master
2020-07-04T05:11:37.814435
2019-08-13T15:15:26
2019-08-13T15:15:26
202,167,578
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.697041392326355, "alphanum_fraction": 0.7349112629890442, "avg_line_length": 28.172412872314453, "blob_id": "e44b287851152dc8ff94df93e962abf959332d70", "content_id": "c7fc3d0d8081753ea06f39a6af072028b83ff064", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 845, "license_type": "no_license", "max_line_length": 119, "num_lines": 29, "path": "/DisplayImage.py", "repo_name": "datmemerboi/PyQt-Image-Display", "src_encoding": "UTF-8", "text": "# Display Image\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLabel, QMainWindow\nfrom PyQt5.QtGui import QIcon, QPixmap\nimport sys\n\nclass App(object):\n\tdef AppWindow(self, window):\n\t\twindow.setWindowTitle(\"Pulp Fiction\")\n\t\twindow.setGeometry(0, 0, 1920, 1080)\n\n\t\tframe1 = QLabel(window)\n\n\t\tframe1File = open('frame1.css', 'r')\n\t\tframe1Contents = frame1File.read()\n\t\tframe1File.close()\n\n\t\tframe1.setStyleSheet(frame1Contents)\n\t\tpixmap = QPixmap(\"https://raw.githubusercontent.com/datmemerboi/PyQt-Image-Display/master/PF.png\").scaledToWidth(900)\n\t\t# File System link to be replaced ^\n\t\tframe1.resize(pixmap.width(), pixmap.height())\n\t\tframe1.move(230, 100)\n\t\tframe1.setPixmap(pixmap)\n\nif __name__ == '__main__':\n\tapp = QApplication(sys.argv)\n\tThisWindow = QMainWindow()\n\tApp().AppWindow(ThisWindow)\n\tThisWindow.show()\n\tsys.exit(app.exec_())" }, { "alpha_fraction": 0.7736625671386719, "alphanum_fraction": 0.7736625671386719, "avg_line_length": 29.5, "blob_id": "f16bcfeb32bddff981324c3e55863dd517994257", "content_id": "6370aec4b3a77ac8c3a896d6ed57f873ab868fca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 243, "license_type": "no_license", "max_line_length": 95, "num_lines": 8, "path": "/README.md", "repo_name": "datmemerboi/PyQt-Image-Display", "src_encoding": "UTF-8", "text": "# PyQt Image Display\n## Using PyQt\n\nHow to display an image on a PyQt window\n\nJust to create a UI window using PyQt and displaying an image on it.\n\n![Pulp Fiction](https://raw.githubusercontent.com/datmemerboi/PyQt-Image-Display/master/PF.png)" } ]
2
Cepheux/singapore-postal-codes
https://github.com/Cepheux/singapore-postal-codes
9cdb048b65e9373d093628145a566ba517a251e8
463cd26143b4552393d694f24015b4dca91db0ca
6d6aca5b038b3e094f238e05f2be15db75dae7b8
refs/heads/master
2023-03-17T11:10:35.218035
2020-07-22T04:33:19
2020-07-22T04:33:19
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5635837912559509, "alphanum_fraction": 0.5722543597221375, "avg_line_length": 48.14285659790039, "blob_id": "2515b54b28a67bd58a0ef0cc6986b9f7c7c2e69d", "content_id": "5803bfc55ee357fec83c640e19c9328a608b83da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 346, "license_type": "no_license", "max_line_length": 98, "num_lines": 7, "path": "/process.sh", "repo_name": "Cepheux/singapore-postal-codes", "src_encoding": "UTF-8", "text": "\nzcat -f singpostcode.json.gz | sed \"s:\\([&#@]\\): \\1 :g; s: RD : ROAD :g; s: *: :g\" | python3 -c \"\nimport os, sys, json\ntrim = lambda t: ' '.join(t.split())\ndb = json.loads(sys.stdin.read())\ndb = [{k:trim(v.replace(',', ' ') if k=='ADDRESS' else v) for k,v in d.items()} for d in db]\nprint(json.dumps(db, indent=1))\n\" | gzip > database.json.gz\n\n" }, { "alpha_fraction": 0.7364621162414551, "alphanum_fraction": 0.7809867858886719, "avg_line_length": 68.25, "blob_id": "c433643f1d1f160321b3c4506e5ccd3f8698cd84", "content_id": "6e7c851c1650527235077556c2cddac0d62ab78b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 831, "license_type": "no_license", "max_line_length": 209, "num_lines": 12, "path": "/README.md", "repo_name": "Cepheux/singapore-postal-codes", "src_encoding": "UTF-8", "text": "Singapore Postal Codes\n======================\n\nThis is an improved version from https://github.com/xkjyeah/singapore-postal-codes\nIt also contains a dump of all Singapore postal codes retrieved from Onemap.sg.\nThe download script and the data dump are attached.\n\nNote: Use of the data is governed by the [Open Data Licence](https://www.onemap.sg/legal/opendatalicence.html)\n\n- This data dump contains information from Onemap.sg postal code search accessed on 10 Jun 2020, or later if the date is specified in the commit message.\n- This data dump contains information from MyTransport.sg static data, accessed 1 Dec 2017.\n- For postal codes, the 2017 database contains 141726 entries, and the 2020 database contains 141848 entries. 
Interestingly, the 2020 database is about 25% smaller because it keeps fewer floating point digits.\n" }, { "alpha_fraction": 0.6082473993301392, "alphanum_fraction": 0.6207044720649719, "avg_line_length": 26.702381134033203, "blob_id": "d8d5c5bcb61230431a94e27f6ac203892c1a63bc", "content_id": "99feb862cb2b15ef3cdc17fb8ff3c504dc19664d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2328, "license_type": "no_license", "max_line_length": 119, "num_lines": 84, "path": "/dbsearch.py", "repo_name": "Cepheux/singapore-postal-codes", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport os, sys, gzip, json, argparse, re\n\n\ndef Open(fn, mode='r', **kwargs):\n\tif fn == '-':\n\t\treturn sys.stdin if mode.startswith('r') else sys.stdout\n\treturn gzip.open(fn, mode, **kwargs) if fn.lower().endswith('.gz') else open(fn, mode, **kwargs)\n\ntrim = lambda s: ' '.join(s.split())\n\nclass AddrDB:\n\tdef __init__(self, fn_or_fp=None):\n\t\tif fn_or_fp == None:\n\t\t\ttxt = '[]'\n\t\telif type(fn_or_fp) == str:\n\t\t\ttxt = Open(fn_or_fp, 'rt').read()\n\t\telse:\n\t\t\ttxt = fn_or_fp.read()\n\t\t\tif type(txt) != str:\n\t\t\t\ttxt = txt.decode('utf8', 'ignore')\n\t\tself.db = json.loads(txt)\n\n\tdef search(self, addrname):\n\t\tname = addrname.upper().replace(',', ' ')\n\t\tname = trim(re.sub('([&#@()])', ' \\\\1 ', name))\n\t\tnames = name.split()\n\n\t\t# try to extract BLK number\n\t\ttry:\n\t\t\tblk_pos = names.index('BLK') if 'BLK' in name else (names.index['BLOCK'] if 'BLOCK' in names else None)\n\t\t\tblk = names[blk_pos+1]\n\t\t\tdel names[blk_pos:blk_pos+2]\n\t\texcept:\n\t\t\tblk = None\n\n\t\t# try to extract names inside ()\n\t\tbrack_data = []\n\t\ttry:\n\t\t\twhile '(' in names:\n\t\t\t\tpos1 = names.index('(')\n\t\t\t\tpos2 = names.index(')', pos1+1)\n\t\t\t\tif pos2>pos1+1:\n\t\t\t\t\tbrack_data += [' '.join(names[pos1+1:pos2])]\n\t\t\t\tdel names[pos1:pos2+1]\n\t\texcept:\n\t\t\tpass\n\n\t\ts_pattn = ' '.join(names)\n\t\tres = [i for i in self.db if s_pattn in i['ADDRESS']]\n\n\t\t# confine search by block number\n\t\tif blk != None and len(res)>1:\n\t\t\tres1 = [i for i in res if i['BLK_NO']==blk]\n\t\t\tres = res1 if res1 else res\n\n\t\t# confine search by names in brackets\n\t\twhile brack_data and len(res)>1:\n\t\t\te = ' %s '%brack_data.pop()\n\t\t\tres1 = [i for i in res if e in ' %s '%(i['ADDRESS'])]\n\t\t\tres = res1 if res1 else res\n\n\t\treturn res\n\n\nif __name__=='__main__':\n\tparser = argparse.ArgumentParser(usage='$0 <input 1>output 2>progress', description='perform street directory search',\n\t\t\tformatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\tparser.add_argument('--addr-db', '-d', help='Singapore address database file', type=str, default='database.json.gz')\n\tparser.add_argument('-optional', help='optional argument')\n\t#nargs='?': optional positional argument; action='append': multiple instances of the arg; type=; default=\n\topt=parser.parse_args()\n\tglobals().update(vars(opt))\n\n\tdb = AddrDB(addr_db)\n\n\twhile True:\n\t\ttry:\n\t\t\tL = input()\n\t\t\tres = db.search(L)\n\t\t\tprint(res)\n\t\texcept:\n\t\t\tbreak\n\n" }, { "alpha_fraction": 0.7655502557754517, "alphanum_fraction": 0.7703348994255066, "avg_line_length": 17.909090042114258, "blob_id": "80a30fb0498b4a381ac9d975cfce6051ea0aa44a", "content_id": "f8a76eb72444137c562b5337dc491f07574c4382", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 209, "license_type": 
"no_license", "max_line_length": 62, "num_lines": 11, "path": "/update_all.sh", "repo_name": "Cepheux/singapore-postal-codes", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\n# requirements\n# python3\n# pip install tqdm\n\n# create/updates singpostcode.json.gz\npython download_postcodes.py\n\n# cleans singpostcode.json.gz and saves it as database.json.gz\nprocess.sh\n\n" } ]
4
ankitshah009/gradinit
https://github.com/ankitshah009/gradinit
a6c8183cc2ad3f31efb897a822bb9b44da385c40
2074528157129d5fc4b3807163778c8a32466b32
53972ddf51294c5ac35054262affb2101ed1244e
refs/heads/master
2023-03-20T16:00:23.255290
2021-03-09T09:51:02
2021-03-09T09:51:02
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5741176605224609, "alphanum_fraction": 0.5788235068321228, "avg_line_length": 21.3157901763916, "blob_id": "699d9dbaa51c99b2f40afbae78063d0f8994250c", "content_id": "ac75c7b9cf0d62472f5853843fc270791bdbfd3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 425, "license_type": "no_license", "max_line_length": 55, "num_lines": 19, "path": "/models/modules.py", "repo_name": "ankitshah009/gradinit", "src_encoding": "UTF-8", "text": "import torch\n\n\nclass Scale(torch.nn.Module):\n def __init__(self):\n super(Scale, self).__init__()\n self.weight = torch.nn.Parameter(torch.ones(1))\n\n def forward(self, x):\n return x * self.weight\n\n\nclass Bias(torch.nn.Module):\n def __init__(self):\n super(Bias, self).__init__()\n self.bias = torch.nn.Parameter(torch.zeros(1))\n\n def forward(self, x):\n return x + self.bias\n\n" }, { "alpha_fraction": 0.7424242496490479, "alphanum_fraction": 0.7424242496490479, "avg_line_length": 25.399999618530273, "blob_id": "eeddcb33673f953fec5f6185abd0f5905c0e46f1", "content_id": "ea7d509b6ffe2350a2e0a8aa17eb593512587186", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 132, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/models/__init__.py", "repo_name": "ankitshah009/gradinit", "src_encoding": "UTF-8", "text": "from .resnet_cifar import *\nfrom .densenet import *\nfrom .fixup_resnet_cifar import *\nfrom .vgg import *\nfrom .wide_resnet import *\n" }, { "alpha_fraction": 0.528744637966156, "alphanum_fraction": 0.547125518321991, "avg_line_length": 28.73255729675293, "blob_id": "11b3d5637ea9c75ee642e7a93ffebdb31f06f2a9", "content_id": "b8999ae1c62efdd8f89bda07e439882280b683ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2557, "license_type": "no_license", "max_line_length": 92, "num_lines": 86, "path": "/utils.py", "repo_name": "ankitshah009/gradinit", "src_encoding": "UTF-8", "text": "import torch.nn as nn\nimport torch.nn.init as init\n\nimport numpy as np\nimport torch\n\ndef mixup_data(x, y, alpha=1.0, use_cuda=True, per_sample=False):\n\n '''Compute the mixup data. Return mixed inputs, pairs of targets, and lambda'''\n batch_size = x.size()[0]\n if use_cuda:\n index = torch.randperm(batch_size).cuda()\n else:\n index = torch.randperm(batch_size)\n\n if alpha > 0. and not per_sample:\n lam = torch.zeros(y.size()).fill_(np.random.beta(alpha, alpha)).cuda()\n mixed_x = lam.view(-1, 1, 1, 1) * x + (1 - lam.view(-1, 1, 1, 1)) * x[index,:]\n elif alpha > 0.:\n lam = torch.Tensor(np.random.beta(alpha, alpha, size=y.size())).cuda()\n mixed_x = lam.view(-1, 1, 1, 1) * x + (1 - lam.view(-1, 1, 1, 1)) * x[index,:]\n else:\n lam = torch.ones(y.size()).cuda()\n mixed_x = x\n\n y_a, y_b = y, y[index]\n return mixed_x, y_a, y_b, lam\n\n\ndef mixup_lam_idx(batch_size, alpha, use_cuda=True):\n '''Compute the mixup data. 
Return mixed inputs, pairs of targets, and lambda'''\n if alpha > 0.:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1.\n if use_cuda:\n index = torch.randperm(batch_size).cuda()\n else:\n index = torch.randperm(batch_size)\n\n return lam, index \n\n\ndef mixup_criterion(y_a, y_b, lam):\n return lambda criterion, pred: criterion(pred, y_a, lam) + criterion(pred, y_b, 1 - lam)\n\n\nclass Cutout(object):\n \"\"\"Randomly mask out one or more patches from an image.\n\n Args:\n n_holes (int): Number of patches to cut out of each image.\n length (int): The length (in pixels) of each square patch.\n \"\"\"\n def __init__(self, n_holes, length):\n self.n_holes = n_holes\n self.length = length\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (Tensor): Tensor image of size (C, H, W).\n Returns:\n Tensor: Image with n_holes of dimension length x length cut out of it.\n \"\"\"\n h = img.size(1)\n w = img.size(2)\n\n mask = np.ones((h, w), np.float32)\n\n for n in range(self.n_holes):\n y = np.random.randint(h)\n x = np.random.randint(w)\n\n y1 = np.clip(y - self.length // 2, 0, h)\n y2 = np.clip(y + self.length // 2, 0, h)\n x1 = np.clip(x - self.length // 2, 0, w)\n x2 = np.clip(x + self.length // 2, 0, w)\n\n mask[y1: y2, x1: x2] = 0.\n\n mask = torch.from_numpy(mask)\n mask = mask.expand_as(img)\n img = img * mask\n\n return img\n" }, { "alpha_fraction": 0.6005630493164062, "alphanum_fraction": 0.6176102757453918, "avg_line_length": 38.226993560791016, "blob_id": "817d3d40b72c3fdc0c4bba8db6652cf28eb64a5b", "content_id": "8e58dc93bc0b819fc2eb65a4881266133acebfdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12788, "license_type": "no_license", "max_line_length": 141, "num_lines": 326, "path": "/train_cifar.py", "repo_name": "ankitshah009/gradinit", "src_encoding": "UTF-8", "text": "'''Train CIFAR10 with PyTorch.'''\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torch.nn.functional as F\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\n\nimport torchvision\nimport torchvision.transforms as transforms\n\nimport os\nimport argparse\n\nimport models\n\nimport numpy\nimport random\nfrom gradinit_utils import gradinit, metainit\nfrom utils import Cutout, mixup_criterion, mixup_data\n\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')\nparser.add_argument('-a', '--arch', metavar='ARCH', default='gradinit_resnet110', choices=model_names,\n help='model architecture: ' + ' | '.join(model_names) + ' (default: gradinit_resnet110)')\nparser.add_argument('--resume', default='', type=str,\n help='resume from checkpoint')\nparser.add_argument('--seed', default=0, type=int,\n help='rng seed')\nparser.add_argument('--alpha', default=1., type=float,\n help='interpolation strength (uniform=1., ERM=0.)')\nparser.add_argument('--wd', default=1e-4, type=float,\n help='weight decay (default=1e-4)')\nparser.add_argument('--batchsize', default=128, type=int,\n help='batch size per GPU (default=128)')\nparser.add_argument('--n_epoch', default=200, type=int,\n help='total number of epochs')\nparser.add_argument('--base_lr', default=0.1, type=float,\n help='base learning rate (default=0.1)')\nparser.add_argument('--train-clip', default=-1, type=float,\n help='Clip the gradient 
norm during training.')\nparser.add_argument('--expname', default=\"default\", type=str)\nparser.add_argument('--no_bn', default=False, action='store_true')\nparser.add_argument('--dataset', default='cifar10', type=str)\nparser.add_argument('--cutout', default=False, action='store_true')\nparser.add_argument('--train-loss', default='ce', type=str, choices=['ce', 'mixup'])\n\nparser.add_argument('--metainit', default=False, action='store_true',\n help='Whether to use MetaInit.')\nparser.add_argument('--gradinit', default=False, action='store_true',\n help='Whether to use GradInit.')\nparser.add_argument('--gradinit-lr', default=1e-3, type=float,\n help='The learning rate of GradInit.')\nparser.add_argument('--gradinit-iters', default=390, type=int,\n help='Total number of iterations for GradInit.')\nparser.add_argument('--gradinit-alg', default='sgd', type=str,\n help='The target optimization algorithm, deciding the direction of the first gradient step.')\nparser.add_argument('--gradinit-eta', default=0.1, type=float,\n help='The eta in GradInit.')\nparser.add_argument('--gradinit-min-scale', default=0.01, type=float,\n help='The lower bound of the scaling factors.')\nparser.add_argument('--gradinit-grad-clip', default=1, type=float,\n help='Gradient clipping (per dimension) for GradInit.')\nparser.add_argument('--gradinit-gamma', default=float('inf'), type=float,\n help='The gradient norm constraint.')\nparser.add_argument('--gradinit-normalize-grad', default=False, action='store_true',\n help='Whether to normalize the gradient for the algorithm A.')\nparser.add_argument('--gradinit-resume', default='', type=str,\n help='Path to the gradinit or metainit initializations.')\nparser.add_argument('--gradinit-bsize', default=-1, type=int,\n help='Batch size for GradInit. 
Set to -1 to use the same batch size as training.')\nparser.add_argument('--batch-no-overlap', default=False, action='store_true',\n help=r'Whether to make \\tilde{S} and S different.')\nargs = parser.parse_args()\nprint(args)\n\ntorch.manual_seed(args.seed)\ntorch.cuda.manual_seed(args.seed)\nnumpy.random.seed(args.seed)\nrandom.seed(args.seed)\ntorch.backends.cudnn.deterministic = True\n\nuse_cuda = torch.cuda.is_available()\nbest_acc = 0 # best test accuracy\nstart_epoch = 0 # start from epoch 0 or last checkpoint epoch\nbatch_size = int(args.batchsize)\nbase_learning_rate = args.base_lr * args.batchsize / 128.\nif use_cuda:\n # data parallel\n n_gpu = torch.cuda.device_count()\n\n# Data\nprint('==> Preparing data..')\ntransform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\nif args.cutout:\n transform_train.transforms.append(Cutout(n_holes=1, length=16))\n\ntransform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\nif args.dataset.lower() == 'cifar10':\n dset_class = torchvision.datasets.CIFAR10\n num_class = 10\nelif args.dataset.lower() == 'cifar100':\n dset_class = torchvision.datasets.CIFAR100\n num_class = 100\n\ntrainset = dset_class(root='./data', train=True, download=True, transform=transform_train)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)\n\ntestset = dset_class(root='./data', train=False, download=True, transform=transform_test)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False)\n\nclasses = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\nprint(\"=> creating model '{}'\".format(args.arch))\nnet = models.__dict__[args.arch](use_bn=not args.no_bn, num_classes=num_class)\n\nif use_cuda:\n net.cuda()\n net = torch.nn.DataParallel(net)\n print('Using', torch.cuda.device_count(), 'GPUs.')\n cudnn.benchmark = True\n print('Using CUDA..')\n\n# Model\nif args.resume:\n # Load checkpoint.\n print('==> Resuming from checkpoint..')\n assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'\n # checkpoint_file = './checkpoint/ckpt.t7.' 
+ args.sess + '_' + str(args.seed)\n checkpoint = torch.load(args.resume)\n net.load_state_dict(checkpoint['net'].state_dict())\n best_acc = checkpoint['acc']\n start_epoch = checkpoint['epoch'] + 1\n torch.set_rng_state(checkpoint['rng_state'])\n\nresult_folder = './results/'\nif not os.path.exists(result_folder):\n os.makedirs(result_folder)\n\nif 'nobn' in args.arch or 'fixup' in args.arch or args.no_bn and 'resnet' in args.arch:\n parameters_bias = [p[1] for p in net.named_parameters() if 'bias' in p[0]]\n parameters_scale = [p[1] for p in net.named_parameters() if 'scale' in p[0]]\n parameters_others = [p[1] for p in net.named_parameters() if not ('bias' in p[0] or 'scale' in p[0] or 'autoinit' in p[0])]\n optimizer = optim.SGD(\n [{'params': parameters_bias, 'lr': args.base_lr/10.},\n {'params': parameters_scale, 'lr': args.base_lr/10.},\n {'params': parameters_others}],\n lr=base_learning_rate,\n momentum=0.9,\n weight_decay=args.wd)\nelse:\n bn_names = ['norm', 'bn']\n bn_params = []\n other_params = []\n bn_param_names = []\n for n, p in net.named_parameters():\n if any([k in n for k in bn_names]):\n bn_params.append(p)\n bn_param_names.append(n)\n else:\n other_params.append(p)\n optimizer = optim.SGD(\n [{'params': bn_params, 'weight_decay': 0},\n {'params': other_params, 'weight_decay': args.wd}],\n lr=base_learning_rate,\n momentum=0.9)\n\ntotal_params = sum([p.numel() for p in net.parameters()])\nprint(\">>>>>>>>>>>>>>> Total number of parameters: {}\".format(total_params))\n\nif args.gradinit:\n gradinit_bsize = int(args.batchsize / 2) if args.gradinit_bsize < 0 else int(args.gradinit_bsize / 2)\n gradinit_trainloader = torch.utils.data.DataLoader(\n trainset,\n batch_size=gradinit_bsize,\n shuffle=True)\n\n gradinit(net, gradinit_trainloader, args)\n\nif args.metainit:\n if args.arch == 'gradinit_resnet110':\n gradinit_trainloader = torch.utils.data.DataLoader(\n trainset,\n batch_size=int(args.batchsize / 2),\n shuffle=True)\n elif args.arch == 'gradinit_densenet100':\n gradinit_trainloader = torch.utils.data.DataLoader(\n trainset,\n batch_size=int(args.batchsize / 3),\n shuffle=True)\n else:\n gradinit_trainloader = torch.utils.data.DataLoader(\n trainset,\n batch_size=args.batchsize,\n shuffle=True)\n metainit(net, gradinit_trainloader, args)\n\ncel = nn.CrossEntropyLoss()\ncriterion = lambda pred, target, lam: (\n -F.log_softmax(pred, dim=1) * torch.zeros(pred.size()).cuda().scatter_(1, target.data.view(-1, 1),\n lam.view(-1, 1))).sum(dim=1).mean()\n\n\ndef train(epoch):\n print('\\nEpoch: %d' % epoch)\n net.train()\n train_loss = 0\n correct = 0\n total = 0\n total_gnorm = 0\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n # generate mixed inputs, two one-hot label vectors and mixing coefficient\n optimizer.zero_grad()\n\n if args.train_loss == 'mixup':\n inputs, targets_a, targets_b, lam = mixup_data(inputs, targets, args.alpha, use_cuda)\n outputs = net(inputs)\n\n loss_func = mixup_criterion(targets_a, targets_b, lam)\n loss = loss_func(criterion, outputs)\n else:\n outputs = net(inputs)\n loss = cel(outputs, targets)\n\n loss.backward()\n\n if args.train_clip > 0:\n gnorm = torch.nn.utils.clip_grad_norm_(net.parameters(), args.train_clip)\n else:\n gnorm = -1\n total_gnorm += gnorm\n\n optimizer.step()\n sgdr.step()\n\n train_loss += loss.item()\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n _, predicted = torch.max(outputs.data, 1)\n correct += 
predicted.eq(targets.data).cpu().sum()\n acc = 100.*float(correct)/float(total)\n\n if batch_idx % 50 == 0 or batch_idx == len(trainloader) - 1:\n wnorms = [w.norm().item() for n, w in net.named_parameters() if 'weight' in n]\n print(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d) | WNorm: %.3e (min: %.3e, max: %.3e) | GNorm: %.3e (%.3e)'\n % (train_loss/(batch_idx+1), acc, correct, total, sum(wnorms), min(wnorms), max(wnorms), gnorm, total_gnorm / (batch_idx+1)))\n\n return train_loss/batch_idx, acc\n\n\ndef test(epoch):\n global best_acc\n net.eval()\n test_loss = 0\n correct = 0\n total = 0\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n outputs = net(inputs)\n loss = nn.CrossEntropyLoss()(outputs, targets)\n\n test_loss += loss.item()\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n\n if batch_idx % 50 == 0 or batch_idx == len(testloader) - 1:\n print(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (test_loss/(batch_idx+1), 100.*float(correct)/float(total), correct, total))\n\n # Save checkpoint.\n acc = 100.*float(correct)/float(total)\n if acc > best_acc:\n best_acc = acc\n checkpoint(acc, epoch)\n\n return test_loss/batch_idx, acc\n\n\ndef checkpoint(acc, epoch):\n # Save checkpoint.\n print('Saving..')\n state = {\n 'net': net,\n 'acc': acc,\n 'epoch': epoch,\n 'rng_state': torch.get_rng_state(),\n 'optimizer': optimizer.state_dict()\n }\n if not os.path.isdir('checkpoint'):\n os.mkdir('checkpoint')\n torch.save(state, './checkpoint/' + args.expname + '.ckpt')\n\n\nsgdr = CosineAnnealingLR(optimizer, args.n_epoch * len(trainloader), eta_min=0, last_epoch=-1)\n\nchk_path = os.path.join('chks', args.expname + \"_latest.pth\")\nfor epoch in range(start_epoch, args.n_epoch):\n lr = 0.\n\n train_loss, train_acc = train(epoch)\n test_loss, test_acc = test(epoch)\n print(\"Epoch {}, lr {}\".format(epoch, lr))\n\n torch.save({'model': net.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch+1, 'test_acc': test_acc},\n chk_path)\n" }, { "alpha_fraction": 0.5570818185806274, "alphanum_fraction": 0.5894383192062378, "avg_line_length": 32.09090805053711, "blob_id": "94c929586d08bf22f29271c49d8894a7538f2e1d", "content_id": "02f10ed79edbc2225aceafda511ea1401ec0b2b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3276, "license_type": "no_license", "max_line_length": 104, "num_lines": 99, "path": "/models/wide_resnet.py", "repo_name": "ankitshah009/gradinit", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport itertools\n\n__all__ = ['wrn_28_10']\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n return torch.nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)\n\n\nclass wide_basic(torch.nn.Module):\n def __init__(self, in_planes, planes, dropout_rate, stride=1, use_bn=True):\n super(wide_basic, self).__init__()\n\n self.use_bn = use_bn\n if self.use_bn:\n self.bn1 = torch.nn.BatchNorm2d(in_planes)\n self.bn2 = torch.nn.BatchNorm2d(planes)\n else:\n # use placeholders\n self.bn1 = torch.nn.Sequential()\n self.bn2 = torch.nn.Sequential()\n\n self.conv1 = torch.nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n self.conv2 = 
torch.nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)\n\n self.shortcut = torch.nn.Sequential()\n if stride != 1 or in_planes != planes:\n self.shortcut = torch.nn.Sequential(\n torch.nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),\n )\n\n def forward(self, x):\n out = self.dropout(self.conv1(F.relu(self.bn1(x))))\n out = self.conv2(F.relu(self.bn2(out)))\n out += self.shortcut(x)\n\n return out\n\n\nclass Wide_ResNet(torch.nn.Module):\n def __init__(self, depth, widen_factor, dropout_rate=0., num_classes=10, use_bn=True, **kwargs):\n super(Wide_ResNet, self).__init__()\n self.in_planes = 16\n\n assert ((depth-4)%6 ==0), 'Wide-resnet depth should be 6n+4'\n n = (depth-4)/6\n k = widen_factor\n\n print('| Wide-Resnet %dx%d' %(depth, k))\n nStages = [16, 16*k, 32*k, 64*k]\n\n self.use_bn = use_bn\n\n self.conv1 = conv3x3(3, nStages[0])\n self.layer1 = self._wide_layer(wide_basic, nStages[1], n, dropout_rate, stride=1)\n self.layer2 = self._wide_layer(wide_basic, nStages[2], n, dropout_rate, stride=2)\n self.layer3 = self._wide_layer(wide_basic, nStages[3], n, dropout_rate, stride=2)\n if self.use_bn:\n self.bn1 = torch.nn.BatchNorm2d(nStages[3], momentum=0.9)\n else:\n self.bn1 = torch.nn.Sequential()\n self.linear = torch.nn.Linear(nStages[3], num_classes)\n\n def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):\n strides = [stride] + [1]*(int(num_blocks)-1)\n layers = []\n\n for stride in strides:\n layers.append(block(self.in_planes, planes, dropout_rate, stride, use_bn=self.use_bn))\n self.in_planes = planes\n\n return torch.nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = F.relu(self.bn1(out))\n out = F.avg_pool2d(out, 8)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n\n return out\n\n\ndef wrn_28_10(**kwargs):\n return Wide_ResNet(28, 10, **kwargs)\n\n\nif __name__ == '__main__':\n net=Wide_ResNet(28, 10, 0.3, 10)\n y = net(Variable(torch.randn(1,3,32,32)))\n\n print(y.size())\n" }, { "alpha_fraction": 0.49138617515563965, "alphanum_fraction": 0.5047707557678223, "avg_line_length": 44.18562698364258, "blob_id": "ac4f726b747615634c388a649a9491bd5aa79f98", "content_id": "8c06d776238a124efe5233f38302950123e95959", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7546, "license_type": "no_license", "max_line_length": 111, "num_lines": 167, "path": "/gradinit_optimizers.py", "repo_name": "ankitshah009/gradinit", "src_encoding": "UTF-8", "text": "import torch\nimport math\nimport pdb\n\nclass RescaleAdam(torch.optim.Optimizer):\n r\"\"\"Implements Adam algorithm.\n\n It has been proposed in `Adam: A Method for Stochastic Optimization`_.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n (default: False)\n\n .. 
_Adam\\: A Method for Stochastic Optimization:\n https://arxiv.org/abs/1412.6980\n .. _On the Convergence of Adam and Beyond:\n https://openreview.net/forum?id=ryQu7f-RZ\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,\n min_scale=0, grad_clip=0, amsgrad=False):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n defaults = dict(lr=lr, betas=betas, eps=eps, amsgrad=amsgrad, min_scale=min_scale, grad_clip=grad_clip)\n super(RescaleAdam, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(RescaleAdam, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('amsgrad', False)\n\n @torch.no_grad()\n def step(self, closure=None, is_constraint=False):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n grad_list = []\n alphas = []\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n # State initialization\n amsgrad = group['amsgrad']\n state = self.state[p]\n if len(state) == 0:\n state['alpha'] = 1.\n state['init_norm'] = p.norm().item()\n state['step'] = 0\n state['cons_step'] = 0\n # Exponential moving average of gradient values for the weight norms\n state['exp_avg'] = 0\n # Exponential moving average of squared gradient values for the weight norms\n state['exp_avg_sq'] = 0\n state['cons_exp_avg'] = 0\n # state['cons_exp_avg_sq'] = 0\n # if amsgrad:\n # # Maintains max of all exp. moving avg. of sq. grad. values\n # state['max_exp_avg_sq'] = 0\n # alphas.append(state['alpha'])\n\n curr_norm = p.data.norm().item()\n if state['init_norm'] == 0 or curr_norm == 0:\n # pdb.set_trace()\n continue # typical for biases\n\n grad = torch.sum(p.grad * p.data).item() * state['init_norm'] / curr_norm\n # grad_list.append(grad)\n\n if group['grad_clip'] > 0:\n grad = max(min(grad, group['grad_clip']), -group['grad_clip'])\n # Perform stepweight decay\n # if group['weight_decay'] > 0:\n # p.mul_(1 - group['lr'] * group['weight_decay'])\n beta1, beta2 = group['betas']\n if is_constraint:\n state['cons_step'] += 1\n state['cons_exp_avg'] = state['cons_exp_avg'] * beta1 + grad * (1 - beta1)\n # state['cons_exp_avg_sq'] = state['cons_exp_avg_sq'] * beta2 + (grad * grad) * (1 - beta2)\n\n steps = state['cons_step']\n exp_avg = state['cons_exp_avg']\n # exp_avg_sq = state['cons_exp_avg_sq']\n else:\n # pdb.set_trace()\n state['step'] += 1\n state['exp_avg'] = state['exp_avg'] * beta1 + grad * (1 - beta1)\n\n steps = state['step']\n exp_avg = state['exp_avg']\n\n state['exp_avg_sq'] = state['exp_avg_sq'] * beta2 + (grad * grad) * (1 - beta2)\n exp_avg_sq = state['exp_avg_sq']\n\n bias_correction1 = 1 - beta1 ** steps\n bias_correction2 = 1 - beta2 ** (state['cons_step'] + state['step'])\n\n # Decay the first and second moment running average coefficient\n # if amsgrad:\n # # Maintains the maximum of all 2nd moment running avg. till now\n # state['max_exp_avg_sq'] = max(state['max_exp_avg_sq'], state['exp_avg_sq'])\n # # Use the max. for normalizing running avg. 
of gradient\n # denom = math.sqrt(state['max_exp_avg_sq'] / bias_correction2) + group['eps']\n # else:\n denom = math.sqrt(exp_avg_sq / bias_correction2) + group['eps']\n\n step_size = group['lr'] / bias_correction1\n\n # update the parameter\n state['alpha'] = max(state['alpha'] - step_size * exp_avg / denom, group['min_scale'])\n p.data.mul_(state['alpha'] * state['init_norm'] / curr_norm)\n\n # print(alphas)\n # print(grad_list)\n # print(max(grad_list), min(grad_list), max(alphas), min(alphas))\n # pdb.set_trace()\n return loss\n\n def reset_momentums(self):\n for group in self.param_groups:\n for p in group['params']:\n state = self.state[p]\n amsgrad = group['amsgrad']\n\n if len(state) == 0:\n state['alpha'] = 1.\n state['init_norm'] = p.norm().item()\n state['step'] = 0\n # Exponential moving average of gradient values for the weight norms\n state['exp_avg'] = 0\n # Exponential moving average of squared gradient values for the weight norms\n state['exp_avg_sq'] = 0\n if amsgrad:\n # Maintains max of all exp. moving avg. of sq. grad. values\n state['max_exp_avg_sq'] = 0\n else:\n state['step'] = 0\n # Exponential moving average of gradient values for the weight norms\n state['exp_avg'] = 0\n # Exponential moving average of squared gradient values for the weight norms\n state['exp_avg_sq'] = 0\n if amsgrad:\n # Maintains max of all exp. moving avg. of sq. grad. values\n state['max_exp_avg_sq'] = 0\n" }, { "alpha_fraction": 0.47132429480552673, "alphanum_fraction": 0.5260688066482544, "avg_line_length": 36.24271774291992, "blob_id": "9faf4a5201825e6e3f5b61d50a27c06eeadf8d89", "content_id": "3c4b748f109b57cf527ecd95a28f0e60ff73f292", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3836, "license_type": "no_license", "max_line_length": 119, "num_lines": 103, "path": "/models/vgg.py", "repo_name": "ankitshah009/gradinit", "src_encoding": "UTF-8", "text": "'''VGG11/13/16/19 in Pytorch.'''\nimport torch\nfrom collections import OrderedDict\n\ncfg = {\n 'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n 'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n}\n\n\nclass VGG(torch.nn.Module):\n def __init__(self, vgg_name, use_bn=True, use_pt_init=False, init_multip=1, **kwargs):\n super(VGG, self).__init__()\n self.use_bn = use_bn\n self.conv_names = []\n self.bn_names = []\n self._make_layers(cfg[vgg_name])\n self.classifier = torch.nn.Linear(512, 10)\n self.conv_names.append(f'module.classifier.weight')\n if not use_pt_init:\n self._initialize_weights()\n\n if init_multip != 1:\n for m in self.modules():\n if isinstance(m, torch.nn.Conv2d):\n m.weight.data *= init_multip\n if m.bias is not None:\n m.bias.data *= init_multip\n elif isinstance(m, torch.nn.BatchNorm2d):\n m.weight.data *= init_multip\n m.bias.data *= init_multip\n elif isinstance(m, torch.nn.Linear):\n m.weight.data *= init_multip\n m.bias.data *= init_multip\n\n def forward(self, x):\n out = self.features(x)\n out = out.view(out.size(0), -1)\n out = self.classifier(out)\n return out\n\n def _make_layers(self, cfg):\n # layers = []\n in_channels = 3\n pool_num, block_num = 0, 0\n self.features = torch.nn.Sequential(OrderedDict([]))\n for x in cfg:\n if x == 'M':\n 
self.features.add_module(f'pool{pool_num}', torch.nn.MaxPool2d(kernel_size=2, stride=2))\n pool_num += 1\n else:\n self.features.add_module(f'conv{block_num}', torch.nn.Conv2d(in_channels, x, kernel_size=3, padding=1))\n if self.use_bn:\n self.features.add_module(f'bn{block_num}', torch.nn.BatchNorm2d(x))\n self.features.add_module(f'relu{block_num}', torch.nn.ReLU(inplace=True))\n in_channels = x\n self.conv_names.append(f'module.features.conv{block_num}.weight')\n self.bn_names.append(f'module.features.bn{block_num}.weight')\n block_num += 1\n\n self.add_module('global_pool', torch.nn.AvgPool2d(kernel_size=1, stride=1))\n\n def _initialize_weights(self) -> None:\n for m in self.modules():\n if isinstance(m, torch.nn.Conv2d):\n torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n torch.nn.init.constant_(m.bias, 0)\n elif isinstance(m, torch.nn.BatchNorm2d):\n torch.nn.init.constant_(m.weight, 1)\n torch.nn.init.constant_(m.bias, 0)\n elif isinstance(m, torch.nn.Linear):\n torch.nn.init.normal_(m.weight, 0, 0.01)\n torch.nn.init.constant_(m.bias, 0)\n\n def get_plotting_names(self):\n if self.use_bn:\n return {'Linear': self.conv_names,\n 'BN': self.bn_names,}\n else:\n return {'Linear': self.conv_names,}\n\ndef test():\n net = VGG('VGG11')\n x = torch.randn(2,3,32,32)\n y = net(x)\n print(y.size())\n\ndef vgg11(**kwargs):\n return VGG('VGG11', **kwargs)\n\n\ndef vgg13(**kwargs):\n return VGG('VGG13', **kwargs)\n\n\ndef vgg16(**kwargs):\n return VGG('VGG16', **kwargs)\n\ndef vgg19(**kwargs):\n return VGG('VGG19', **kwargs)\n" }, { "alpha_fraction": 0.5641685724258423, "alphanum_fraction": 0.5722901225090027, "avg_line_length": 35.46334457397461, "blob_id": "0e31a129f052c249bce2f6da1432ac900c255872", "content_id": "5dd70ba5658df9704de55634f8e3a4aa3e59e236", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12436, "license_type": "no_license", "max_line_length": 163, "num_lines": 341, "path": "/gradinit_utils.py", "repo_name": "ankitshah009/gradinit", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\nfrom gradinit_optimizers import RescaleAdam\nfrom models.modules import Scale, Bias\nimport numpy as np\nimport os\n\n\ndef get_ordered_params(net):\n param_list = []\n for m in net.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear) or isinstance(m, nn.BatchNorm2d):\n param_list.append(m.weight)\n if m.bias is not None:\n param_list.append(m.bias)\n elif isinstance(m, Scale):\n param_list.append(m.weight)\n elif isinstance(m, Bias):\n param_list.append(m.bias)\n\n return param_list\n\n\ndef set_param(module, name, alg, eta, grad):\n weight = getattr(module, name)\n # remove this parameter from parameter list\n del module._parameters[name]\n\n # compute the update steps according to the optimizers\n if alg.lower() == 'sgd':\n gstep = eta * grad\n elif alg.lower() == 'adam':\n gstep = eta * grad.sign()\n else:\n raise RuntimeError(\"Optimization algorithm {} not defined!\".format(alg))\n\n # add the updated parameter as the new parameter\n module.register_parameter(name + '_prev', weight)\n\n # recompute weight before every forward()\n updated_weight = weight - gstep.data\n setattr(module, name, updated_weight)\n\n\ndef take_opt_step(net, grad_list, alg='sgd', eta=0.1):\n \"\"\"Take the initial step of the chosen optimizer.\n \"\"\"\n assert alg.lower() in ['adam', 'sgd']\n\n idx = 0\n for n, m in net.named_modules():\n if isinstance(m, nn.Conv2d) or 
isinstance(m, nn.Linear) or isinstance(m, nn.BatchNorm2d):\n grad = grad_list[idx]\n set_param(m, 'weight', alg, eta, grad)\n idx += 1\n\n if m.bias is not None:\n grad = grad_list[idx]\n set_param(m, 'bias', alg, eta, grad)\n idx += 1\n elif isinstance(m, Scale):\n grad = grad_list[idx]\n set_param(m, 'weight', alg, eta, grad)\n idx += 1\n elif isinstance(m, Bias):\n grad = grad_list[idx]\n set_param(m, 'bias', alg, eta, grad)\n idx += 1\n\n\ndef recover_params(net):\n \"\"\"Reset the weights to the original values without the gradient step\n \"\"\"\n\n def recover_param_(module, name):\n delattr(module, name)\n setattr(module, name, getattr(module, name + '_prev'))\n del module._parameters[name + '_prev']\n\n for n, m in net.named_modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear) or isinstance(m, nn.BatchNorm2d):\n recover_param_(m, 'weight')\n if m.bias is not None:\n recover_param_(m, 'bias')\n elif isinstance(m, Scale):\n recover_param_(m, 'weight')\n elif isinstance(m, Bias):\n recover_param_(m, 'bias')\n\n\ndef set_bn_modes(net):\n \"\"\"Switch the BN layers into training mode, but does not track running stats.\n \"\"\"\n for n, m in net.named_modules():\n if isinstance(m, nn.BatchNorm2d):\n m.training = True\n m.track_running_stats = False\n\n\ndef recover_bn_modes(net):\n for n, m in net.named_modules():\n if isinstance(m, nn.BatchNorm2d):\n m.track_running_stats = True\n\n\ndef get_scale_stats(model, optimizer):\n stat_dict = {}\n # all_s_list = [p.norm().item() for n, p in model.named_parameters() if 'bias' not in n]\n all_s_list = []\n for param_group in optimizer.param_groups:\n for p in param_group['params']:\n all_s_list.append(optimizer.state[p]['alpha'])\n stat_dict['s_max'] = max(all_s_list)\n stat_dict['s_min'] = min(all_s_list)\n stat_dict['s_mean'] = np.mean(all_s_list)\n all_s_list = []\n for n, p in model.named_parameters():\n if 'bias' not in n:\n all_s_list.append(optimizer.state[p]['alpha'])\n stat_dict['s_weight_max'] = max(all_s_list)\n stat_dict['s_weight_min'] = min(all_s_list)\n stat_dict['s_weight_mean'] = np.mean(all_s_list)\n\n return stat_dict\n\n\ndef get_batch(data_iter, data_loader):\n try:\n inputs, targets = next(data_iter)\n except:\n data_iter = iter(data_loader)\n inputs, targets = next(data_iter)\n inputs, targets = inputs.cuda(), targets.cuda()\n return data_iter, inputs, targets\n\n\ndef gradinit(net, dataloader, args):\n if args.gradinit_resume:\n print(\"Resuming GradInit model from {}\".format(args.gradinit_resume))\n sdict = torch.load(args.gradinit_resume)\n net.load_state_dict(sdict)\n return\n\n # if isinstance(net, torch.nn.DataParallel):\n # net_top = net.module\n # else:\n # net_top = net\n\n bias_params = [p for n, p in net.named_parameters() if 'bias' in n]\n weight_params = [p for n, p in net.named_parameters() if 'weight' in n]\n\n optimizer = RescaleAdam([{'params': weight_params, 'min_scale': args.gradinit_min_scale, 'lr': args.gradinit_lr},\n {'params': bias_params, 'min_scale': 0, 'lr': args.gradinit_lr}],\n grad_clip=args.gradinit_grad_clip)\n\n criterion = nn.CrossEntropyLoss()\n\n net.eval() # This further shuts down dropout, if any.\n\n set_bn_modes(net) # Should be called after net.eval()\n\n\n total_loss, total_l0, total_l1, total_residual, total_gnorm = 0, 0, 0, 0, 0\n total_sums, total_sums_gnorm = 0, 0\n cs_count = 0\n total_iters = 0\n obj_loss, updated_loss, residual = -1, -1, -1\n data_iter = iter(dataloader)\n # get all the parameters by order\n params_list = get_ordered_params(net)\n while 
True:\n eta = args.gradinit_eta\n\n # continue\n # get the first half of the minibatch\n data_iter, init_inputs_0, init_targets_0 = get_batch(data_iter, dataloader)\n\n # Get the second half of the data.\n data_iter, init_inputs_1, init_targets_1 = get_batch(data_iter, dataloader)\n\n init_inputs = torch.cat([init_inputs_0, init_inputs_1])\n init_targets = torch.cat([init_targets_0, init_targets_1])\n # compute the gradient and take one step\n outputs = net(init_inputs)\n init_loss = criterion(outputs, init_targets)\n\n all_grads = torch.autograd.grad(init_loss, params_list, create_graph=True)\n\n # Compute the loss w.r.t. the optimizer\n if args.gradinit_alg.lower() == 'adam':\n # grad-update inner product\n gnorm = sum([g.abs().sum() for g in all_grads])\n loss_grads = all_grads\n else:\n gnorm_sq = sum([g.square().sum() for g in all_grads])\n gnorm = gnorm_sq.sqrt()\n if args.gradinit_normalize_grad:\n loss_grads = [g / gnorm for g in all_grads]\n else:\n loss_grads = all_grads\n\n total_gnorm += gnorm.item()\n total_sums_gnorm += 1\n if gnorm.item() > args.gradinit_gamma:\n # project back into the gradient norm constraint\n optimizer.zero_grad()\n gnorm.backward()\n optimizer.step(is_constraint=True)\n\n cs_count += 1\n else:\n # take one optimization step\n take_opt_step(net, loss_grads, alg=args.gradinit_alg, eta=eta)\n\n total_l0 += init_loss.item()\n\n data_iter, inputs_2, targets_2 = get_batch(data_iter, dataloader)\n if args.batch_no_overlap:\n # sample a new batch for the half\n data_iter, init_inputs_0, init_targets_0 = get_batch(data_iter, dataloader)\n updated_inputs = torch.cat([init_inputs_0, inputs_2])\n updated_targets = torch.cat([init_targets_0, targets_2])\n\n # compute loss using the updated network\n # net_top.opt_mode(True)\n updated_outputs = net(updated_inputs)\n # net_top.opt_mode(False)\n updated_loss = criterion(updated_outputs, updated_targets)\n\n # If eta is larger, we should expect obj_loss to be even smaller.\n obj_loss = updated_loss / eta\n\n recover_params(net)\n optimizer.zero_grad()\n obj_loss.backward()\n optimizer.step(is_constraint=False)\n total_l1 += updated_loss.item()\n\n total_loss += obj_loss.item()\n total_sums += 1\n\n total_iters += 1\n if (total_sums_gnorm > 0 and total_sums_gnorm % 10 == 0) or total_iters == args.gradinit_iters or total_iters == args.gradinit_iters:\n stat_dict = get_scale_stats(net, optimizer)\n print_str = \"Iter {}, obj iters {}, eta {:.3e}, constraint count {} loss: {:.3e} ({:.3e}), init loss: {:.3e} ({:.3e}), update loss {:.3e} ({:.3e}), \" \\\n \"total gnorm: {:.3e} ({:.3e})\\t\".format(\n total_sums_gnorm, total_sums, eta, cs_count,\n float(obj_loss), total_loss / total_sums if total_sums > 0 else -1,\n float(init_loss), total_l0 / total_sums if total_sums > 0 else -1,\n float(updated_loss), total_l1 / total_sums if total_sums > 0 else -1,\n float(gnorm), total_gnorm / total_sums_gnorm)\n\n for key, val in stat_dict.items():\n print_str += \"{}: {:.2e}\\t\".format(key, val)\n print(print_str)\n\n if total_iters == args.gradinit_iters:\n break\n\n recover_bn_modes(net)\n if not os.path.exists('chks'):\n os.makedirs('chks')\n torch.save(net.state_dict(), 'chks/{}_init.pth'.format(args.expname))\n\n\ndef gradient_quotient(loss, params, eps=1e-5):\n grad = torch.autograd.grad(loss, params, create_graph=True)\n prod = torch.autograd.grad(sum([(g**2).sum() / 2 for g in grad]), params,\n create_graph=True)\n out = sum([((g - p) / (g + eps * (2 * (g >= 0).float() - 1).detach())\n - 1).abs().sum() for g, p in zip(grad, 
prod)])\n\n gnorm = sum([(g**2).sum().item() for g in grad])\n return out / sum([p.data.numel() for p in params]), gnorm\n\n\ndef metainit(net, dataloader, args, experiment=None):\n\n if args.gradinit_resume:\n print(\"Resuming metainit model from {}\".format(args.gradinit_resume))\n sdict = torch.load(args.gradinit_resume)\n net.load_state_dict(sdict)\n return\n\n if isinstance(net, torch.nn.DataParallel):\n net_top = net.module\n else:\n net_top = net\n\n bias_params = [p for n, p in net.named_parameters() if 'bias' in n]\n weight_params = [p for n, p in net.named_parameters() if 'weight' in n]\n\n\n optimizer = RescaleAdam([{'params': weight_params, 'min_scale': args.gradinit_min_scale, 'lr': args.gradinit_lr},\n {'params': bias_params, 'min_scale': 0, 'lr': args.gradinit_lr}],\n grad_clip=args.gradinit_grad_clip)\n\n criterion = nn.CrossEntropyLoss()\n\n set_bn_modes(net)\n net.eval()\n # get all the parameters by order\n params_list = get_ordered_params(net)\n\n total_gq_loss = 0\n total_gnorm = 0\n for ite, (inputs, targets) in enumerate(dataloader):\n optimizer.zero_grad()\n\n inputs, targets = inputs.cuda(), targets.cuda()\n outputs = net(inputs)\n loss = criterion(outputs, targets)\n\n gq, gnorm = gradient_quotient(loss, params_list, eps=1e-5)\n gq.backward()\n\n total_gq_loss += gq.item()\n total_gnorm += gnorm\n optimizer.step()\n\n if ite % 10 == 0 or ite == args.gradinit_iters - 1 or ite == len(dataloader) - 1:\n stat_dict = get_scale_stats(net, optimizer)\n print_str = \"Iter {}, gq {:.3e} ({:.3e}), gnorm {:.3e} ({:.3e}), loss {:.3e}\\t\".format(\n ite, gnorm, total_gnorm / (ite + 1), gq.item(), total_gq_loss / (ite + 1), loss.item())\n\n if experiment is not None:\n experiment.log_metric(\"gq\", gq.item(), ite)\n experiment.log_metric(\"init_loss\", loss.item(), ite)\n experiment.log_metric(\"gnorm\", gnorm, ite)\n for key, val in stat_dict.items():\n experiment.log_metric(key, val, ite)\n # torch.save(net.state_dict(), 'chks/{}_init.pth'.format(args.expname))\n\n for key, val in stat_dict.items():\n print_str += \"{}: {:.2e}\\t\".format(key, val)\n print(print_str)\n\n recover_bn_modes(net)\n if not os.path.exists('chks'):\n os.makedirs('chks')\n torch.save(net.state_dict(), 'chks/{}_init.pth'.format(args.expname))\n\n\n" }, { "alpha_fraction": 0.5494636297225952, "alphanum_fraction": 0.600715160369873, "avg_line_length": 24.393939971923828, "blob_id": "2cf2852ae5386941370f8b6736ddc6e035de683b", "content_id": "c471e664b2767de4eb90a98fce5b2dd3e4458b29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 839, "license_type": "no_license", "max_line_length": 94, "num_lines": 33, "path": "/launch/run_gradinit_densenet.sh", "repo_name": "ankitshah009/gradinit", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nfunction runexp {\n\ngpu=${1}\narch=${2}\nalg=${3}\nglr=${4}\niters=${5}\nflags=${6}\n\nflags_print=\"$(echo -e \"${flags}\" | tr -d '[:space:]')\"\nflags_print=${flags_print//--/_}\n\nexpname=gradinit-${arch}-cifar10-cutout-alg_${alg}-glr_${glr}-i_${iters}-sgclip_${flags_print}\n\ncmd=\"\nCUDA_VISIBLE_DEVICES=${gpu}\npython train_cifar.py --arch ${arch} --cutout --batchsize 64\n --gradinit --gradinit-alg ${alg} --gradinit-eta 1e-1\n --gradinit-gamma 1 --gradinit-normalize-grad\n --gradinit-lr ${glr} --gradinit-min-scale 0.01\n --gradinit-iters ${iters} --gradinit-grad-clip 1\n --expname ${expname} ${flags}\n\"\n\neval ${cmd}\n\n}\n\n# runexp gpu arch alg glr iters flags\nrunexp 0 densenet100 sgd 5e-3 780 
\"--no_bn --seed 1234\"\nrunexp 0 densenet100 sgd 1e-2 780 \"--seed 1234\"\n\n" }, { "alpha_fraction": 0.5624212026596069, "alphanum_fraction": 0.6015132665634155, "avg_line_length": 23.78125, "blob_id": "f5d9c25ab40a3eebf8c7a735e78706302ecfe40c", "content_id": "d8d9de89c213ca966cae929a13af89ac0d097207", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 793, "license_type": "no_license", "max_line_length": 92, "num_lines": 32, "path": "/launch/run_gradinit_wrn.sh", "repo_name": "ankitshah009/gradinit", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nfunction runexp {\n\ngpu=${1}\narch=${2}\nalg=${3}\nglr=${4}\niters=${5}\nflags=${6}\n\nflags_print=\"$(echo -e \"${flags}\" | tr -d '[:space:]')\"\nflags_print=${flags_print//--/_}\n\nexpname=gradinit-${arch}-cifar10-cutout-mixup-alg_${alg}-glr_${glr}-i_${iters}${flags_print}\n\ncmd=\"\nCUDA_VISIBLE_DEVICES=${gpu}\npython train_cifar.py --arch ${arch} --cutout --train-loss mixup\n --gradinit --gradinit-alg ${alg} --gradinit-eta 0.1\n --gradinit-gamma 1 --gradinit-normalize-grad\n --gradinit-lr ${glr} --gradinit-min-scale 0.01\n --gradinit-iters ${iters} --gradinit-grad-clip 1\n --expname ${expname} ${flags}\n\"\n\neval ${cmd}\n\n}\n\n# runexp gpu arch alg glr iters flags\nrunexp 0 wrn_28_10 sgd 3e-3 780 \"--gradinit-bsize 64 --seed 4096\"\n" }, { "alpha_fraction": 0.754807710647583, "alphanum_fraction": 0.7865384817123413, "avg_line_length": 114.66666412353516, "blob_id": "c734febbd4eb3e7457014bac7cd20fd7cac6f205", "content_id": "b74979a69922fbca261386baccb523c668fd4faa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1040, "license_type": "no_license", "max_line_length": 460, "num_lines": 9, "path": "/README.md", "repo_name": "ankitshah009/gradinit", "src_encoding": "UTF-8", "text": "# GradInit\nThis repository hosts the code for experiments in the paper, [GradInit: Learning to Initialize Neural Networks for Stable and Efficient Training](http://arxiv.org/abs/2102.08098). \n \nScripts for experiments on CIFAR-10 is currently available. Please refer to `launch/run_gradinit_densenet.sh` for DenseNet-100, `launch/run_gradinit_wrn.sh` for WRN-28-10, and `launch/run_gradinit.sh` for other networks shown in the paper. We will release the code for ImageNet and IWSLT experiments soon. \n\n## Notes\n**Feb 17, 2021**: Releasing the code for training CNNs on CIFAR-10.\n\n**March 9, 2021**: Update the code to support any architecture with only `nn.Conv2d`, `nn.Linear` and `nn.BatchNorm2d` as the parameterized layers. Simply call `gradinit_utils.gradinit` before your training loop. Further extensions to other parameterized layers can be achieved by modifying `gradinit_utils.gradinit.get_ordered_params`, `gradinit_utils.take_opt_step` and `gradinit_utils.gradinit.recover_params` to iterate over all parameters of these layers." } ]
11
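The gradinit snapshot above states that the method works on any architecture whose parameterized layers are `nn.Conv2d`, `nn.Linear` and `nn.BatchNorm2d`, simply by calling `gradinit_utils.gradinit` before the training loop. A minimal usage sketch of that call, under stated assumptions: the `args` fields mirror the `--gradinit-*` flags passed by the launch scripts (the values here are illustrative, not tuned settings), the VGG factory shown above is assumed to be importable as `models.vgg`, and a CUDA device is assumed since `gradinit()` moves batches to the GPU.

```python
# A minimal sketch, not code from the repo: run GradInit on CIFAR-10 before training.
from types import SimpleNamespace

import torch
import torchvision
import torchvision.transforms as T

from gradinit_utils import gradinit
from models.vgg import vgg11  # assumed module path for the VGG file shown above

# Field names mirror the --gradinit-* command-line flags; values are illustrative.
args = SimpleNamespace(
    gradinit_resume='',          # empty -> actually run GradInit instead of resuming
    gradinit_alg='sgd',
    gradinit_eta=0.1,            # step size of the simulated first update
    gradinit_lr=5e-3,            # RescaleAdam learning rate on the scale factors
    gradinit_min_scale=0.01,
    gradinit_grad_clip=1.0,
    gradinit_gamma=1.0,          # gradient-norm constraint
    gradinit_normalize_grad=True,
    gradinit_iters=780,
    batch_no_overlap=False,
    expname='gradinit-demo',     # checkpoint lands in chks/<expname>_init.pth
)

train_set = torchvision.datasets.CIFAR10(root='./data', train=True,
                                         download=True,
                                         transform=T.ToTensor())
loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)

net = vgg11().cuda()
gradinit(net, loader, args)  # rescales the initialization in place
# ...the regular training loop then starts from the re-initialized net...
```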
jsrimr/youngjin_django
https://github.com/jsrimr/youngjin_django
04f5f346f8924b550e7fb461e9600dc54a6978e2
ae6a6bf44264f0fcf0a064eff40ef0206071237f
cb4f30cde14f27eed2b0d9ee4a7f11d558df17c7
refs/heads/master
2023-08-02T15:07:17.955843
2020-04-10T11:34:44
2020-04-10T11:34:44
254,618,998
1
0
null
2020-04-10T11:33:49
2020-04-15T11:48:30
2021-09-22T18:51:51
Python
[ { "alpha_fraction": 0.47640541195869446, "alphanum_fraction": 0.48502257466316223, "avg_line_length": 33.33802795410156, "blob_id": "df474fadb3e16dbbf477cc09ae06e24f74672615", "content_id": "957b7676e193e682dd88270bfea0cbeee5b62f04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 2451, "license_type": "no_license", "max_line_length": 170, "num_lines": 71, "path": "/view_page/templates/view_page/list_logs_temp.html", "repo_name": "jsrimr/youngjin_django", "src_encoding": "UTF-8", "text": "{% extends 'base.html' %}\n\n{% block content %}\n\n\n<div class=\"content_wrap\">\n\n<form action=\"/view_page/list_logs/\" method=\"get\">\n {% csrf_token %}\n <fieldset>\n <label> 기간 : </label>\n <input id=\"fromDate\" data-provide=\"datepicker\" data-date-format=\"yyyy-mm-dd\" name=\"from_date\" value=\"{{ from_date }}\" formaction=\"/view_page/export_sales_csv/\" />\n <label> ~ </label>\n <input id=\"toDate\" data-provide=\"datepicker\" data-date-format=\"yyyy-mm-dd\" name=\"to_date\" value=\"{{ to_date }}\" formaction=\"/view_page/export_sales_csv/\" />\n <label>검색어\n <input type=\"text\" name=\"search_keyword\" value=\"{{ keyword }}\" formaction=\"/view_page/export_sales_csv/\" />\n </label>\n <button type=\"submit\">검색</button>\n </fieldset>\n\n</form>\n\n <table class = \"tg\">\n <colgroup>\n <col width=\"10%\">\n <col width=\"10%\">\n <col width=\"10%\">\n <col width=\"35%\">\n <col width=\"35%\">\n </colgroup>\n\n <tr>\n <th class=\"tg-21xh\"> DATE </th>\n <th class=\"tg-21xh\"> TIME </th>\n <th class=\"tg-21xh\"> USER_ID </th>\n <th class=\"tg-21xh\"> Q </th>\n <th class=\"tg-21xh\"> A </th>\n </tr>\n\n {% for log in log_list %}\n <tr>\n <td>{{ log.log_date }}</td>\n <td>{{ log.log_date }}</td>\n <td>{{ log.log_time }}</td>\n <td>{{ log.log_userid }}</td>\n <td>{{ log.log_question }}</td>\n <td>{{ log.log_answer }}</td>\n </tr>\n {% endfor %}\n </table>\n\n <div class=\"pagination\">\n <span class=\"step-links\">\n {% if logs.has_previous %}\n <a href=\"?page=1&search_type={{ type }}&search_keyword={{ keyword }}\">&laquo; first</a>\n <a href=\"?page={{ logs.previous_page_number }}&search_type={{ type }}&search_keyword={{ keyword }}\">previous</a>\n {% endif %}\n\n <span class=\"current\">\n Page {{ logs.number }} of {{ logs.paginator.num_pages }}.\n </span>\n\n {% if logs.has_next %}\n <a href=\"?page={{ logs.next_page_number }}&search_type={{ type }}&search_keyword={{ keyword }}\">next</a>\n <a href=\"?page={{ logs.paginator.num_pages }}&search_type={{ type }}&search_keyword={{ keyword }}\">last &raquo;</a>\n {% endif %}\n </span>\n </div>\n</div>\n\n{% endblock %}" }, { "alpha_fraction": 0.5661914348602295, "alphanum_fraction": 0.5745417475700378, "avg_line_length": 31.223684310913086, "blob_id": "c9924d2fa94320bce092514b893ad4c7e122f2b9", "content_id": "33830bab7580609b099d34ea73739d3f33026856", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4910, "license_type": "no_license", "max_line_length": 119, "num_lines": 152, "path": "/view_page/views_temp.py", "repo_name": "jsrimr/youngjin_django", "src_encoding": "UTF-8", "text": "\n\n\n\n\n\n\n\n\n\n\n\nfrom __future__ import unicode_literals\nfrom .models import Log\n\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import render\nfrom django.db.models import Q\n\nfrom django.views import generic\n\nfrom braces.views import SelectRelatedMixin\n\n# Exporting csv files\n\nfrom django.http import 
HttpResponseRedirect, HttpResponse\nimport csv\n\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom . import models\nfrom django.http import Http404\n\nfrom django.contrib.auth import get_user_model\nUser = get_user_model()\n\n# Create your views here.\n\nclass listlogs(generic.ListView):\n\n model = models.Log\n template_name = 'view_page/list_logs.html'\n\n def get_queryset(self):\n try:\n search_keyword = self.request.GET.get('search_keyword')\n from_date = self.request.GET.get('from_date')\n to_date = self.request.GET.get('to_date')\n\n if to_date is None:\n to_date = '2999-01-01'\n if from_date is None:\n from_date = '2010-01-01'\n if search_keyword is None:\n search_keyword = \"\"\n\n self.log_user = User.objects.prefetch_related('view_page').get(dpt_id__iexact = self.kwargs.get('dpt_id'))\\\n .order_by('pk') \\\n .filter(log_date__range=[from_date, to_date]) \\\n .filter(Q(log_question__contains=search_keyword)\n | Q(log_answer__contains=search_keyword))\n except:\n raise Http404\n else:\n return self.log_user.view_page.all()\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['log_user'] = self.log_user\n return context\n\n\n@csrf_exempt\ndef export_sales_csv(request):\n\n to_date = request.GET.get('to_date')\n from_date = request.GET.get('from_date')\n #$('.date').val()\n\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"log.csv\"'\n writer = csv.writer(response)\n writer.writerow(['A', 'B', 'C', 'D', 'E'])\n log_list = Log.objects.all().values_list('id', 'log_date', 'log_time', 'log_userid', 'log_question', 'log_answer')\\\n .filter(log_date__range=[from_date, to_date], log_userid = 'a')\n for log in log_list:\n writer.writerow(log)\n return response\n\n\n\n\nfrom __future__ import unicode_literals\nfrom .models import Log\n\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import render\nfrom django.db.models import Q\n\n# Exporting csv files\n\nfrom django.http import HttpResponseRedirect, HttpResponse\nimport csv\n\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom . 
import models\n\nfrom django.contrib.auth import get_user_model\nUser = get_user_model()\n\n\n# Create your views here.\n@csrf_exempt\ndef listlogs(request):\n\n search_keyword = request.GET.get('search_keyword')\n to_date = request.GET.get('to_date')\n from_date = request.GET.get('from_date')\n if to_date is None:\n to_date = '2999-01-01'\n if from_date is None:\n from_date = '2010-01-01'\n if search_keyword is None:\n search_keyword = \"\"\n\n if len(search_keyword) > 0:\n log_list = Log.objects.order_by('pk')\\\n .filter(log_date__range=[from_date, to_date])\\\n .filter(Q(log_question__contains=search_keyword)\n | Q(log_answer__contains=search_keyword))\n else:\n search_type = '1'\n search_keyword = ''\n log_list = Log.objects.filter(log_date__range=[from_date, to_date])\n\n paginator = Paginator(log_list, 5)\n page = request.GET.get('page')\n logs = paginator.get_page(page)\n return render(request, 'view_page/list_logs.html', {'log_list': logs,\n 'keyword': search_keyword,\n 'to_date': to_date,\n 'from_date': from_date\n })\n\n\n\n\n@csrf_exempt\ndef export_sales_csv(request):\n\n to_date = request.GET.get('to_date')\n from_date = request.GET.get('from_date')\n #$('.date').val()\n\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"log.csv\"'\n writer = csv.writer(response)\n writer.writerow(['A', 'B', 'C', 'D', 'E'])\n log_list = Log.objects.all().values_list('id', 'log_date', 'log_time', 'log_userid', 'log_question', 'log_answer')\\\n .filter(log_date__range=[from_date, to_date], log_userid = 'a')\n for log in log_list:\n writer.writerow(log)\n return response\n" }, { "alpha_fraction": 0.4952561557292938, "alphanum_fraction": 0.5597723126411438, "avg_line_length": 22.954545974731445, "blob_id": "41f180c9eee9fbdff701ee7cd0a87f36f837ae7f", "content_id": "b816809cf624caf593d67c3545ce5583cbd9899b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 527, "license_type": "no_license", "max_line_length": 96, "num_lines": 22, "path": "/accounts/migrations/0003_auto_20200408_2116.py", "repo_name": "jsrimr/youngjin_django", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.5 on 2020-04-08 21:16\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accounts', '0002_auto_20200408_1109'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='user',\n name='dpt_id',\n ),\n migrations.AddField(\n model_name='user',\n name='dpt',\n field=models.CharField(default='', max_length=255, unique=True, verbose_name='dpt'),\n ),\n ]\n" }, { "alpha_fraction": 0.5656934380531311, "alphanum_fraction": 0.7372262477874756, "avg_line_length": 20.076923370361328, "blob_id": "c20e1ea0733045cda1228b58bb995a028a69ff49", "content_id": "a82af8e018ecd7f70a3a24cab30ffbfa739f46ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 274, "license_type": "no_license", "max_line_length": 35, "num_lines": 13, "path": "/requirements.txt", "repo_name": "jsrimr/youngjin_django", "src_encoding": "UTF-8", "text": "asgiref==3.2.7\nDjango==3.0.5\ndjango-bootstrap-modal-forms==1.5.0\ndjango-bootstrap3==12.0.3\ndjango-braces==1.14.0\ndjango-mathfilters==1.0.0\ndjango-widget-tweaks==1.4.8\ndjangorestframework==3.11.0\nmysqlclient==1.4.6\npytz==2019.3\nsimplejson==3.17.0\nsix==1.14.0\nsqlparse==0.3.1\n" }, { "alpha_fraction": 0.6072851419448853, "alphanum_fraction": 0.6107000708580017, 
"avg_line_length": 24.852941513061523, "blob_id": "9d23c83bb2432e656fd820313a85fc64ed9e94e9", "content_id": "f1a52b9f24e6dc539ad03fb9140e92384d69346e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1757, "license_type": "no_license", "max_line_length": 92, "num_lines": 68, "path": "/accounts/models.py", "repo_name": "jsrimr/youngjin_django", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib import auth\n\nfrom django.contrib.auth.models import (BaseUserManager, AbstractBaseUser, PermissionsMixin)\nfrom django.contrib.auth import get_user_model\n\n# Create your models here.\n\n# class User(auth.models.User, auth.models.PermissionsMixin):\n# def __str__(self):\n# return \"@{}\".format(self.username)\n\n\nclass UserManager(BaseUserManager):\n def create_user(self, dpt, dpt_code, password=None):\n if not dpt:\n raise ValueError('Users must have an id')\n\n user = self.model(\n dpt= dpt,\n dpt_code=dpt_code,\n )\n user.set_password(password)\n user.is_admin = False\n user.is_active = True\n user.save(using=self._db)\n return user\n\n def create_superuser(self, dpt, dpt_code, password):\n user = self.create_user(\n dpt = dpt,\n password=password,\n dpt_code=dpt_code,\n )\n user.is_admin = True\n user.is_active = True\n user.save(using=self._db)\n return user\n\n\nclass User(AbstractBaseUser,PermissionsMixin):\n dpt = models.CharField(\n verbose_name='dpt',\n max_length=255,\n unique=True,\n default= ''\n )\n dpt_code = models.CharField(max_length=255,unique=False, default = '')\n is_active = models.BooleanField(default=True)\n is_admin = models.BooleanField(default=False)\n\n objects = UserManager()\n\n USERNAME_FIELD = 'dpt'\n REQUIRED_FIELDS = ['dpt_code']\n\n def __str__(self):\n return self.dpt\n\n def has_perm(self, perm, obj=None):\n return True\n\n def has_module_perms(self, app_label):\n return True\n\n @property\n def is_staff(self):\n return self.is_admin" }, { "alpha_fraction": 0.6659663915634155, "alphanum_fraction": 0.6785714030265808, "avg_line_length": 25.5, "blob_id": "83fe71f8e8f70924a1c764bb965f5893b1b4da83", "content_id": "ef8f5628d93fc0d426411a322e7cb57027c3aa19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 476, "license_type": "no_license", "max_line_length": 88, "num_lines": 18, "path": "/groups/models.py", "repo_name": "jsrimr/youngjin_django", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\n\n\nfrom django.contrib.auth import get_user_model\nUser = get_user_model() # This allows you to get the current user logged into a session.\n\n\nclass Group(models.Model):\n dpt_code = models.CharField(max_length=255, unique= True, default = '')\n dpt_name = models.CharField(max_length=255, unique=True, default = '')\n\n def __str__(self):\n return self.dpt_name\n\n class Meta:\n ordering = ['dpt_code']" }, { "alpha_fraction": 0.5989201664924622, "alphanum_fraction": 0.6070188879966736, "avg_line_length": 30.621952056884766, "blob_id": "a2f7c2a460c368e8856d2de7733affb42ac73792", "content_id": "bf74100bce13a5d0d706a6b8ff83557025ee2159", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2593, "license_type": "no_license", "max_line_length": 120, "num_lines": 82, "path": "/view_page/views.py", "repo_name": "jsrimr/youngjin_django", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\nimport csv\n\nfrom 
django.contrib.auth import get_user_model\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom .models import Log\n\n# Exporting csv files\nUser = get_user_model()\n\nfrom django.views import generic\n\nfrom . import models\n\n\n# Create your views here.\n\nclass listlogs(generic.ListView):\n model = models.Log\n print(\"LOADING MODEL ?\")\n template_name = 'view_page/list_logs.html'\n\n def get_queryset(self):\n try:\n user = self.request.user\n\n self.search_keyword = self.request.GET.get('search_keyword')\n self.from_date = self.request.GET.get('from_date')\n self.to_date = self.request.GET.get('to_date')\n\n if self.to_date is None:\n self.to_date = '2999-01-01'\n if self.from_date is None:\n self.from_date = '2010-01-01'\n if self.search_keyword is None:\n self.search_keyword = \"\"\n\n self.log_user = User.objects.all()\n\n except:\n print(\"Replace this line with raise Http404\")\n else:\n print(\"ABOUT to FAIL\")\n\n # print(\"DDD\", self.get('dpt'))\n\n print(\"THIS IS IT1\", self.kwargs.get('dpt'))\n self.log_user = User.objects.prefetch_related('view_page').get(dpt__iexact=user.dpt)\n # self.log_user = User.objects.prefetch_related('view_page').get(dpt__iexact=self.kwargs.get('dpt'))\n\n print(\"THIS IS IT2\", self.log_user)\n print(\"OMG IM GONNA CRY\", self.log_user.view_page.all())\n\n return self.log_user.view_page.all()\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['log_user'] = self.log_user\n context['to_date'] = self.to_date\n context['from_date'] = self.from_date\n context['search_keyword'] = self.search_keyword\n return context\n\n\n@csrf_exempt\ndef export_sales_csv(request):\n to_date = request.GET.get('to_date')\n from_date = request.GET.get('from_date')\n # $('.date').val()\n\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"log.csv\"'\n writer = csv.writer(response)\n writer.writerow(['A', 'B', 'C', 'D', 'E'])\n log_list = Log.objects.all().values_list('id', 'log_date', 'log_time', 'log_userid', 'log_question', 'log_answer') \\\n .filter(log_date__range=[from_date, to_date], log_userid='a')\n for log in log_list:\n writer.writerow(log)\n return response\n" }, { "alpha_fraction": 0.7118055820465088, "alphanum_fraction": 0.7152777910232544, "avg_line_length": 32.230770111083984, "blob_id": "e416842d1dc68fe6fc193e2104054a4270d52a20", "content_id": "fcf43ad3c3d5c779d637ab84ee01c124ef49f216", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 872, "license_type": "no_license", "max_line_length": 102, "num_lines": 26, "path": "/view_page/models.py", "repo_name": "jsrimr/youngjin_django", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.utils.timezone import now\nfrom django.db import models\n\nfrom groups.models import Group\n\nfrom django.contrib.auth import get_user_model\nUser = get_user_model() # This allows you to get the current user logged into a session.\n\n\n# Create your models here.\n\nclass Log(models.Model):\n\n group = models.ForeignKey(Group, related_name='view_page', on_delete=models.CASCADE, default = '')\n user = models.ForeignKey(User, related_name='view_page', on_delete=models.CASCADE, default = '')\n\n log_date = models.DateTimeField(default=now)\n log_time = models.DateTimeField(default=now)\n log_userid = models.CharField(max_length=60)\n log_question = 
models.TextField(default='질문')\n log_answer = models.TextField(default='답변')\n\n def __str__(self):\n return self.log_question\n" }, { "alpha_fraction": 0.5584256052970886, "alphanum_fraction": 0.5793358087539673, "avg_line_length": 30.269229888916016, "blob_id": "0f3799be71808e4d046040a2010d58f3c75d2261", "content_id": "5a773deafb8a3d5d4125ab625dc302a0721aeb78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 821, "license_type": "no_license", "max_line_length": 114, "num_lines": 26, "path": "/view_page/migrations/0001_initial.py", "repo_name": "jsrimr/youngjin_django", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.5 on 2020-04-07 04:49\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Log',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('log_date', models.DateField(default=django.utils.timezone.now)),\n ('log_time', models.TimeField(default=django.utils.timezone.now)),\n ('log_userid', models.CharField(max_length=60)),\n ('log_question', models.TextField(default='질문')),\n ('log_answer', models.TextField(default='답변')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.5435779690742493, "alphanum_fraction": 0.6146789193153381, "avg_line_length": 21.947368621826172, "blob_id": "67fc131711e2e37033bc3d91a294a627b77937ae", "content_id": "6bb5bd6c4a00717d94f8a751558dcb70a025f311", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 436, "license_type": "no_license", "max_line_length": 70, "num_lines": 19, "path": "/view_page/migrations/0005_auto_20200410_2018.py", "repo_name": "jsrimr/youngjin_django", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.5 on 2020-04-10 20:18\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('view_page', '0004_auto_20200408_1741'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='log',\n name='log_time',\n field=models.DateField(default=django.utils.timezone.now),\n ),\n ]\n" } ]
10
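The youngjin_django snapshot above ships templates whose forms post to `/view_page/list_logs/` and `/view_page/export_sales_csv/`, but no `urls.py` is included. A hypothetical URL configuration consistent with those paths and the views defined in `views.py` (reconstructed for illustration, not part of the repo):

```python
# Hypothetical view_page/urls.py -- inferred from the template form actions
# and the view names in views.py; not part of the snapshot above.
from django.urls import path

from . import views

app_name = 'view_page'

urlpatterns = [
    # listlogs is a class-based ListView, so it is wired with .as_view()
    path('list_logs/', views.listlogs.as_view(), name='list_logs'),
    # export_sales_csv is a plain function view returning the CSV attachment
    path('export_sales_csv/', views.export_sales_csv, name='export_sales_csv'),
]
```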
pradeeshnair/ai-product-recommend
https://github.com/pradeeshnair/ai-product-recommend
6c1cb0ae2dafa203ce1b33c79409efe9fb538150
3ea574b84b2c345a4aca4c3360b7425d6eb1882b
8b30af944b39b5bdfcd6f595b49f5788a647db3a
refs/heads/master
2022-12-01T21:12:39.161262
2020-08-19T18:31:47
2020-08-19T18:31:47
288,807,043
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5487682223320007, "alphanum_fraction": 0.5789341330528259, "avg_line_length": 35.16363525390625, "blob_id": "39f012fb175af0420aa17306ba2f2f98885cd247", "content_id": "eb34cba599f3b48c61c7e1b8a35482852ba323a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3978, "license_type": "no_license", "max_line_length": 108, "num_lines": 110, "path": "/app.py", "repo_name": "pradeeshnair/ai-product-recommend", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom flask import Flask, jsonify, request, render_template\nimport joblib\nimport traceback\n# load model\nmodel = joblib.load('model.pkl')\nmodel_columns = joblib.load('model_columns.pkl')\n# app\napp = Flask(__name__)\n\n# routes\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n try:\n # Uncomment to support JSON\n #data = request.get_json(force=True)\n\n # To Support form data from HTML\n data = request.form.to_dict(flat=False)\n # convert data into dataframe\n data.update((x, y) for x, y in data.items())\n\n df = pd.DataFrame.from_dict(data)\n print(df.head())\n print(df.info())\n # datatype convertion\n df['AGE'] = pd.to_numeric(df['AGE'], errors='coerce')\n df['HEIGHT'] = pd.to_numeric(df['HEIGHT'], errors='coerce')\n df['WEIGHT'] = pd.to_numeric(df['WEIGHT'], errors='coerce')\n\n print(df.info())\n # Combine Country of residence and country of destination to form a relationship\n df['COUNTRY_REL'] = df['COUNTRY_RES'] + '_'+df['COUNTRY_DES']\n\n # We will create the following bins: AGE_GROUP, HEIGHT, WEIGHT\n\n # AGE_GROUP\n bins_age_group = [10, 20, 30, 40, 60, 70, 80]\n bin_labels_age_group = ['<20', '20-29',\n '30-39', '40-59', '60-79', '>80']\n df['AGE_GROUP'] = pd.cut(\n df.AGE, bins_age_group, right=False, labels=bin_labels_age_group)\n\n # HEIGHT\n bins_height = [150, 160, 170, 180, 190, 200]\n bin_labels_height = ['<160', '160-169', '170-179', '180-189', '>190']\n df['HEIGHT_GROUP'] = pd.cut(\n df.HEIGHT, bins_height, right=False, labels=bin_labels_height)\n\n # WEIGHT\n bins_weight = [40, 50, 60, 70, 80, 90]\n bin_labels_weight = ['<50', '50-59', '60-69', '70-79', '>80']\n df['WEIGHT_GROUP'] = pd.cut(\n df.WEIGHT, bins_weight, right=False, labels=bin_labels_weight)\n\n # One hot encoding for GENDER\n one_hot_gender = pd.get_dummies(df.GENDER, prefix='GENDER')\n df = df.join(one_hot_gender)\n # One hot encoding for SMOKE_STATUS\n one_hot_smoke = pd.get_dummies(df.SMOKE_STATUS, prefix='SMOKE_STATUS')\n df = df.join(one_hot_smoke)\n # One hot encoding for AGE_GROUP\n one_hot_age_group = pd.get_dummies(df.AGE_GROUP, prefix='AGE_GROUP')\n df = df.join(one_hot_age_group)\n # One hot encoding for HEIGHT_GROUP\n one_hot_height_group = pd.get_dummies(\n df.HEIGHT_GROUP, prefix='HEIGHT_GROUP')\n df = df.join(one_hot_height_group)\n # One hot encoding for WEIGHT_GROUP\n one_hot_weight_group = pd.get_dummies(\n df.WEIGHT_GROUP, prefix='WEIGHT_GROUP')\n df = df.join(one_hot_weight_group)\n # One hot encoding for COUNTRY_REL\n one_hot_country_rel = pd.get_dummies(\n df.COUNTRY_REL, prefix='COUNTRY_REL')\n df = df.join(one_hot_country_rel)\n\n df = df.drop('COUNTRY_REL', axis=1)\n df = df.drop('WEIGHT_GROUP', axis=1)\n df = df.drop('HEIGHT_GROUP', axis=1)\n df = df.drop('AGE_GROUP', axis=1)\n df = df.drop('SMOKE_STATUS', axis=1)\n df = df.drop('GENDER', axis=1)\n df = df.drop('AGE', axis=1)\n df = df.drop('HEIGHT', axis=1)\n df = df.drop('WEIGHT', axis=1)\n df = 
df.drop('COUNTRY_RES', axis=1)\n df = df.drop('COUNTRY_DES', axis=1)\n\n df = df.reindex(columns=model_columns, fill_value=0)\n\n prediction = list(model.predict(df))\n\n print(prediction)\n # Uncomment to return JSON\n # return jsonify({'prediction': str(prediction)})\n return render_template('index.html', prediction_text='Recommended products : {}'.format(prediction))\n\n except:\n\n return jsonify({'trace': traceback.format_exc()})\n\n\nif __name__ == '__main__':\n app.run(port=5000, debug=True)\n" }, { "alpha_fraction": 0.782608687877655, "alphanum_fraction": 0.782608687877655, "avg_line_length": 22, "blob_id": "6dd66e26a1b588f5b92c5f7368727fe9e15ce1c9", "content_id": "c9e3effd5e926c5fdd01f7e96154c3a2cd1ac59c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 23, "license_type": "no_license", "max_line_length": 22, "num_lines": 1, "path": "/README.md", "repo_name": "pradeeshnair/ai-product-recommend", "src_encoding": "UTF-8", "text": "# ai-product-recommend\n" } ]
2
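In the ai-product-recommend `app.py` above, the key serving-time step is `df.reindex(columns=model_columns, fill_value=0)`: a single request only yields dummy columns for the category values it actually contains, so the columns the model saw during training must be recreated as zeros and placed in the same order. A small self-contained illustration of that pattern (the column names here are made up for the example, not the app's real features):

```python
# Illustration of the reindex-to-training-columns pattern from app.py.
import pandas as pd

# Column layout saved at training time (joblib.load('model_columns.pkl') in the app).
model_columns = ['AGE', 'GENDER_F', 'GENDER_M', 'SMOKE_STATUS_Y']

# One inference request: get_dummies only emits columns for values present,
# so GENDER_F and SMOKE_STATUS_Y would be missing without the reindex.
req = pd.DataFrame([{'AGE': 34, 'GENDER': 'M', 'SMOKE_STATUS': 'N'}])
req = req.join(pd.get_dummies(req.GENDER, prefix='GENDER')).drop(columns='GENDER')
req = req.join(pd.get_dummies(req.SMOKE_STATUS, prefix='SMOKE_STATUS')).drop(columns='SMOKE_STATUS')

aligned = req.reindex(columns=model_columns, fill_value=0)
print(aligned.values.tolist())  # [[34, 0, 1, 0]] -- matches the training layout
```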
KtlTheBest/secret_letter_sender
https://github.com/KtlTheBest/secret_letter_sender
5150cc0422889d3ad5d9cda5d86dd39475726c3c
e1f4cdf31fde58b0591509611705959b2d40ca34
5accb1e02229881974275963104e113436c3b485
refs/heads/master
2023-02-04T03:49:52.188353
2020-12-27T15:33:57
2020-12-27T15:33:57
324,790,598
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7236841917037964, "alphanum_fraction": 0.7236841917037964, "avg_line_length": 14.199999809265137, "blob_id": "4c42f8ffeb43dc6af9fafd930e45d38a53e899bd", "content_id": "2827a1813d86efd60c9dfcf24680b2a2a1c27c1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 76, "license_type": "no_license", "max_line_length": 28, "num_lines": 5, "path": "/config.py", "repo_name": "KtlTheBest/secret_letter_sender", "src_encoding": "UTF-8", "text": "import logging\n\nBOTTOKEN = \"\"\nWORKDIR = \"work\"\nlogging_level = logging.INFO\n" }, { "alpha_fraction": 0.7201834917068481, "alphanum_fraction": 0.7201834917068481, "avg_line_length": 26.25, "blob_id": "8971104a6eb63accc807e85bb37e2b62a8fbe26e", "content_id": "743ab8bf7116d11ae12e782447960bbbfa12c72b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 218, "license_type": "no_license", "max_line_length": 110, "num_lines": 8, "path": "/logger.py", "repo_name": "KtlTheBest/secret_letter_sender", "src_encoding": "UTF-8", "text": "import logging\nimport config\n\nlogging.basicConfig(level=config.logging_level, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\ndef getLogger(name):\n logger = logging.getLogger(name)\n return logger\n" }, { "alpha_fraction": 0.6094771027565002, "alphanum_fraction": 0.6094771027565002, "avg_line_length": 47.959999084472656, "blob_id": "1f203e5d8fa407100528ad54ed52f461f0f49616", "content_id": "7c600d06516de47090b94ca210e5948886656d5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2448, "license_type": "no_license", "max_line_length": 99, "num_lines": 50, "path": "/messages.py", "repo_name": "KtlTheBest/secret_letter_sender", "src_encoding": "UTF-8", "text": "START = \"Hello and welcome to the Anonymous Secret letter sender\\\\.\\n\" + \\\n \"\\n\" + \\\n \"The concept of the bot is pretty simple\\\\. \" + \\\n \"All you do is choose the person whom you want to send a secret message \" + \\\n \"and then you'll just write the message to me\\\\.\\n\" + \\\n \"\\n\" + \\\n \"You shouldn't worry about the messages leaking somewhere \\\\(from the bot's side\\\\) \\n\" + \\\n \"Oracle has implemented an encryption mechanism \" + \\\n \"so he wouldn't be able to read your messages\\\\.\\n\" + \\\n \"So feel free to use it\\\\. \\n\" + \\\n \"\\n\" + \\\n \"There are some restrictions, however\\\\. You can only message people \" + \\\n \"who have pressed a 'start' from the bot\\\\. \" + \\\n \"You'll be notified, however, if somebody connects to the bot\\\\. \" + \\\n \"\\\\(The same goes to you and the others users know that you started using bot\\\\)\\\\.\" + \\\n \"\\n\" + \\\n \"With that being said, the bot should be intuitive to use\\\\. 
\" + \\\n \"We're almost there\\\\.\\n\" + \\\n \"\\n\" + \\\n \"Please, enter your custom encryption key\\\\.\"\n\nHELP = \"Usage:\\n\" + \\\n \"/help \\\\- prints this usage manual\\n\" + \\\n \"/list \\\\- see the list of users whom you can send letters\\n\" + \\\n \"/letter \\\\- choose recipient and write a letter\\n\" + \\\n \"/delete \\\\- choose recipient and delete the letter to that person\\n\" + \\\n \"/cancel \\\\- Cancel the ongoing operation\"\n\nPROMPT_TO_GET_RECIPIENT = \"Please, chooose the recipient from the list \" + \\\n \"and then we will start composing a letter\\\\.\"\n\nPROMPT_TO_GET_LETTER = \"Nice, now send me a letter as a message, that you want to send\\\\.\"\n\nTRY_AGAIN_RECIPIENT = \"Something went wrong, please try choosing the recipient again\\\\.\"\n\nGET_MESSAGE = \"Nice, I've saved your message\\\\. If you change your mind for some reason, \" + \\\n \"you can delete it with a /delete command\\\\.\\n\\n\" + \\\n \"Now, to get help, enter the /help command\\\\.\"\n\nSTART_DELETE = \"Okay, choose the person to whom you want to delete the letter.\"\n\nNO_MESSAGES_TO_DELETE = \"There are no letters to delete\\\\.\\\\.\\\\.\"\n\nTRY_AGAIN_DELETE_RECIPIENT = \"Something went wrong, please try choosing the recipient again\\\\.\"\n\nCONFIRM_DELETE = \"Are you sure that you want to delete the letter\\\\?\"\n\nDIDNT_DELETE = \"Good\\\\. The message will stay until further actions\"\n\nDELETED_LETTER = \"The message is deleted\"\n" }, { "alpha_fraction": 0.6301731467247009, "alphanum_fraction": 0.6334018111228943, "avg_line_length": 29.972726821899414, "blob_id": "06e3334f8dfd337e41a575ed5dffa736d7f3263f", "content_id": "3bec2c6a4d02bfea02140f3b12981bc5476108d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10221, "license_type": "no_license", "max_line_length": 170, "num_lines": 330, "path": "/main.py", "repo_name": "KtlTheBest/secret_letter_sender", "src_encoding": "UTF-8", "text": "import os\nimport config\nimport messages\nimport logger\nimport re\nimport base64\nimport schedule\nimport datetime\nimport time\nimport threading\n\nfrom cryptography.fernet import Fernet\n\nfrom telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove, Update\nfrom telegram.ext import (\n Updater,\n CommandHandler,\n MessageHandler,\n Filters,\n ConversationHandler,\n CallbackContext,\n)\n\nlog = logger.getLogger(__name__)\nbot = None\n\nactive_users = []\nusers_data = []\nusers_keys = {}\n\nHELP, LIST, GET_KEY, START_GET_RECIPIENT, GET_RECIPIENT, GET_LETTER, START_DELETE, GET_DELETE_RECIPIENT, CONFIRM_DELETE = range(9)\n\ndef setup_workdir():\n try:\n os.mkdir(config.WORKDIR)\n except FileExistsError:\n pass\n\ndef scheduled_job():\n today = datetime.date.today()\n if today.day != 31 or today.month != 12:\n return\n\n sendout_and_delete_messages()\n\ndef sendout_and_delete_messages():\n for root, dirs, files in os.walk(config.WORKDIR):\n for fname in files:\n if fname.startswith(\"text\"):\n with open(os.path.join(root, fname), 'rb') as f:\n content = f.read()\n match = re.compile(r'text(.+?)#(.+?).enc').match(fname)\n From = match.group(1)\n To = match.group(2)\n content = \"Message from anonymous:\\n\\n\" + decrypt_content(content, users_keys[From])\n sendMessageViaUserId(int(To), content)\n log.info(\"Sent letter from {} to {}\".format(From, To))\n\n for root, dirs, files in os.walk(config.WORKDIR):\n for fname in files:\n if os.path.exists(os.path.join(root, fname)):\n os.remove(os.path.join(root, fname))\n\ndef 
encrypt_message(msg, key):\n msg = msg.encode('utf-8')\n f = Fernet(key)\n new_message = f.encrypt(msg)\n\n return new_message\n\ndef decrypt_content(msg, key):\n f = Fernet(key)\n new_message = f.decrypt(msg)\n\n return new_message.decode('utf-8')\n\ndef prepare_keyboard(lst):\n res = []\n tmp = []\n for item in lst:\n item = getUserDict(item)\n if len(tmp) == 2:\n res.append(tmp)\n tmp = []\n\n if len(tmp) == 1 or len(tmp) == 0:\n if item['username'] != \"\":\n string = item['username'] + ' - ' + item['firstname']\n else:\n string = item['name']\n tmp.append(string)\n\n res.append(tmp)\n\n return res\n\ndef getUserDict(text):\n for user in users_data:\n if user['userid'] == text or user['username'] == text or user['name'] == users_data:\n return user\n\ndef getUser(update):\n return update.message.from_user\n\ndef sendMessageViaUserId(userid, text):\n bot.sendMessage(chat_id=userid, text=text)\n\ndef reply_to_user(update, text):\n update.message.reply_text(text, parse_mode='MarkdownV2')\n\ndef reply_with_keyboard(update, keyboard, text=''):\n update.message.reply_text(\n text,\n reply_markup=ReplyKeyboardMarkup(keyboard, one_time_keyboard=True)\n )\n\ndef write_active_users_to_file():\n with open('active_users', 'w') as f:\n f.write('\\n'.join(active_users))\n\ndef add_userid_to_file(userid):\n if userid not in active_users:\n active_users.append(userid)\n write_active_users_to_file()\n\ndef notify_other_active_users(userid, name):\n for user in active_users:\n if userid == user:\n continue\n sendMessageViaUserId(user, \"The {} has joined the bot, you can send him/her a secret message\".format(name))\n log.debug(\"The user with id {} is notified about {}\".format(user, name))\n\ndef getName(userid):\n for userdict in users_data:\n if userdict['userid'] == userid:\n return userdict['name']\n\ndef get_and_save_encryption_key(update, context):\n key = update.message.text.split('\\n')[0].encode('utf-8')\n orig_key = key\n while len(key) < 32:\n key += orig_key\n \n key = key[:32]\n new_key = base64.b64encode(key)\n user = update.message.from_user\n users_keys[str(user['id'])] = new_key\n\n reply_to_user(update, \"Thanks\\\\! 
I'll use this key to encrypt the messages so that even the author of the bot won't be able to read them\\\\!\")\n\n return ConversationHandler.END\n\ndef simple_send():\n for user in active_users:\n sendMessageViaUserId(user, \"Hello!\")\n\ndef setup_schedule():\n schedule.every().day.at(\"18:01\").do(scheduled_job)\n while True:\n schedule.run_pending()\n time.sleep(1)\n\ndef setup():\n global bot\n setup_workdir()\n updater = Updater(config.BOTTOKEN, use_context=True)\n bot = updater.bot\n dispatcher = updater.dispatcher\n\n conv_handler = ConversationHandler(\n entry_points=[\n CommandHandler('start', start), \n CommandHandler('help', gethelp),\n CommandHandler('list', list_active_users),\n CommandHandler('letter', letter),\n CommandHandler('delete', start_delete),\n CommandHandler('send', send_messages),\n ],\n states = {\n GET_KEY: [MessageHandler(Filters.text, get_and_save_encryption_key)],\n GET_RECIPIENT: [MessageHandler(Filters.text, get_recipient)],\n GET_LETTER: [MessageHandler(Filters.text, get_message)],\n GET_DELETE_RECIPIENT: [MessageHandler(Filters.text, get_delete_recipient)],\n CONFIRM_DELETE: [MessageHandler(Filters.regex(r'^(Yes|No)'), confirm_delete_message)],\n },\n fallbacks=[CommandHandler('cancel', cancel)]\n )\n\n dispatcher.add_handler(conv_handler)\n\n t = threading.Thread(target=setup_schedule, args=())\n t.start()\n\n updater.start_polling()\n updater.idle()\n\n\ndef start(update: Update, context: CallbackContext) -> int:\n user = getUser(update)\n log.info(\"User with userid {} and username {} connected to bot\".format(user['id'], user['username']))\n add_userid_to_file(str(user['id']))\n\n context.user_data['letter_to'] = []\n userdict = {\n 'userid': str(user['id']),\n 'username': user['username'],\n 'firstname': user['first_name'],\n 'lastname': user['last_name'],\n 'name': user.full_name\n }\n notify_other_active_users(userdict['userid'], userdict['name'])\n\n users_data.append(userdict)\n reply_to_user(update, messages.START)\n\n return GET_KEY\n\ndef gethelp(update, context):\n reply_to_user(update, messages.HELP)\n return ConversationHandler.END\n\ndef list_active_users(update, context):\n userlist = \"\"\n for user in active_users:\n userlist += \"\\\\+ \" + getName(user)\n\n prepared_message = \\\n \"Okay, these people are ready to accept secret letters:\\n\" + userlist\n\n reply_to_user(update, prepared_message)\n return ConversationHandler.END\n\ndef letter(update, context):\n keyboard = prepare_keyboard(active_users)\n log.debug(\"The keyboard looks like this: {}\".format(keyboard))\n reply_with_keyboard(update, keyboard, messages.PROMPT_TO_GET_RECIPIENT)\n\n return GET_RECIPIENT\n\ndef get_recipient(update, context):\n text = update.message.text.split(\" - \")[0]\n userdict = getUserDict(text)\n if userdict is not None:\n context.user_data['recipient'] = userdict['userid']\n reply_to_user(update, messages.PROMPT_TO_GET_LETTER)\n return GET_LETTER\n else:\n reply_to_user(update, messages.TRY_AGAIN_RECIPIENT)\n return GET_RECIPIENT\n\ndef get_message(update, context):\n message = update.message.text\n user = update.message.from_user\n\n message = encrypt_message(message, users_keys[str(user['id'])])\n path = os.path.join(config.WORKDIR, \"text{}#{}.enc\".format(user['id'], context.user_data['recipient']))\n\n with open(path, 'wb') as f:\n f.write(message)\n \n reply_to_user(update, messages.GET_MESSAGE)\n\n letter_to = context.user_data['letter_to']\n if context.user_data['recipient'] not in letter_to:\n 
letter_to.append(context.user_data['recipient'])\n\n return ConversationHandler.END\n\ndef start_delete(update, context):\n if len(context.user_data['letter_to']) == 0:\n reply_to_user(update, messages.NO_MESSAGES_TO_DELETE)\n return ConversationHandler.END\n\n keyboard = prepare_keyboard(context.user_data['letter_to'])\n reply_with_keyboard(update, keyboard, text=messages.START_DELETE)\n\n return GET_DELETE_RECIPIENT\n\ndef get_delete_recipient(update, context):\n text = update.message.text.split(\" - \")[0]\n userdict = getUserDict(text)\n\n if userdict is not None:\n context.user_data['delete_recipient'] = userdict['userid']\n keyboard = [['Yes', 'No']]\n reply_with_keyboard(update, keyboard, messages.CONFIRM_DELETE)\n\n return CONFIRM_DELETE\n else:\n keyboard = prepare_keyboard(context.user_data['letter_to'])\n reply_with_keyboard(update, keyboard, text=messages.TRY_AGAIN_DELETE_RECIPIENT)\n\n return GET_DELETE_RECIPIENT\n\ndef confirm_delete_message(update, context):\n text = update.message.text\n if text == \"Yes\":\n filename = \"text{}#{}.enc\".format(update.message.from_user['id'], context.user_data['delete_recipient'])\n context.user_data['letter_to'].remove(context.user_data['delete_recipient'])\n log.debug(\"The letter_to is {} and the context.user_data['delete_recipient'] is {}\".format(context.user_data['letter_to'], context.user_data['delete_recipient']))\n context.user_data['delete_recipient'] = ''\n path = os.path.join(config.WORKDIR, filename)\n\n if os.path.exists(path):\n os.remove(path)\n reply_to_user(update, messages.DELETED_LETTER)\n else:\n reply_to_user(update, messages.DIDNT_DELETE)\n\n return ConversationHandler.END\n\ndef cancel(update, context):\n reply_to_user(update, \"Okay, operation canceled.\")\n return ConversationHandler.END\n\ndef send_messages(update, context):\n user = update.message.from_user\n if user['username'] != \"KtlTheBest\":\n reply_to_user(update, \"Sorry, you are not allowed to use this command\")\n return ConversationHandler.END\n\n sendout_and_delete_messages()\n return ConversationHandler.END\n\ndef main():\n setup()\n return\n\nif __name__ == \"__main__\":\n main()\n" } ]
4
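In the secret_letter_sender `main.py` above, `get_and_save_encryption_key` turns a user passphrase into a Fernet key by repeating it until 32 bytes are available, truncating, and base64-encoding. A stand-alone round-trip sketch of that same derivation (the passphrase and message below are made up; a non-empty passphrase is assumed, as in the bot's flow). Note this is plain key stretching rather than a salted KDF; a hardened variant would run the passphrase through something like PBKDF2 before the base64 step.

```python
# Round-trip sketch mirroring the key derivation in get_and_save_encryption_key.
import base64

from cryptography.fernet import Fernet

def derive_key(passphrase: str) -> bytes:
    key = passphrase.encode('utf-8')
    orig = key
    while len(key) < 32:   # repeat the passphrase until 32 bytes are available
        key += orig
    return base64.b64encode(key[:32])  # Fernet accepts this 44-char key

key = derive_key('correct horse battery staple')  # made-up passphrase
token = Fernet(key).encrypt(b'a secret letter')
assert Fernet(key).decrypt(token) == b'a secret letter'
```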
cornell-cup/ece-edison-sockets
https://github.com/cornell-cup/ece-edison-sockets
2bebb2f43483df599251fe3e85702ffd0dbfa3ac
f55003173923c54b838b0927c485fc1d2b4ed1b9
5bec13f24c59bc4e796c51c625a4cd951a7d1e1b
refs/heads/master
2021-01-21T19:20:44.073566
2016-03-31T03:37:57
2016-03-31T03:37:57
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.767578125, "alphanum_fraction": 0.767578125, "avg_line_length": 38.230770111083984, "blob_id": "a3e4dfae3907e17b28670a3344e357fdeed00fe7", "content_id": "a6ab193afd62f3048506bb7ca4b4bbdffebfd8f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 512, "license_type": "no_license", "max_line_length": 127, "num_lines": 13, "path": "/README.md", "repo_name": "cornell-cup/ece-edison-sockets", "src_encoding": "UTF-8", "text": "# Socket Programming with Edison\nWe would like to communicate with Intel Edison wirelessly so we setup the Edison as a server and commincate with it using TCP. \n\n### Running network\nOn the server side you only need to call the tcp_server file\n> python tcp_server.py\n\nOn the client side you need call the file along with the host name and port number, as follows\n> python tcp_client.py hostname port\n\n### Task List\n- [ ] Use socketServer class for easier management\n- [ ] Use threads for non-blocking execution \n" }, { "alpha_fraction": 0.5569917559623718, "alphanum_fraction": 0.5687426328659058, "avg_line_length": 23.84848403930664, "blob_id": "bee0257ba6f000564c510666ad4849501a0fa188", "content_id": "0e478d35e4ae4b4afb7bb5bfb1be5aae86e1a318", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 851, "license_type": "no_license", "max_line_length": 62, "num_lines": 33, "path": "/tcp_server_threads.py", "repo_name": "cornell-cup/ece-edison-sockets", "src_encoding": "UTF-8", "text": "''' TCP Server with threads '''\r\nimport sys\r\nimport socket\r\nfrom thread import * \r\n# import mraa\r\n\r\ndef client_thread(sock, address):\r\n sock.sendall(\"You are now connected to the server\\n\")\r\n while True:\r\n # receiving data from client\r\n data = sock.recv(1024)\r\n msg = 'You sent: ' + data\r\n if not data:\r\n break\r\n sock.sendall(msg)\r\n print \"Client (%s,%s) is offline\" % address\r\n sock.close()\r\n\r\nif __name__ == \"__main__\":\r\n HOST = '' \r\n PORT = 6789 \r\n\r\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n server.bind((HOST, PORT))\r\n server.listen(10)\r\n print \"Server is listening\"\r\n\r\n while True:\r\n sock, addr = server.accept()\r\n print \"Client (%s, %s) connected\" % addr\r\n start_new_thread(client_thread, (sock,addr))\r\n\r\n server.close()" }, { "alpha_fraction": 0.4996746778488159, "alphanum_fraction": 0.5048796534538269, "avg_line_length": 24.96491241455078, "blob_id": "db40e0b0e5edd795ac78a47b521c3ac544c59737", "content_id": "005488f8d993aaff5654fb330b781651b685b0eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1537, "license_type": "no_license", "max_line_length": 87, "num_lines": 57, "path": "/tcp_client.py", "repo_name": "cornell-cup/ece-edison-sockets", "src_encoding": "UTF-8", "text": "import socket\r\nimport select\r\nimport string\r\nimport sys\r\n\r\ndef prompt(myName):\r\n # dislay name for client\r\n sys.stdout.write('<' + myName + '> ')\r\n sys.stdout.flush()\r\n\r\n\r\n# main function\r\nif __name__ == \"__main__\":\r\n if (len(sys.argv) < 3):\r\n print 'Usage: python tcp_client.py HOSTNAME PORT'\r\n sys.exit()\r\n\r\n HOST = sys.argv[1]\r\n PORT = int(sys.argv[2])\r\n RECV_BUFFER = 4096\r\n\r\n c_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n c_sock.settimeout(2)\r\n\r\n # connect to remote host\r\n try: \r\n c_sock.connect((HOST, PORT))\r\n except :\r\n print 'Unable to connect'\r\n 
sys.exit()\r\n\r\n print 'Connected to host ' + str(HOST) + 'Start sending messages'\r\n prompt('ME')\r\n\r\n while True:\r\n socket_list = [sys.stdin, c_sock]\r\n\r\n # Get the sockets which are readable\r\n read_sockets, write_sockets, error_sockets = select.select(socket_list, [], [])\r\n\r\n for sock in read_sockets:\r\n # incoming message from remote server \r\n if sock == c_sock:\r\n data = sock.recv(RECV_BUFFER)\r\n if not data:\r\n print '\\nDisconnected from TCP server'\r\n sys.exit()\r\n else:\r\n # print data\r\n sys.stdout.write(data)\r\n prompt('ME')\r\n\r\n #user entered a message \r\n else:\r\n msg = sys.stdin.readline()\r\n c_sock.send(msg)\r\n prompt('ME')\r\n" } ]
3
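The ece-edison-sockets README above leaves "Use socketServer class for easier management" as an open task. A minimal sketch of what that refactor could look like using Python 3's standard `socketserver` module (hypothetical, and Python 3 where the repo's scripts are Python 2): `ThreadingTCPServer` spawns one thread per connection, replacing the manual `start_new_thread` calls in `tcp_server_threads.py`.

```python
# Hypothetical socketserver-based rewrite of tcp_server_threads.py (Python 3).
import socketserver


class EchoHandler(socketserver.StreamRequestHandler):
    def handle(self):
        self.wfile.write(b"You are now connected to the server\n")
        for line in self.rfile:              # runs in its own thread per client
            self.wfile.write(b"You sent: " + line)
        print("Client (%s, %s) is offline" % self.client_address)


if __name__ == "__main__":
    HOST, PORT = "", 6789
    with socketserver.ThreadingTCPServer((HOST, PORT), EchoHandler) as server:
        print("Server is listening")
        server.serve_forever()
```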
ChriXiang/Feature-maps-visualization
https://github.com/ChriXiang/Feature-maps-visualization
370f6452d77186765b875ede6f5dc562795a059e
91a23763b3ef4d5a1a49be8ed2e4cfd64beb8049
58258aee7a37b87b0a691030a6216fb62502734a
refs/heads/master
2020-08-04T07:22:31.466636
2019-10-01T09:46:37
2019-10-01T09:46:37
212,054,413
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.624826192855835, "alphanum_fraction": 0.671002984046936, "avg_line_length": 37.88417053222656, "blob_id": "71590df7b2d9ef5118b506b61feca8f592f93905", "content_id": "86cd03b7c6aadd324e04f709cf766ee6df08fbbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10070, "license_type": "no_license", "max_line_length": 118, "num_lines": 259, "path": "/vis_deconv.py", "repo_name": "ChriXiang/Feature-maps-visualization", "src_encoding": "UTF-8", "text": "import cv2\nimage = cv2.imread('/Users/xiangtiange1/Desktop/cbir/pathology_image/Training_data/Benign/t9.tif')\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\nimage = cv2.resize(image,(224,224))\n#use cv2 to read in image and resize to fit in VGG\n\ndef get_map(arg):\n\t\"\"\"\n\tHelper method to get index map\n\targ: output BEFORE max pooling\n\treturn: index map\n\t\"\"\"\n index_map = np.zeros((arg.shape[3],arg.shape[1]//2, arg.shape[2]//2))\n for idx in range(arg.shape[3]):\n for row in range(arg.shape[1]//2):\n for col in range(arg.shape[2]//2):\n index_map[idx][row][col] = np.argmax(arg[0,:,:,idx][row*2:row*2+2,col*2:col*2+2])\n print(index_map.shape)\n return index_map\n\ndef unpooling(index, value):\n\t\"\"\"\n\tHelper method to unpool\n\tindex: index map from get_map\n\tvalue: incoming feature map for unpooling\n\treturn: unpooled feature map\n\t\"\"\"\n holders = np.zeros((1, index.shape[1] * 2, index.shape[2] * 2, index.shape[0]))\n for idx in range(index.shape[0]):\n for row in range(index.shape[1]):\n for col in range(index.shape[2]):\n if index[idx,row,col] == 0:\n holders[0,row*2,col*2,idx] = value[0,row,col,idx]\n elif index[idx,row,col] == 1:\n holders[0,row*2 + 1,col*2,idx] = value[0,row,col,idx]\n elif index[idx,row,col] == 2:\n holders[0,row*2,col*2 + 1,idx] = value[0,row,col,idx]\n elif index[idx,row,col] == 3:\n holders[0,row*2 + 1,col*2 + 1,idx] = value[0,row,col,idx]\n print(holders.shape)\n return holders\n\n#---------------dependencies---------------\nfrom keras.layers import Input, Dense, Reshape, Flatten, Dropout\nfrom keras.layers import BatchNormalization, Activation, ZeroPadding2D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.convolutional import Conv2D, Convolution2D, MaxPooling2D, Conv2DTranspose\nfrom keras.models import Sequential, Model, load_model\n\nfrom keras.utils.np_utils import *\nfrom keras.preprocessing import image\n#-----------construct deconv blocks---------\n'''\nunpool the feature map between every adjcant block.\nto capture a feature map of a particular block, set the 3rd parameters of input_tenosr shape to be 1.\ne.g. 
fetch one feature map from block 4, set input_tensor4 = Input(shape = (img_size/8, img_size/8, 512 --> 1))\nOnly the captured block input_tensor should be set to 1, all others maintain the same shape.\n'''\nimg_size = 224\n\ninput_tensor5 = Input(shape = (img_size/16, img_size/16, 512))\n# x = Activation(\"relu\")(input_tensor5)\nx = Conv2DTranspose(512,(3,3), activation='relu', padding='same', name='t13')(input_tensor5)\n#x = Activation(\"relu\")(x)\nx = Conv2DTranspose(512,(3,3), activation='relu', padding='same', name='t12')(x)\n#x = Activation(\"relu\")(x)\nx = Conv2DTranspose(512,(3,3), activation='relu', padding='same', name='t11')(x)\nblock5_model = Model(input_tensor5, x)\n\ninput_tensor4 = Input(shape = (img_size/8, img_size/8, 512))\n#x = Activation(\"relu\")(input_tensor4)\nx = Conv2DTranspose(512,(3,3), activation='relu', padding='same', name='t10')(input_tensor4)\n#x = Activation(\"relu\")(x)\nx = Conv2DTranspose(512,(3,3), activation='relu', padding='same', name='t9')(x)\n#x = Activation(\"relu\")(x)\nx = Conv2DTranspose(256,(3,3), activation='relu', padding='same', name='t8')(x)\nblock4_model = Model(input_tensor4, x)\n\ninput_tensor3 = Input(shape = (img_size/4, img_size/4, 256))\n#x = Activation(\"relu\")(input_tensor3)\nx = Conv2DTranspose(256,(3,3), activation='relu', padding='same', name='t7')(input_tensor3)\n#x = Activation(\"relu\")(x)\nx = Conv2DTranspose(256,(3,3), activation='relu', padding='same', name='t6')(x)\n#x = Activation(\"relu\")(x)\nx = Conv2DTranspose(128,(3,3), activation='relu', padding='same', name='t5')(x)\nblock3_model = Model(input_tensor3, x)\n\ninput_tensor2 = Input(shape = (img_size/2, img_size/2, 128))\n#x = Activation(\"relu\")(input_tensor2)\nx = Conv2DTranspose(128,(3,3), activation='relu', padding='same', name='t4')(input_tensor2)\n#x = Activation(\"relu\")(x)\nx = Conv2DTranspose(64,(3,3), activation='relu', padding='same', name='t3')(x)\nblock2_model = Model(input_tensor2, x)\n\ninput_tensor1 = Input(shape = (img_size, img_size, 64))\n#x = Activation(\"relu\")(input_tensor1)\nx = Conv2DTranspose(64,(3,3), activation='relu', padding='same', name='t2')(input_tensor1)\n#x = Activation(\"relu\")(x)\nx = Conv2DTranspose(3,(3,3), activation='relu', padding='same', name='t1')(x)\n#x = Activation(\"relu\")(x)\nblock1_model = Model(input_tensor1, x)\n\n\n#-----------find the most predominant feature map----\nfetch = Model(inputs=base_model.input, outputs=base_model.get_layer('conv2d_52').output)\nimg_in = fetch.predict([[image]])\nmmax = 0\nmarg = -1 #saved as index to select the same filter while setting weights\nfor i in range(img_in.shape[3]):\n if np.sum(img_in[0,:,:,i]) > mmax:\n marg = i\n mmax = np.sum(img_in[0,:,:,i])\nprint(mmax, marg)\n\n\n#-----------set weights---------\n'''\nweights[0]: filters (maintain the same)\nweights[1]: bias (all set to 0)\nuncomment the code in corresponding block, which is supposed to be consistent with the precious construct section.\nAll other blocks should leave the commented code commented.\n'''\nweights = base_model.get_layer('conv2d_52').get_weights()\nweights[1] = weights[1] * 0\n# rep = weights[0][:,:,:,marg]\n# rep = np.expand_dims(rep, axis=3)\n# print(rep.shape)\n# weights[0] = rep\nblock5_model.get_layer('t13').set_weights(weights)\n\nweights = base_model.get_layer('conv2d_51').get_weights()\nweights[1] = weights[1] * 0\nblock5_model.get_layer('t12').set_weights(weights)\n\nweights = base_model.get_layer('conv2d_50').get_weights()\nweights[1] = weights[1] * 
0\nblock5_model.get_layer('t11').set_weights(weights)\n\nweights = base_model.get_layer('conv2d_49').get_weights()\nweights[1] = weights[1] * 0\n# rep = weights[0][:,:,:,marg]\n# rep = np.expand_dims(rep, axis=3)\n# print(rep.shape)\n# weights[0] = rep\nblock4_model.get_layer('t10').set_weights(weights)\n\nweights = base_model.get_layer('conv2d_48').get_weights()\nweights[1] = weights[1] * 0\nblock4_model.get_layer('t9').set_weights(weights)\n\nweights = base_model.get_layer('conv2d_47').get_weights()\nweights[1] = weights[1][0:256] * 0\nblock4_model.get_layer('t8').set_weights(weights)\n\nweights = base_model.get_layer('conv2d_46').get_weights()\nweights[1] = weights[1] * 0\n# rep = weights[0][:,:,:,marg]\n# rep = np.expand_dims(rep, axis=3)\n# print(rep.shape)\n# weights[0] = rep\nblock3_model.get_layer('t7').set_weights(weights)\n\nweights = base_model.get_layer('conv2d_45').get_weights()\nweights[1] = weights[1] * 0\n\nblock3_model.get_layer('t6').set_weights(weights)\n\nweights = base_model.get_layer('conv2d_44').get_weights()\nweights[1] = weights[1][0:128] * 0\nblock3_model.get_layer('t5').set_weights(weights)\n\nweights = base_model.get_layer('conv2d_43').get_weights()\nweights[1] = weights[1] * 0\n# rep = weights[0][:,:,:,marg]\n# rep = np.expand_dims(rep, axis=3)\n# print(rep.shape)\n# weights[0] = rep\nblock2_model.get_layer('t4').set_weights(weights)\n\nweights = base_model.get_layer('conv2d_42').get_weights()\nweights[1] = weights[1][0:64] * 0\nblock2_model.get_layer('t3').set_weights(weights)\n\nweights = base_model.get_layer('conv2d_41').get_weights()\nweights[1] = weights[1] * 0\n# rep = weights[0][:,:,:,marg]\n# rep = np.expand_dims(rep, axis=3)\n# print(rep.shape)\n# weights[0] = rep\nblock1_model.get_layer('t2').set_weights(weights)\n\nweights = base_model.get_layer('conv2d_40').get_weights()\nweights[1] = weights[1][0:3] * 0\nblock1_model.get_layer('t1').set_weights(weights)\n#-----------set weights ends---------\n\n#-----------process---------\n'''\ngeneral work flow: feature map => (deconv => relu => unpooling) * conv_layer_number => output\nready_pools: get the most recent feature map BEFORE the upcoming pooling layer, so that we can record the max indices.\nThe process MUST ALWAYS start with phase1, then insert phase1 into the block you want.\nphase6 is the final result matrix.\n'''\nphase1 = np.expand_dims(fetch.predict([[image]])[:,:,:,marg],axis=3)\n\nphase2 = block5_model.predict([phase1])#default start point if you want to work with feature map from block 5\n\nfetch = Model(inputs=base_model.input, outputs=base_model.get_layer('conv2d_49').output)\nready_pool = fetch.predict([[image]])\n\nindex_map = get_map(ready_pool)\nunpooled = unpooling(index_map, phase2)\n\nphase3 = block4_model.predict([unpooled])\n#start from here, set unpooled --> phase1 if you want to work with feature map from block 4 \n\nfetch = Model(inputs=base_model.input, outputs=base_model.get_layer('conv2d_46').output)\nready_pool2 = fetch.predict([[image]])\n\nindex_map2 = get_map(ready_pool2)\nunpooled2 = unpooling(index_map2, phase3)\n\nphase4 = block3_model.predict([unpooled2])\n#start from here, set unpooled2 --> phase1 if you want to work with feature map from block 3\n\nfetch = Model(inputs=base_model.input, outputs=base_model.get_layer('conv2d_43').output)\nready_pool3 = fetch.predict([[image]])\n\nindex_map3 = get_map(ready_pool3)\nunpooled3 = unpooling(index_map3, phase4)\n\nphase5 = block2_model.predict([unpooled3])\n#start from here, set unpooled3 --> phase1 if you want to work 
with feature map from block 2\n\nfetch = Model(inputs=base_model.input, outputs=base_model.get_layer('conv2d_41').output)\nready_pool4 = fetch.predict([[image]])\n\nindex_map4 = get_map(ready_pool4)\nunpooled4 = unpooling(index_map4, phase5)\n\nphase6 = block1_model.predict([unpooled4])\n#start from here, set unpooled4 --> phase1 if you want to work with feature map from block 1\n\n#-----------show output---------\n'''\nrescale the output to the range 0-1\n'''\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfor idx in range(3):\n print(np.min(phase6[0,:,:,idx]), np.max(phase6[0,:,:,idx]), np.mean(phase6[0,:,:,idx]))\n if np.min(phase6[0,:,:,idx]) < 0:\n phase6[0,:,:,idx] = phase6[0,:,:,idx] + -1 * np.min(phase6[0,:,:,idx])\n if np.max(phase6[0,:,:,idx]) != 0:\n phase6[0,:,:,idx] = phase6[0,:,:,idx] /np.max(phase6[0,:,:,idx])\n \nplt.imshow(phase6[0,:,:,:])\nplt.show()\n#plt.imsave('result.tif',phase6[0,:,:,:])" }, { "alpha_fraction": 0.6521739363670349, "alphanum_fraction": 0.7003366947174072, "avg_line_length": 16.47058868408203, "blob_id": "c96216210d15758bc71ebc727c57b3e9441a6686", "content_id": "619d5892217c48be3a69e154613b3fcd0ec538f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 594, "license_type": "no_license", "max_line_length": 94, "num_lines": 34, "path": "/README.md", "repo_name": "ChriXiang/Feature-maps-visualization", "src_encoding": "UTF-8", "text": "# Feature-maps-visualization*\n\n* Project due date: 2019 January.\n* My implementation of \"Visualizing and Understanding Convolutional Networks\" on VGG16.\n* Only the feature map with the maximum total activation has been selected in each conv block.\n\n## Demo Results\n\n### Input\n\n![input](/images/input.jpg)\n\n### Conv block1-conv1\n\n![f1](/images/f1.jpg)\n\n### Conv block2-conv1\n\n![f2](/images/f2.jpg)\n\n### Conv block3-conv1\n\n![f3](/images/f3.jpg)\n\n### Conv block4-conv1\n\n![f4](/images/f4.jpg)\n\n### Conv block5-conv1\n\n![f5](/images/f5.jpg)\n\n \n*The project was done during an internship in ICT, CAS.\n" } ]
2
nickmachnik/codon-degeneracy
https://github.com/nickmachnik/codon-degeneracy
76157da73e61d936b4d5113386ca6c440beb2fc2
c402056abac520ce3832c390ce6aafa9a5243ec4
7e281aaea1679067e74cc2d3a9b11fa6b1580b7a
refs/heads/master
2022-11-05T07:37:28.515311
2020-06-17T09:58:33
2020-06-17T09:58:33
266,854,762
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5643609166145325, "alphanum_fraction": 0.5750435590744019, "avg_line_length": 32.24685287475586, "blob_id": "cdf8d090f2c34300d8dd568705a283cff4dc23f5", "content_id": "d5990c80ab420d90efbcb40f0885c09e8eacf6fe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13201, "license_type": "permissive", "max_line_length": 87, "num_lines": 397, "path": "/codon_degeneracy.py", "repo_name": "nickmachnik/codon-degeneracy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom Bio.Seq import Seq\nfrom Bio.Data.CodonTable import unambiguous_dna_by_name\nfrom skbio.alignment import local_pairwise_align_ssw\nfrom skbio.sequence import Protein\nfrom skbio.alignment._pairwise import blosum50\nfrom itertools import combinations\n\n\ndef _truncate(seq: str):\n \"\"\"Removes the prefix before the first ATG.\n and any trailing nucleotides that are not\n part of a full codon.\n\n Args:\n seq (str): Nucleotide sequence to truncate\n\n Returns:\n str or None: the truncated sequence or None if\n no start codon was found.\n \"\"\"\n seq = seq.upper()\n for start in range(0, len(seq) - 2):\n codon = seq[start:start+3]\n if codon == \"ATG\":\n res = seq[start:]\n trailing_nuc = len(res) % 3\n if trailing_nuc > 0:\n return res[:-trailing_nuc]\n else:\n return res\n\n\ndef _translate(seq: str, table=\"Standard\"):\n \"\"\"Translates a given DNA sequence into a\n protein sequence, using a specified codon table.\n\n Args:\n seq (str): DNA sequence. Expects the coding strand and\n a start with a leading ATG codon.\n table (str, optional): NCBI table name as used in Bio.Data.CodonTable\n\n Returns:\n skbio.sequence.Protein: Protein object with the translated sequence.\n \"\"\"\n return Protein(str(Seq(seq).translate(table=table)))\n\n\ndef _align(a: Protein, b: Protein):\n \"\"\"Wraps the skbio pairwise ssw alilgner.\n\n Args:\n a (str): sequence a\n b (str): sequence b\n\n Returns:\n skbio.alignment.TabularMSA: skbio alignment table\n \"\"\"\n return local_pairwise_align_ssw(a, b, substitution_matrix=blosum50)\n\n\ndef _hamming_distance(a, b):\n distance = 0\n differential_sites = []\n for p in range(len(a)):\n if a[p] != b[p]:\n distance += 1\n differential_sites.append(p)\n return distance, differential_sites\n\n\ndef _site_degeneracy(codons, min_x=None):\n \"\"\"Group codons by site for which they are degenerate.\n\n Args:\n codons (array_like): array of base triplets, e.g. 
codons for\n a particular amino acid.\n min_x (None, optional): Filters out groups of degenerate codons that\n have less than min_x elements.\n\n Returns:\n dict: mapping between each degenerate site and the groups of\n codons which are degenerate at that site.\n \"\"\"\n def resolve_pairs(pairs):\n codons = set()\n for a, b in pairs:\n codons.add(a)\n codons.add(b)\n\n resolved = [set([codons.pop()])]\n for a in codons:\n added = False\n for group in resolved:\n if any((a, b) in pairs or (b, a) in pairs for b in group):\n group.add(a)\n added = True\n break\n if not added:\n resolved.append(set([a]))\n return resolved\n\n sites2pairs = {}\n for a, b in combinations(codons, 2):\n distance, sites = _hamming_distance(a, b)\n if distance == 1:\n sites2pairs.setdefault(sites[0], set())\n sites2pairs[sites[0]].add((a, b))\n\n if min_x is None:\n res = {site: resolve_pairs(pairs)\n for site, pairs in sites2pairs.items()}\n else:\n res = {}\n for site, pairs in sites2pairs.items():\n resolved = [s for s in resolve_pairs(pairs) if len(s) >= min_x]\n if resolved:\n res[site] = resolved\n return res\n\n\ndef _x_fold_degenerate_aa_from_codon_table(x: int, table: str):\n \"\"\"Extracts amino acids that are encoded by x\n different codons from an NCBI codon table.\n\n Args:\n x (int): fold degeneration to filter by.\n table (str, optional): NCBI table name as used in Bio.Data.CodonTable\n\n Returns:\n dict: maps each amino acid (keys) to a dict that specifies\n the degenerate codons for each site.\n\n \"\"\"\n codon_table = unambiguous_dna_by_name[table].forward_table\n reverse_table = {}\n for codon, aa in codon_table.items():\n reverse_table.setdefault(aa, [])\n reverse_table[aa].append(codon)\n x_fold_table = {}\n for aa, codons in reverse_table.items():\n if len(codons) >= x:\n degeneracy = _site_degeneracy(codons, x)\n if degeneracy:\n x_fold_table[aa] = degeneracy\n return x_fold_table\n\n\ndef _triplet(seq: str, i: int):\n \"\"\"Return the ith triplet in a sequence.\n\n Args:\n seq (str): Sequence\n i (int): 0-based index of the target triplet.\n\n Returns:\n str: The target triplet.\n \"\"\"\n start = i * 3\n return seq[start:start+3]\n\n\ndef _triplet_with_context(seq: str, i: int):\n \"\"\"Return the ith triplet in a sequence with the\n previous and following nucleotide\n\n Args:\n seq (str): Sequence\n i (int): 0-based index of the target triplet.\n\n Returns:\n str: The target triplet.\n \"\"\"\n start = i * 3\n return seq[start-1:start+4]\n\n\ndef _aligned_ffds(a: str, b: str, table_a=\"Standard\", table_b=\"Standard\"):\n \"\"\"Extracts the four-fold degenerate codons at conserved sites\n from a protein sequence alignment of two coding DNA sequences.\n\n Args:\n a (str): coding DNA sequence a\n b (str): coding DNA sequence b\n table_a (str, optional): NCBI table name as used in Bio.Data.CodonTable\n table_b (str, optional): NCBI table name as used in Bio.Data.CodonTable\n\n Yields:\n tuple: aligned four-fold degenerate codons in sequence a and b\n\n Raises:\n ValueError: if any of the input sequences do not contain\n an ATG start codon.\n \"\"\"\n proteins = []\n truncated = []\n for s, table in [[a, table_a], [b, table_b]]:\n ts = _truncate(s)\n if not ts:\n raise ValueError(\"DNA sequence without ATG codon provided!\")\n else:\n truncated.append(ts)\n proteins.append(_translate(ts, table))\n alignment = _align(*proteins)\n\n # shorten the input sequences to the aligned subsequences\n a = truncated[0][alignment[2][0][0]*3:]\n b = truncated[1][alignment[2][1][0]*3:]\n\n degenerate_aa = 
set(\n _x_fold_degenerate_aa_from_codon_table(4, table_a)).intersection(\n set(_x_fold_degenerate_aa_from_codon_table(4, table_b)))\n\n # iterate over the aligned sequences, tracking gap offsets so that the\n # triplet indices refer to the ungapped sequences (as in substitutions_per_ffds)\n offset_a, offset_b = 0, 0\n for i, (ca, cb) in enumerate(zip(\n str(alignment[0][0]), str(alignment[0][1]))\n ):\n if ca == \"-\":\n offset_a += 1\n if cb == \"-\":\n offset_b += 1\n if ca == cb and ca in degenerate_aa:\n yield _triplet(a, i - offset_a), _triplet(b, i - offset_b)\n\n\ndef substitutions_per_ffds(\n a: str, b: str, table_a=\"Standard\", table_b=\"Standard\"\n) -> ((int, int), [str, str]):\n \"\"\"Estimates the numbers of neutral substitutions per site by counting\n the number of substitutions at four-fold degenerate sites.\n\n Args:\n a (str): coding DNA sequence a\n b (str): coding DNA sequence b\n table_a (str, optional): NCBI table name as used in Bio.Data.CodonTable\n table_b (str, optional): NCBI table name as used in Bio.Data.CodonTable\n\n Returns:\n (int, int): number of substitutions, number of sites\n [str, str]: the selected ORFs of the input sequences\n \"\"\"\n proteins = []\n truncated = []\n for s, table in [[a, table_a], [b, table_b]]:\n ts = _truncate(s)\n if not ts:\n raise ValueError(\"DNA sequence without ATG codon provided!\")\n else:\n truncated.append(ts)\n proteins.append(_translate(ts, table))\n alignment = _align(*proteins)\n\n # shorten the input sequences to the aligned subsequences\n a = truncated[0][alignment[2][0][0]*3:]\n b = truncated[1][alignment[2][1][0]*3:]\n\n ffdc_a = _x_fold_degenerate_aa_from_codon_table(4, table_a)\n ffdc_b = _x_fold_degenerate_aa_from_codon_table(4, table_b)\n\n common_ffds = {}\n for aa in set(ffdc_a).intersection(set(ffdc_b)):\n for site, codons in ffdc_a[aa].items():\n if site in ffdc_b[aa]:\n common_codons = []\n for codon_set in codons:\n if codon_set in ffdc_b[aa][site]:\n common_codons.append(codon_set)\n if common_codons:\n common_ffds.setdefault(aa, dict())\n common_ffds[aa][site] = common_codons\n\n n_sites = 0\n n_sub = 0\n offset_a, offset_b = 0, 0\n # iterate over the aligned sequences\n for i, (ca, cb) in enumerate(zip(\n str(alignment[0][0]), str(alignment[0][1]))\n ):\n if ca == \"-\":\n offset_a += 1\n if cb == \"-\":\n offset_b += 1\n if ca == cb and ca in common_ffds:\n triplet_a, triplet_b = _triplet(a, i - offset_a), _triplet(b, i - offset_b)\n _, locs = _hamming_distance(triplet_a, triplet_b)\n n_sites += len(common_ffds[ca])\n for loc in locs:\n if loc in common_ffds[ca]:\n n_sub += 1\n\n return (n_sub, n_sites), truncated\n\n\ndef substitutions_per_ffds_by_cpg_context(\n a: str, b: str, table_a=\"Standard\", table_b=\"Standard\"\n):\n \"\"\"Estimates the numbers of neutral substitutions per site by counting\n the number of substitutions at four fold degenerate sites.\n Differentiates between four fold degenerate sites in different potential\n CpG contexts: neither preceded by C nor followed by G (nonCpG), preceded by C\n but not followed by G (postC), followed by G but not preceded by C (preG),\n and preceded by C and followed by G (postCpreG).\n\n Note: the number of sites considered here may be substantially lower than\n in substitutions_per_ffds, as this function requires the sites\n preceding and following a four fold degenerate site\n to be identical in the two aligned sequences.\n\n See also:\n Kondrashov FA, Ogurtsov AY, Kondrashov AS. Selection in favor\n of nucleotides G and C diversifies evolution rates and levels of\n polymorphism at mammalian synonymous sites.\n J Theor Biol. 2006;240(4):616‐626. 
doi:10.1016/j.jtbi.2005.10.020\n\n Args:\n a (str): coding DNA sequence a\n b (str): coding DNA sequence b\n table_a (str, optional): NCBI table name as used in Bio.Data.CodonTable\n table_b (str, optional): NCBI table name as used in Bio.Data.CodonTable\n \"\"\"\n\n def categorize_ffds(qintuplet_a, qintuplet_b, loc):\n if qintuplet_a[loc - 1] != qintuplet_b[loc - 1]:\n return None\n elif qintuplet_a[loc + 1] != qintuplet_b[loc + 1]:\n return None\n else:\n pre, post = qintuplet_a[loc - 1], qintuplet_a[loc + 1]\n\n if pre == 'C' and post == 'G':\n return \"postCpreG\"\n elif pre == 'C':\n return \"postC\"\n elif post == 'G':\n return \"preG\"\n else:\n return \"nonCpG\"\n\n proteins = []\n truncated = []\n for s, table in [[a, table_a], [b, table_b]]:\n ts = _truncate(s)\n if not ts:\n raise ValueError(\"DNA sequence without ATG codon provided!\")\n else:\n truncated.append(ts)\n proteins.append(_translate(ts, table))\n alignment = _align(*proteins)\n\n # shorten the input sequences to the aligned subsequences\n a = truncated[0][alignment[2][0][0]*3:]\n b = truncated[1][alignment[2][1][0]*3:]\n alignment_len = len(str(alignment[0][1]))\n\n ffdc_a = _x_fold_degenerate_aa_from_codon_table(4, table_a)\n ffdc_b = _x_fold_degenerate_aa_from_codon_table(4, table_b)\n\n common_ffds = {}\n for aa in set(ffdc_a).intersection(set(ffdc_b)):\n for site, codons in ffdc_a[aa].items():\n if site in ffdc_b[aa]:\n common_codons = []\n for codon_set in codons:\n if codon_set in ffdc_b[aa][site]:\n common_codons.append(codon_set)\n if common_codons:\n common_ffds.setdefault(aa, dict())\n common_ffds[aa][site] = common_codons\n\n subs_and_sites = {\n \"nonCpG\": [0, 0],\n \"postC\": [0, 0],\n \"preG\": [0, 0],\n \"postCpreG\": [0, 0]\n }\n offset_a, offset_b = 0, 0\n\n # iterate over the aligned sequences\n for i, (ca, cb) in enumerate(zip(\n str(alignment[0][0]), str(alignment[0][1]))\n ):\n if ca == \"-\":\n offset_a += 1\n if cb == \"-\":\n offset_b += 1\n if i == 0 or i == alignment_len - 1:\n continue\n if ca == cb and ca in common_ffds:\n qintuplet_a = _triplet_with_context(a, i - offset_a)\n qintuplet_b = _triplet_with_context(b, i - offset_b)\n _, locs = _hamming_distance(qintuplet_a[1:4], qintuplet_b[1:4])\n for loc in common_ffds[ca]:\n category = categorize_ffds(qintuplet_a, qintuplet_b, loc + 1)\n if category is None:\n continue\n else:\n subs_and_sites[category][1] += 1\n subs_and_sites[category][0] += (loc in locs)\n\n return subs_and_sites\n" }, { "alpha_fraction": 0.6962025165557861, "alphanum_fraction": 0.7179023623466492, "avg_line_length": 16.28125, "blob_id": "0db1b4057b89d35024fb822b3eced5ec06084cc7", "content_id": "e8af656751541ef58147c5308d6479cba8aaa695", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 553, "license_type": "permissive", "max_line_length": 104, "num_lines": 32, "path": "/CHANGELOG.md", "repo_name": "nickmachnik/codon-degeneracy", "src_encoding": "UTF-8", "text": "# Change Log\n\nAll notable changes to this project will be documented in this file.\nThis project adheres to [Semantic Versioning](http://semver.org/).\n\n## Unreleased\n\n## [0.1.3]\n\n### Fixed\n\n- Wrong installation info in README\n\n### Added\n\n- `substitutions_per_ffds_by_cpg_context`, which allows to differentiate between different CpG contexts.\n\n## [0.1.2]\n\n### Added\n\n- More thorough documentation\n\n## [0.1.1]\n\n### Fixed\n\n- Consideration of multiple four fold degenerate sites per codon in estimation of substitution 
counts\n\n## [0.1.0]\n\n- Initial release\n" }, { "alpha_fraction": 0.5965277552604675, "alphanum_fraction": 0.6013888716697693, "avg_line_length": 23.406780242919922, "blob_id": "e79137a1524c53fffc364c01246e5554060d5208", "content_id": "6b0a61e454c4be6a23155b621811eb316b42b220", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1440, "license_type": "permissive", "max_line_length": 138, "num_lines": 59, "path": "/setup.py", "repo_name": "nickmachnik/codon-degeneracy", "src_encoding": "UTF-8", "text": "import os\nfrom setuptools import setup, find_packages, Command\n\n\n__version__ = None\nexec(open('version.py').read())\n\n\nclass CleanCommand(Command):\n \"\"\"\n Custom clean command to tidy up the project root.\n \"\"\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n os.system(\n 'rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info ./htmlcov')\n\n\ndef read_file(filename):\n with open(os.path.join(os.path.dirname(__file__), filename)) as file:\n return file.read()\n\n\nsetup(\n author='Nick Noel Machnik',\n author_email='nick.machnik@gmail.com',\n name='codon-degeneracy',\n version=__version__,\n description=\"\"\"\n Routines for the extraction of degenerate sites and estimation of numbers of neutral substitutions from sequences and alignments.\"\"\",\n long_description=read_file('README.md'),\n long_description_content_type='text/markdown',\n url='https://github.com/nickmachnik/codon-degeneracy.git',\n setup_requires=[\n 'setuptools>=18.0',\n ],\n packages=find_packages(),\n install_requires=[\n 'numpy>=1.8.0',\n 'scikit-bio',\n 'biopython'\n ],\n # scripts=['bin/chess'],\n cmdclass={\n 'clean': CleanCommand\n },\n classifiers=(\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ),\n)\n" }, { "alpha_fraction": 0.7531671524047852, "alphanum_fraction": 0.7574580907821655, "avg_line_length": 32.06756591796875, "blob_id": "9c6147f1895686371fbee3611520477871815864", "content_id": "e426dc0fac410b013147772d60c85161f2c95dc7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4894, "license_type": "permissive", "max_line_length": 238, "num_lines": 148, "path": "/README.md", "repo_name": "nickmachnik/codon-degeneracy", "src_encoding": "UTF-8", "text": "\n# codon-degeneracy\n[![Python application](https://github.com/nickmachnik/codon-degeneracy/workflows/Python%20application/badge.svg)](https://pypi.org/project/codon-degeneracy)\n![License](https://img.shields.io/github/license/nickmachnik/codon-degeneracy)\n\nThis python package provides routines for the extraction of [degenerate sites](https://en.wikipedia.org/wiki/Codon_degeneracy) from sequences and alignments. The latter is particularly useful for estimations of rates of neutral evolution.\n\n## Dependencies\n\nThis code uses [biopython](https://biopython.org/) and [scikit-bio](http://scikit-bio.org/) internally. 
In order to install via pip, [numpy](https://numpy.org/) has to be installed.\n\n## Installing\n\nSimply clone this repo:\n\n```\ngit clone https://github.com/nickmachnik/codon-degeneracy.git [TARGET DIR]\n```\n\nand then install via pip\n```\npip install [TARGET DIR]\n```\n\nor install directly from PyPI (this won't include unreleased changes as specified in the [changelog](CHANGELOG.md)):\n```\npip install codon-degeneracy\n```\n\n## Testing\n\nTest the cloned package:\n```\ncd [TARGET DIR]\npython -m unittest\n```\n\n## Usage\n\nThere are more useful and well documented functions under the hood than shown here, which I encourage you to explore by browsing the code.\n\n### Counting substitutions per four fold degenerate site\n\nOne of the main features of the package is the counting of neutral substitutions at four fold degenerate sites.\nThis is best done with known orthologue pairs between species.\n`substitutions_per_ffds` provides that functionality and is easy to use like so:\n```python3\nfrom codon_degeneracy import substitutions_per_ffds as nsr\nseq_a = (\n \"ATACCCATGGCCAACCTCCTACTCCTCATTGTACCCATTC\"\n \"TAATCGCAATGGCATTCCTAATGCTTACCGAACGA\")\nseq_b = (\n \"ATGACCACAGTAAATCTCCTACTTATAATCATACCCACAT\"\n \"TAGCCGCCATAGCATTTCTCACACTCGTTGAACGA\")\n(number_of_substitutions, number_of_sites), (orf_a, orf_b) = nsr(\n # the input sequences\n seq_a,\n seq_b,\n # NCBI codon table names as used in Bio.Data.CodonTable\n \"Vertebrate Mitochondrial\",\n \"Vertebrate Mitochondrial\")\n```\nThe ORFs returned are there for sanity checks. The default behaviour is to select the first ATG codon\nas start.\n\n> NOTE: The numbers of neutral substitutions per site reported by this function are merely a lower bound,\n> as they do not include the possibility of multiple substitutions per site.\n\n### Substitutions at four fold degenerate sites separated by CpG context\n\nIn certain situations, it may be useful to differentiate between four fold degenerate sites\nthat could potentially exist in a CpG context and could therefore exhibit an elevated\nmutation rate and those that do not. `substitutions_per_ffds_by_cpg_context` provides that\nfunctionality.\nIt differentiates between four CpG contexts. 
Sites that are:\n - neither preceded by C nor followed by G (nonCpG)\n - preceded by C but not followed by G (postC)\n - followed by G but not preceded by C (preG)\n - preceded by C and followed by G (postCpreG)\n\n> Note: the number of sites considered here may be substantially lower than\n> in `substitutions_per_ffds`, as this function requires the sites\n> preceding and following a four fold degenerate site\n> to be identical in the two aligned sequences.\n\nThe function can be used in exactly the same way as shown for `substitutions_per_ffds` above.\n\n## License\n\nMIT license ([LICENSE](LICENSE.txt) or https://opensource.org/licenses/MIT)\n\n<!-- \nEnd with an example of getting some data out of the system or using it for a little demo\n\n## Running the tests\n\nExplain how to run the automated tests for this system\n\n### Break down into end to end tests\n\nExplain what these tests test and why\n\n```\nGive an example\n```\n\n### And coding style tests\n\nExplain what these tests test and why\n\n```\nGive an example\n```\n\n## Deployment\n\nAdd additional notes about how to deploy this on a live system\n\n## Built With\n\n* [Dropwizard](http://www.dropwizard.io/1.0.2/docs/) - The web framework used\n* [Maven](https://maven.apache.org/) - Dependency Management\n* [ROME](https://rometools.github.io/rome/) - Used to generate RSS Feeds\n\n## Contributing\n\nPlease read [CONTRIBUTING.md](https://gist.github.com/PurpleBooth/b24679402957c63ec426) for details on our code of conduct, and the process for submitting pull requests to us.\n\n## Versioning\n\nWe use [SemVer](http://semver.org/) for versioning. For the versions available, see the [tags on this repository](https://github.com/your/project/tags).\n\n## Authors\n\n* **Billie Thompson** - *Initial work* - [PurpleBooth](https://github.com/PurpleBooth)\n\nSee also the list of [contributors](https://github.com/your/project/contributors) who participated in this project.\n\n## License\n\nThis project is licensed under the MIT License - see the [LICENSE.md](LICENSE.md) file for details\n\n## Acknowledgments\n\n* Hat tip to anyone whose code was used\n* Inspiration\n* etc\n\n -->" } ]
4
saadmanrafat/twitivity
https://github.com/saadmanrafat/twitivity
d818703d574101b3dbc1683914d94a446283086e
26ab7b2bdd0725f34594df216852451eb3a955a8
ec830fcdb7ba4471606f59234509647a627681e2
refs/heads/master
2020-08-29T09:59:57.949398
2020-07-31T23:51:50
2020-07-31T23:51:50
217,999,942
30
6
null
null
null
null
null
[ { "alpha_fraction": 0.46378952264785767, "alphanum_fraction": 0.4697014391422272, "avg_line_length": 27.91452980041504, "blob_id": "a0f0266bb91dccea3d4463cf9a27ac26bc8c60a9", "content_id": "14b5fcbc9cf7bbd0452539870793f9dd840f96d7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3383, "license_type": "permissive", "max_line_length": 88, "num_lines": 117, "path": "/twitivity.py", "repo_name": "saadmanrafat/twitivity", "src_encoding": "UTF-8", "text": "import json\nimport hmac\nimport os\nimport hashlib\nimport base64\nimport re\n\nimport requests\n\nfrom abc import ABC, abstractmethod\n\nfrom tweepy.error import TweepError\nfrom tweepy import OAuthHandler\nfrom flask import Flask, request\n\n\nclass Activity:\n _protocol: str = \"https:/\"\n _host: str = \"api.twitter.com\"\n _version: str = \"1.1\"\n _product: str = \"account_activity\"\n _auth: OAuthHandler = OAuthHandler(\n os.environ[\"consumer_key\"], os.environ[\"consumer_secret\"]\n )\n _auth.set_access_token(\n os.environ[\"access_token\"], os.environ[\"access_token_secret\"]\n )\n\n def api(self, method: str, endpoint: str, data: dict = None) -> json:\n \"\"\"\n :param method: GET or POST\n :param endpoint: API Endpoint to be specified by user\n :param data: POST Request payload parameter\n :return: json\n \"\"\"\n try:\n with requests.Session() as r:\n response = r.request(\n url=\"/\".join(\n [\n self._protocol,\n self._host,\n self._version,\n self._product,\n endpoint,\n ]\n ),\n method=method,\n auth=self._auth.apply_auth(),\n data=data,\n )\n return response\n except TweepError:\n raise\n\n def register_webhook(self, callback_url: str) -> json:\n try:\n return self.api(\n method=\"POST\",\n endpoint=f\"all/{os.environ['env_name']}/webhooks.json\",\n data={\"url\": callback_url},\n )\n except Exception:\n raise\n\n def subscribe(self) -> json:\n try:\n return self.api(\n method=\"POST\",\n endpoint=f\"all/{os.environ['env_name']}/subscriptions.json\",\n )\n except Exception:\n raise\n\n\ndef url_params(url: str) -> str:\n pattern: str = r\"^[^\\/]+:\\/\\/[^\\/]*?\\.?([^\\/.]+)\\.[^\\/.]+(?::\\d+)?\\/\"\n return re.split(pattern=pattern, string=url)[-1]\n\n\nclass Event(ABC):\n CALLBACK_URL: str = None\n\n def __init__(self):\n self._server = self._get_server()\n\n @abstractmethod\n def on_data(self, data: json) -> None:\n pass\n\n def listen(self) -> None:\n self._server.run()\n\n def _get_server(self) -> Flask:\n try:\n app = Flask(__name__)\n\n @app.route(f\"/{url_params(url=self.CALLBACK_URL)}\", methods=[\"GET\", \"POST\"])\n def callback() -> json:\n if request.method == \"GET\":\n hash_digest = hmac.digest(\n key=os.environ[\"consumer_secret\"].encode(\"utf-8\"),\n msg=request.args.get(\"crc_token\").encode(\"utf-8\"),\n digest=hashlib.sha256,\n )\n return {\n \"response_token\": \"sha256=\"\n + base64.b64encode(hash_digest).decode(\"ascii\")\n }\n elif request.method == \"POST\":\n data = request.get_json()\n self.on_data(data)\n return {\"code\": 200}\n\n return app\n except Exception:\n raise\n" }, { "alpha_fraction": 0.7369168996810913, "alphanum_fraction": 0.7487568259239197, "avg_line_length": 42.98958206176758, "blob_id": "3f5552948db62bcb1708c5c4ed62190995c772fa", "content_id": "f8219122e4d5b44e5adc42cf3d2b13396c94f241", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4225, "license_type": "permissive", "max_line_length": 458, "num_lines": 96, "path": "/README.md", "repo_name": 
"saadmanrafat/twitivity", "src_encoding": "UTF-8", "text": "# Twitivity\n![PyPI - Python Version](https://img.shields.io/pypi/pyversions/imgur-scraper) [![Downloads](https://pepy.tech/badge/twitivity)](https://pepy.tech/project/twitivity) ![PyPI - License](https://img.shields.io/pypi/l/imgur-scraper)\n\nTwitter [Accounts Activity](https://developer.twitter.com/en/docs/accounts-and-users/subscribe-account-activity/overview) API Client Library for Python. \n\n![](demo.gif)\n\nAccount Activity API allows you to subscribe to user activities. Unlike Twitter's REST API or the Streaming API, the Account Activity API delivers data through webhook connections. Which makes it faster and allows it to deliver Twitter data to you in real-time. [You can subscribe to these user activities](https://developer.twitter.com/en/docs/accounts-and-users/subscribe-account-activity/overview).\n\n## Getting Started\n\n* [Apply for a Twitter Developer Account](https://developer.twitter.com/en/account/get-started)\n* [Create an application](https://developer.twitter.com/en/apps), fill in the required fields, the callback URL is your domain name with an added endpoint, for example `https://yourdomain.com/listener`. Twitter will later use this URL to send you account activity data. Make sure to enable \"Read, Write and Direct messages\" permission.\n* Navigate to the [Dev environment section](https://developer.twitter.com/en/account/environments) and `setup a dev environment` for the Account Activity API. Name a dev environment label of your choosing and select your app.\n\nThe next step is to register your webhook URL. Twitter will send a `GET` request with Challenge-Response Check or CRC token to verify you are the owner of the app and the webhook URL. To validate, an encrypted response token based on your consumer key and the CRC token has to be sent back to Twitter. Upon successful validation, registration of the webhook URL and subscription. Twitter will send data to this endpoint (the webhook URL) as a `POST` request.\n\n## Why Twitivity?\n\n`Twitivity` does all the heavy lifting under the hood. All you have to do is to create an app and set up a dev environment. Run the application and concentrate on what's really important — building your app. \n\n* Performs challenge-response check validation\n* Registers webhook URL.\n* Subscribes to current user's context\n* Receives Twitter Account Activity in real-time\n\n## Usage\n\n[Ngrok](https://ngrok.com/) is a handy tool to try out the API locally, on your machine. Install and run ngrok and replace your app's URL and callback URL with the link ngrok provides. 
Make sure to use the one with `https`.\n\n```terminal\n~$ ./ngrok http 5000\n```\n### Stream events in real time.\n\n```python3\n# stream_events.py\n\n>>> from twitivity import Event\n>>> import json\n\n>>> class StreamEvent(Event):\n CALLBACK_URL: str = \"https://yourdomain.com/listener\"\n\n def on_data(self, data: json) -> None:\n # process data\n\n>>> stream_events = StreamEvent()\n>>> stream_events.listen()\n```\n\n## Configuration\n\nThe configuration below only has to be done once before running the application for the first time.\n\n\n#### Store the credentials as environment variables.\n\n[`App`](https://developer.twitter.com/en/apps) :arrow_right: `Details` :arrow_right: `Keys and Tokens`\n\n```\n~$ export consumer_key=API_KEY\n~$ export consumer_secret=API_SECRET_KEY\n~$ export access_token=ACCESS_TOKEN\n~$ export access_token_secret=ACCESS_TOKEN_SECRET\n~$ export env_name=ENV_NAME # this is the dev environment label name you choose.\n```\n\n#### Register & Subscribe\n\nTo register the webhook URL and subscribe to activities, run both programs in **parallel** \n(first `stream_events.py` then `configure.py`). This will register the webhook URL and subscribe to the user's activities.\n\n```python3\n# configure.py\n>>> from twitivity import Activity\n\n>>> account_activity = Activity()\n>>> account_activity.register_webhook(\"https://youdomain.com/listener\")\n>>> account_activity.subscribe()\n\n# Response\n{\n 'id': '1198870971131686912', # webhook id\n 'url': 'https://yourdomain.com/listener',\n 'valid': True,\n 'created_timestamp': '2019-11-25 07:48:08 +0000'\n}\n```\n\n## Installation\n\n```\n~$ pip3 install twitivity\n```\nSupported Versions: **Python 3.6**, **Python 3.7** and **Python 3.8**\n" }, { "alpha_fraction": 0.6320754885673523, "alphanum_fraction": 0.6501572132110596, "avg_line_length": 31.615385055541992, "blob_id": "f4f444bcc69ab574d81b10e4d994d323dd56139a", "content_id": "1223f992418912b6a9bd415544f46c07040fad06", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1272, "license_type": "permissive", "max_line_length": 80, "num_lines": 39, "path": "/setup.py", "repo_name": "saadmanrafat/twitivity", "src_encoding": "UTF-8", "text": "from os.path import dirname, abspath, join\nfrom setuptools import setup\n\nNAME: str = \"twitivity\"\nAUTHOR: str = \"Saadman Rafat\"\nDESCRIPTION: str = \"Twitter Accounts Activity API Client Library for Python\"\nURL: str = \"https://github.com/saadmanrafat/twitivity\"\nREQUIRES_PYTHON: str = \">=3.6.0\"\nVERSION = \"0.1.2\"\nREQUIRED = [\"Flask==1.1.1\", \"requests==2.22.0\", \"tweepy==3.8.0\"]\nEMAIL = \"saadmanhere@gmail.com\"\n\nwith open(join(abspath(dirname(__file__)), \"README.md\"), encoding=\"utf-8\") as f:\n LONG_DESCRIPTION = f.read()\n\nsetup(\n name=NAME,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n version=VERSION,\n description=DESCRIPTION,\n author=AUTHOR,\n author_email=EMAIL,\n python_requires=REQUIRES_PYTHON,\n url=URL,\n license=\"MIT\",\n install_requires=REQUIRED,\n include_package_data=True,\n py_modules=[\"twitivity\"],\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n)\n" } ]
3
Monisha24J/snake_game
https://github.com/Monisha24J/snake_game
4f5803e9c85af0f1b66094e4aee518293619ad33
7619d101d37e6a612b1c40f42c4e087260dcbe4f
93516978e67a2a79de70bd5910367daca0957228
refs/heads/main
2023-06-07T14:14:09.657161
2021-06-30T18:21:04
2021-06-30T18:21:04
381,790,262
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.514843761920929, "alphanum_fraction": 0.5390625, "avg_line_length": 22.703702926635742, "blob_id": "6134357328af387ad18d0b30dc3c927709432b55", "content_id": "9fa147977474ba53f3579a2d27ca5082c2f22548", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1280, "license_type": "no_license", "max_line_length": 80, "num_lines": 54, "path": "/snake.py", "repo_name": "Monisha24J/snake_game", "src_encoding": "UTF-8", "text": "from turtle import Turtle\n\nUP = 90\nDOWN = 270\nRIGHT = 0\nLEFT = 180\n\n\nclass Snake:\n\n def __init__(self):\n self.segment = []\n self.create_snake()\n\n def create_snake(self):\n\n for i in range(3):\n self.add_segment(i)\n\n def add_segment(self, position):\n x = 0\n tim = Turtle(shape=\"square\")\n tim.color(\"white\")\n tim.penup()\n tim.goto(x, y=0)\n x -= 20\n self.segment.append(tim)\n\n def extend(self):\n self.add_segment(self.segment[-1].position())\n\n\n def move(self):\n for seg_num in range(len(self.segment) - 1, 0, -1): # (start,stop,step)\n new_x = self.segment[seg_num - 1].xcor()\n new_y = self.segment[seg_num - 1].ycor()\n self.segment[seg_num].goto(new_x, new_y)\n self.segment[0].forward(20)\n\n def up(self):\n if self.segment[0].heading() != DOWN:\n self.segment[0].setheading(UP)\n\n def down(self):\n if self.segment[0].heading() != UP:\n self.segment[0].setheading(DOWN)\n\n def right(self):\n if self.segment[0].heading() != LEFT:\n self.segment[0].setheading(RIGHT)\n\n def left(self):\n if self.segment[0].heading() != RIGHT:\n self.segment[0].setheading(LEFT)\n" } ]
1
wooda916/isc-work
https://github.com/wooda916/isc-work
d623dcbdc5059f8ca37692676f6d9f5da9d53369
5db5febd890a994054e40797e4b2ca10ac2feabf
e915973934f737f5e23473d0c09e8467d839fbe0
refs/heads/master
2021-01-21T10:29:21.442451
2017-03-02T16:57:12
2017-03-02T16:57:12
83,435,579
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6074073910713196, "alphanum_fraction": 0.644444465637207, "avg_line_length": 10.727272987365723, "blob_id": "3ac65ebbe0155835681f123f77b2226b55c676c8", "content_id": "28ecd7bf21756cdae3912063a468a7a7d0a92ae3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 41, "num_lines": 11, "path": "/python/numpy_arrays.py", "repo_name": "wooda916/isc-work", "src_encoding": "UTF-8", "text": "import numpy as np\n\narr = np.array([range(4), range(10, 14)])\n\nprint arr.shape\n\nprint arr.size\n\nprint arr.max()\n\nprint arr.min()\n\n \n" }, { "alpha_fraction": 0.6009615659713745, "alphanum_fraction": 0.6105769276618958, "avg_line_length": 17.454545974731445, "blob_id": "c748894fca7ae95d73444566ca8ec97d88fb6e09", "content_id": "c47360146ccab0152f2a6a44454bbb46fe4ad5d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 208, "license_type": "no_license", "max_line_length": 40, "num_lines": 11, "path": "/python/input_output.py", "repo_name": "wooda916/isc-work", "src_encoding": "UTF-8", "text": "print \"\\nExercise 1: \\n \"\n\nwith open(\"weather.csv\", \"r\") as reader:\n data = reader.read()\n\nprint data\n\nprint \"\\nExercise 2: \\n \"\n\nwith open(\"weather.csv\", \"r\") as reader:\n line = reader.readline()\n \n" }, { "alpha_fraction": 0.5061224699020386, "alphanum_fraction": 0.5387755036354065, "avg_line_length": 11.199999809265137, "blob_id": "a534b97fcba95e1963e2a9eb6140ac186faa86f0", "content_id": "0358acfb161ad7b272148481afb1e6e29bf84d5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 245, "license_type": "no_license", "max_line_length": 94, "num_lines": 20, "path": "/python/basics.py", "repo_name": "wooda916/isc-work", "src_encoding": "UTF-8", "text": "b = 4\nc = 5\n\na = (b**2 + c**2)**.5\n\nprint a\n\nprint type(a)\nprint type(b)\nprint type(c)\n\na = int(a)\n\nprint a\n\nprint type(a)\n\nprint str(a) + \" squared equals \" + str(b) + str(\" squared plus \") + str(c) + str(\" squared \")\n\nprint a**2 == b**2+c**2\n\n" }, { "alpha_fraction": 0.42500001192092896, "alphanum_fraction": 0.5924999713897705, "avg_line_length": 14.384614944458008, "blob_id": "e8e99ed269fa2125a60d935d0a400e8818cf7c69", "content_id": "72b1ce25d25a40b54735e1966fb17ef86a3f31a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 400, "license_type": "no_license", "max_line_length": 49, "num_lines": 26, "path": "/python/matplotlib_multi_axis.py", "repo_name": "wooda916/isc-work", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\n\nfig, ax1 = plt.subplots()\n\ntimes = range(7)\nco2 = 250, 256, 272, 260, 300, 320, 389\n\nax1.plot(times, co2, \"b--\")\nax1.set_ylabel('[$CO_2$]')\nax2 = ax1.twinx()\n\ntemp = [14.1, 15.5, 16.3, 18.1, 17.3, 19.1, 20.2]\n\nax2.set_ylabel(\"Temp (degC)\")\n\nplt.show()\n\n#ex2\n\nplt.subplot (1, 3, 1)\nx = range(0, 10, 1)\nplt.plot(x)\n\nplt.subplot(1, 3, 2)\ny = range(10, 0, -1)\nplt.plt(y)\n" }, { "alpha_fraction": 0.5259259343147278, "alphanum_fraction": 0.5444444417953491, "avg_line_length": 21, "blob_id": "193027421f2abeb4a449fabb5454df9099018b19", "content_id": "70b2a70435178bfd26f809adb6bd20f6d3db2d1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 270, "license_type": "no_license", "max_line_length": 63, 
"num_lines": 12, "path": "/python/strings.py", "repo_name": "wooda916/isc-work", "src_encoding": "UTF-8", "text": "s = \"I {0} to write \\\n{1}\".format(\"LOVE\", \"python\")\nprint s\n\nsplit_s = s.split()\n\nprint split_s\n\nfor word in split_s:\n if word.find(\"o\") > -1:\n print \"Well, {0} has an 'o' in it, innit!\".format(word)\n print \"yes it does, look - {0}\".format(word)\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.37015780806541443, "alphanum_fraction": 0.4892396032810211, "avg_line_length": 16.375, "blob_id": "b77f5cacd0b34c3ce159fbf46f82cbc6ccfd6cec", "content_id": "1819f0598145d57742105181a35ebd07f8a7db7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 697, "license_type": "no_license", "max_line_length": 55, "num_lines": 40, "path": "/python/numpy_intro.py", "repo_name": "wooda916/isc-work", "src_encoding": "UTF-8", "text": "import numpy as np\n\nx = range(1,11)\n\na1 = np.array(x, np.int32)\n\na2 = np.array(x, np.float64)\n\nprint a1.dtype\n\nprint a2.dtype\n\n\nprint \"\\nex2\\n\"\n\narr1 = np.zeros((3, 1, 4, 5))\narr2 = np.ones((2, 3, 4))\narr3 = np.arange((1000))\n\nprint \"this is array 1: \\n\", arr1, \"\\n\"\nprint \"this is array 2: \\n\", arr2, \"\\n\"\nprint \"this is array 3: \\n\", \"arr3 would be here\", \"\\n\"\n\nprint \"=========================================\"\nprint \"\\nex3::\\n\"\n\na = np.array([2, 3.2, 5.5, -6.4, -2.2, 2.4])\n\nprint a\nprint a[1]\nprint a[1:4]\n\na = np.array([[2, 3.2, 5.5, -6.4, -2.2, 2.4],\n [1, 22, 4, 0.1, 5.3, -9],\n [3, 1, 2.1, 21, 1.1, -2]])\n\nprint \"\\nthis is a[:, 3]\"\nprint a[:, 3]\n\nprint (38/31)*5\n\n\n" }, { "alpha_fraction": 0.6521739363670349, "alphanum_fraction": 0.6521739363670349, "avg_line_length": 20, "blob_id": "f88a8fd3d66ca1c0c46877ffc79e4ea3047fe9df", "content_id": "44ca340a618ee04d958c3d9defb1874eb91c2e82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23, "license_type": "no_license", "max_line_length": 20, "num_lines": 1, "path": "/python/truefalse.py", "repo_name": "wooda916/isc-work", "src_encoding": "UTF-8", "text": "print False == False\n\n\n" }, { "alpha_fraction": 0.5841836929321289, "alphanum_fraction": 0.6224489808082581, "avg_line_length": 12.736842155456543, "blob_id": "eca2e32e04e1538669d3744e06439deda6a95b0a", "content_id": "798fa984e0ce2ead86443efe390415aa986b1801", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 784, "license_type": "no_license", "max_line_length": 53, "num_lines": 57, "path": "/python/lists.py", "repo_name": "wooda916/isc-work", "src_encoding": "UTF-8", "text": "n = 1\n\nprint \"Exercise 1: \\n \" \n\nmylist = [1, 2, 3, 4, 5]\n\nprint mylist [1]\n\nprint mylist [-2]\n\nprint mylist [2:]\n\nprint mylist [1:3]\n\nprint \"\\nExercise 2: \\n \"\n\none_to_ten = range(1,11)\n\nprint one_to_ten\n\none_to_ten [0] = 10\n\nprint one_to_ten\n\none_to_ten.append(11)\n\nprint one_to_ten\n\none_to_ten.extend([12,13,14])\n\nprint one_to_ten\n\nprint \"\\nExercise 3: \\n \"\n\nforward = []\nbackward = []\n\nvalues = [\"a\", \"b\", \"c\"]\n\nfor thing in values:\n forward.append(thing)\n backward.insert(0, thing)\n\nprint \"forward is: %s\" % forward\nprint \"backward is: %s\" % backward\n\nforward.reverse()\n\nprint \"forward (reversed) is: %s\" % forward\n\nprint \"does forward = backward?\", forward == backward\n\nprint \"\\nExercise 4: \\n \"\n\ncountries = [\"uk\", \"usa\", \"uk\", \"uae\"]\n\nprint 
countries.count(\"uk\"), \"\\n\"\n\n" }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.5210084319114685, "avg_line_length": 9.34782600402832, "blob_id": "8a8223b5e596a63864576a77da65ca6c43aba1b9", "content_id": "d5b4470c0779dec8635d4c327cfce2dc7288f6f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 238, "license_type": "no_license", "max_line_length": 39, "num_lines": 23, "path": "/python/arraycalcs.py", "repo_name": "wooda916/isc-work", "src_encoding": "UTF-8", "text": "import numpy as np\n\na = np.array([range(4), range(10, 14)])\n\nb = np.array([2, -1, 1, 0])\n\nprint \"\\n\", a, \"\\n\"\n\nprint b\n\n\nprint \"\\nThis is a x b: \"\n \nprint a * b\n\nb1 = b * 100\nb2 = b * 100.0\n\nprint \"\\n\", b1\n\nprint \"\\n\", b2\n\nprint b1 == b2\n" }, { "alpha_fraction": 0.5559380650520325, "alphanum_fraction": 0.6041308045387268, "avg_line_length": 15.514286041259766, "blob_id": "5c8ec140edec68bbb1e59893a970e78f11e8512f", "content_id": "cc178549215115b44183d092ed7e0c569e822084", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 581, "license_type": "no_license", "max_line_length": 59, "num_lines": 35, "path": "/python/tuples.py", "repo_name": "wooda916/isc-work", "src_encoding": "UTF-8", "text": "print \"Exercise 1: \\n\"\n\nt = (1,)\n\nprint t[-1]\n\nr1 = range(100,201)\n\nprint \"r1 = \", r1\n\nt1 = tuple(r1)\n\nprint \"tuple value 1 = \", t1[0], \"tuple value 2 = \", t1[-1]\n\nprint \"\\nExercise 2: \\n \"\n\nmylist = [23, \"hi\", 2.4e-10]\n\nfor item in mylist:\n print \"%s (index: %s)\" % (item, mylist.index(item))\n\nprint \"\\nconvert to enumerate\\n\"\n\nfor (count, item) in enumerate(mylist):\n print \"index:\", count, \"item:\", item\n\nprint \"\\nExercise 3: \\n \"\n\n(first, middle, last) = mylist\n\nprint first,\"\\n\", middle, \"\\n\",last\n\nfirst, middle, last = middle, last, first\n\nprint first,middle,last\n\n\n\n" }, { "alpha_fraction": 0.4801980257034302, "alphanum_fraction": 0.6237623691558838, "avg_line_length": 11.625, "blob_id": "38af80abe13a028871b1f4a656bef7cc68ba231a", "content_id": "ba1565fbf360539088aecdd34cbd35baea113a76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 202, "license_type": "no_license", "max_line_length": 41, "num_lines": 16, "path": "/python/matplotlibP_ex1.py", "repo_name": "wooda916/isc-work", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\n\n#plt.plot(range(10))\n\n#plt.pause(0.1)\n\n#plt.show()\n\nprint \"\\nex2\\n\"\n\ntimes = range(7)\nco2 = [250, 265, 272, 260, 300, 320, 389]\n\nplt.plot(times, co2, 'b--')\n\nplt.show()\n" }, { "alpha_fraction": 0.2800000011920929, "alphanum_fraction": 0.5, "avg_line_length": 5.857142925262451, "blob_id": "4fc2de04058cd898a88213abe28337f4dc8d6d49", "content_id": "056810102bb78c370ffe0efd6cc1d49aa830cf6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50, "license_type": "no_license", "max_line_length": 14, "num_lines": 7, "path": "/python/TV.py", "repo_name": "wooda916/isc-work", "src_encoding": "UTF-8", "text": "a = 30 - 18.5\n\nprint a\n\nb = 31.25 - 11\n\nprint b\n\n\n" }, { "alpha_fraction": 0.5080385804176331, "alphanum_fraction": 0.5434083342552185, "avg_line_length": 9.655172348022461, "blob_id": "7ae0363d54af3216b0c83df9b65e34a0818fa2f2", "content_id": "43841cbdc0d38901c218bd55a9577699e80b2ab7", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 311, "license_type": "no_license", "max_line_length": 21, "num_lines": 29, "path": "/python/control.py", "repo_name": "wooda916/isc-work", "src_encoding": "UTF-8", "text": "num_moons = 3\n\nprint \"before\"\n\nwhile num_moons>-0:\n print num_moons\n num_moons -=1\n\nprint \"after\"\n\n\nmoons = 3\n\nif moons < 0:\n print \"less\"\nelif moons == 0:\n print \"equal\"\nelse:\n print \"greater\"\n\nprint \"\\n\"\n\nnum = 0\nwhile num <= 0\n if (num %2) == 1:\n print num\n num +=1\n\nprint \"\\n\"\n\n\n" }, { "alpha_fraction": 0.6157518029212952, "alphanum_fraction": 0.6658711433410645, "avg_line_length": 10.270270347595215, "blob_id": "822ffe586d5564525a38d9fce110969c799d3b49", "content_id": "32bbbfc2cbe3812ca314f1200b5e0f0a2e6a3d50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 419, "license_type": "no_license", "max_line_length": 52, "num_lines": 37, "path": "/python/numpy_miss_values.py", "repo_name": "wooda916/isc-work", "src_encoding": "UTF-8", "text": "import numpy.ma as MA\n\nmarr = MA.masked_array(range(10), fill_value = -999)\n\nprint marr, marr.fill_value\n\nmarr[2] = MA.masked\n\nprint marr\n\nprint marr.mask\n\nnarr = MA.masked_where(marr > 6, marr)\n\nprint narr\n\nprint narr.fill_value\n\nx = MA.filled(narr)\n\nprint x\n\nprint type(x)\n\nprint \"\\nex2:\\n\"\n\nm1 = MA.masked_array(range(1,9))\n\nprint m1\n\nm2 = m1.reshape(2, 4)\n\nprint m2\n\nm3 = MA.masked_greater(m2, 6)\n\nprint \"\\n\", m3\n\n\n" }, { "alpha_fraction": 0.4711538553237915, "alphanum_fraction": 0.5192307829856873, "avg_line_length": 5.058823585510254, "blob_id": "ffbaa1c080260df70e1d2bc9156060f951f710d3", "content_id": "733db462697dc7f3bed7bcb715623c8fa3b526d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 104, "license_type": "no_license", "max_line_length": 13, "num_lines": 17, "path": "/python/aliasing.py", "repo_name": "wooda916/isc-work", "src_encoding": "UTF-8", "text": "a = [0, 1, 2]\n\nb = a\n\nprint a, b\n\nb[1]=\"hello!\"\n\nprint b\n\nprint a, b\n\na.append(3)\n\nprint a\n\nprint a, b\n\n" }, { "alpha_fraction": 0.5121951103210449, "alphanum_fraction": 0.5731707215309143, "avg_line_length": 17, "blob_id": "7b2b3e7875a64e3ac0d610f8ab14d39a23347892", "content_id": "b3134025b02d6080065a4ad6d60fcc89310cb3b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 164, "license_type": "no_license", "max_line_length": 52, "num_lines": 9, "path": "/python/l_s_ex.py", "repo_name": "wooda916/isc-work", "src_encoding": "UTF-8", "text": "age = 23\nname = 'jemma'\nheight = 1.63\n\nif name == 'jemma' and age >= 23 and height >= 1.63:\n print \"yep\"\n\nif not name == \"Hannah\":\n print \"Not allowed in.\"\n\n\n" }, { "alpha_fraction": 0.5316455960273743, "alphanum_fraction": 0.5611814260482788, "avg_line_length": 18.70833396911621, "blob_id": "c625082af7136c5a6c52ba61db0e01d205cb9926", "content_id": "08062892c7e1992591accc3ffcc0789be4007f90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 474, "license_type": "no_license", "max_line_length": 66, "num_lines": 24, "path": "/python/functions.py", "repo_name": "wooda916/isc-work", "src_encoding": "UTF-8", "text": "def double_it (number):\n return 2 * number\n\nprint double_it (2)\n\nprint double_it(2.5)\n\nprint double_it (\"oi \")\n\nprint 
double_it(\"\\n\")\n\ndef calc_hypo (a, b):\n if type(a) not in (int, float) or type(b) not in (int, float):\n print \"Bad Argument!\"\n return False\n if a <= 0 or b <= 0:\n print \"Bad Argument (you numpty!)\"\n return False\n hypo = ((a**2.0)+(b**2.0))**0.5\n return hypo\n\nprint calc_hypo (1, 2)\n\nprint calc_hypo (\"hi\", \"bi\")\n\n" }, { "alpha_fraction": 0.5844155550003052, "alphanum_fraction": 0.5844155550003052, "avg_line_length": 14, "blob_id": "407068359a201a657a587fdeec4ebf8a55777aaa", "content_id": "2dacc25f5fbec092e624dc8d2357276ab9c4b9a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 77, "license_type": "no_license", "max_line_length": 43, "num_lines": 5, "path": "/python/quick_ex.py", "repo_name": "wooda916/isc-work", "src_encoding": "UTF-8", "text": "s = \"Hello World\"\n\nprint s.title()\n\nprint s.replace (\"World\", \"people\").upper()\n\n\n" }, { "alpha_fraction": 0.5524126291275024, "alphanum_fraction": 0.5757071375846863, "avg_line_length": 14.35897445678711, "blob_id": "33314c2d5f0bd58c2c153d6b6b7564a74e1b4df0", "content_id": "fcc4b414bd54694f86c4f26ead0983f2e9a7176e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 601, "license_type": "no_license", "max_line_length": 49, "num_lines": 39, "path": "/python/sets_dicts.py", "repo_name": "wooda916/isc-work", "src_encoding": "UTF-8", "text": "a = set([0, 1, 2, 3, 4, 5])\nb = set([2, 4, 6, 8])\n\nprint a.union(b)\n\nprint a.intersection(b)\n\nprint \"\\nExercise 2\\n\"\n\nband = [\"mel\", \"geri\", \"victoria\", \"mel\", \"emma\"]\n\ncounts = {}\n\nfor name in band:\n if name not in counts:\n counts[name] = 1\n else:\n counts[name] += 1\n\nfor name in counts:\n print name, counts[name]\n\nprint \"\\nExercise 3\\n\"\n\nif {}: print 'hi'\n\nd = {\"maggie\": \"uk\", \"ronnie\": \"usa\"}\n\nprint d.items()\nprint d.keys()\nprint d.values()\n\nprint d.get(\"maggie\", \"nowhere\")\n\nprint d.get(\"turd\", \"nowhere\")\n\nres = d.setdefault(\"mikhail\", \"ussr\")\n\nprint res, d[\"mikhail\"]\n\n\n" } ]
19
u2takey/tensorflow
https://github.com/u2takey/tensorflow
f4ca7c747e57f36694d3c97140b54544e8482a9c
ccc995b3041b5da710058a0875c95ed11a650310
a3adc3b367a89c5f3355886f748cd78674327d18
refs/heads/master
2021-01-24T17:14:11.853217
2018-06-29T09:47:06
2018-06-29T09:47:06
123,227,112
1
0
Apache-2.0
2018-02-28T04:02:47
2018-02-28T03:59:47
2018-02-28T00:53:55
null
[ { "alpha_fraction": 0.6396163105964661, "alphanum_fraction": 0.6562094688415527, "avg_line_length": 32.83333206176758, "blob_id": "11f002ab2235cdfe476d6b2e873a5e5bd4d1ef4d", "content_id": "166002ca7faa41429fcea66ac21b2c9d481360ae", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3857, "license_type": "permissive", "max_line_length": 80, "num_lines": 114, "path": "/tensorflow/contrib/control_flow/python/cond_v2_test.py", "repo_name": "u2takey/tensorflow", "src_encoding": "UTF-8", "text": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for cond_v2.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.control_flow.python import cond_v2\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\nclass NewCondTest(test.TestCase):\n\n def _testCond(self, true_fn, false_fn, train_vals):\n pred = array_ops.placeholder(dtypes.bool, name=\"pred\")\n\n expected = control_flow_ops.cond(pred, true_fn, false_fn, name=\"expected\")\n actual = cond_v2.cond_v2(pred, true_fn, false_fn, name=\"actual\")\n\n expected_grad = gradients_impl.gradients(expected, train_vals)\n actual_grad = gradients_impl.gradients(actual, train_vals)\n\n with self.test_session() as sess:\n expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(\n (expected, actual, expected_grad, actual_grad), {pred: True})\n self.assertEqual(expected_val, actual_val)\n self.assertEqual(expected_grad_val, actual_grad_val)\n\n expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(\n (expected, actual, expected_grad, actual_grad), {pred: False})\n self.assertEqual(expected_val, actual_val)\n self.assertEqual(expected_grad_val, actual_grad_val)\n\n def testBasic(self):\n x = constant_op.constant(1.0, name=\"x\")\n y = constant_op.constant(2.0, name=\"y\")\n\n def true_fn():\n return x * 2.0\n\n def false_fn():\n return y * 3.0\n\n self._testCond(true_fn, false_fn, [x])\n self._testCond(true_fn, false_fn, [x, y])\n self._testCond(true_fn, false_fn, [y])\n\n def testBasic2(self):\n x = constant_op.constant(1.0, name=\"x\")\n y = constant_op.constant(2.0, name=\"y\")\n\n def true_fn():\n return x * y * 2.0\n\n def false_fn():\n return 2.0\n\n self._testCond(true_fn, false_fn, [x])\n self._testCond(true_fn, false_fn, [x, y])\n self._testCond(true_fn, false_fn, [y])\n\n def testSecondDerivative(self):\n self.skipTest(\"b/109758172\")\n pred = array_ops.placeholder(dtypes.bool, name=\"pred\")\n x = 
constant_op.constant(3.0, name=\"x\")\n\n def true_fn():\n return math_ops.pow(x, 3)\n\n def false_fn():\n return x\n\n cond = cond_v2.cond_v2(pred, true_fn, false_fn, name=\"cond\")\n cond_grad = gradients_impl.gradients(cond, [x])\n cond_grad_grad = gradients_impl.gradients(cond_grad, [x])\n\n with self.test_session() as sess:\n # d[x^3]/dx = 3x^2\n true_val = sess.run(cond_grad, {pred: True})\n self.assertEqual(true_val, [27.0])\n # d[x]/dx = 1\n false_val = sess.run(cond_grad, {pred: False})\n self.assertEqual(false_val, [1.0])\n\n true_val = sess.run(cond_grad_grad, {pred: True})\n # d2[x^3]/dx2 = 6x\n self.assertEqual(true_val, [18.0])\n false_val = sess.run(cond_grad_grad, {pred: False})\n # d2[x]/dx2 = 0\n self.assertEqual(false_val, [0.0])\n\n\nif __name__ == \"__main__\":\n test.main()\n" }, { "alpha_fraction": 0.6662492156028748, "alphanum_fraction": 0.6825297474861145, "avg_line_length": 38.92499923706055, "blob_id": "41d365197b0dacc5ec485e2f73e22417a6f969c4", "content_id": "55edf76fcd3eed461e1465b569e1c2e9e2facbc0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1597, "license_type": "permissive", "max_line_length": 80, "num_lines": 40, "path": "/tensorflow/contrib/periodic_resample/ops/array_ops_test.cc", "repo_name": "u2takey/tensorflow", "src_encoding": "UTF-8", "text": "/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n==============================================================================*/\n\n#include \"tensorflow/core/framework/node_def_builder.h\"\n#include \"tensorflow/core/framework/shape_inference_testutil.h\"\n#include \"tensorflow/core/framework/tensor_testutil.h\"\n#include \"tensorflow/core/lib/core/status_test_util.h\"\n#include \"tensorflow/core/platform/test.h\"\n\nnamespace tensorflow {\n\nTEST(ArrayOpsTest, PeriodicResample_ShapeFn) {\n ShapeInferenceTestOp op(\"PeriodicResample\");\n // Case 1: output shape can be fully inferreed.\n PartialTensorShape shape({4, 4, -1});\n TensorShapeProto shape_proto;\n shape.AsProto(&shape_proto);\n\n TF_ASSERT_OK(NodeDefBuilder(\"test\", \"PeriodicResample\")\n .Input({\"values\", 0, DT_INT32})\n .Attr(\"shape\", shape_proto)\n .Finalize(&op.node_def));\n INFER_OK(op, \"[2,2,4]\", \"[4,4,1]\");\n // Case 2: output shape can not be inferred - report desired shape.\n INFER_OK(op, \"[2,2,?]\", \"[4,4,?]\");\n}\n\n} // end namespace tensorflow\n" }, { "alpha_fraction": 0.7177502512931824, "alphanum_fraction": 0.7218782305717468, "avg_line_length": 39.375, "blob_id": "1473eb3cac62141659cfa5080a253969062f3c08", "content_id": "b7a8177c44c88217560fb7f72c77d3ac1aa0c9ec", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1938, "license_type": "permissive", "max_line_length": 80, "num_lines": 48, "path": "/tensorflow/contrib/autograph/impl/special_functions.py", "repo_name": "u2takey/tensorflow", "src_encoding": "UTF-8", "text": "# Copyright 2017 
The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Special functions that only make sense for AutoGraph.\n\nThese functions are meant to ensure feature parity between Python and AutoGraph,\nso that the exact same code works in both modes. In general, AutoGraph will\nreplace these calls.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.autograph.operators import data_structures\n\n\ndef stack(list_or_tensor, element_dtype=None):\n \"\"\"Stacks the input, if it admits the notion of stacking. No-op otherwise.\n\n For example, a list of tensors can be stacked into a larger tensor. This\n function is similar to tf.stack, but it accepts non-lists and lists of\n non-tensors as arguments. In the latter case, the function does nothing.\n\n Args:\n list_or_tensor: Any entity.\n element_dtype: Optional dtype for the elements in the list. Required if the\n input is stackable, and the list is untyped.\n\n Returns:\n If the input is stackable, a new object representing the stacked inputs.\n Otherwise it returns list_or_tensor unchanged.\n \"\"\"\n return data_structures.list_stack(\n list_or_tensor,\n data_structures.ListStackOpts(\n element_dtype=element_dtype, original_call=lambda x: x))\n" }, { "alpha_fraction": 0.6605818867683411, "alphanum_fraction": 0.6628636717796326, "avg_line_length": 33.3725471496582, "blob_id": "1c168421a73f62187fbca5cce54a93abc0220e5e", "content_id": "e28f18833616797c8d0eb62ebfb9e1001df16093", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3506, "license_type": "permissive", "max_line_length": 80, "num_lines": 102, "path": "/tensorflow/core/kernels/data/identity_dataset_op.cc", "repo_name": "u2takey/tensorflow", "src_encoding": "UTF-8", "text": "/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n==============================================================================*/\n#include <map>\n\n#include \"tensorflow/core/framework/tensor.h\"\n#include \"tensorflow/core/kernels/data/dataset.h\"\n\nnamespace tensorflow {\nnamespace {\n\n// The purpose of identity dataset is to serve as a placeholder when performing\n// optimizations. 
It is not expected to be surfaced in the Python API.\nclass IdentityDatasetOp : public UnaryDatasetOpKernel {\n public:\n explicit IdentityDatasetOp(OpKernelConstruction* ctx)\n : UnaryDatasetOpKernel(ctx) {\n OP_REQUIRES_OK(ctx, ctx->GetAttr(\"output_types\", &output_types_));\n OP_REQUIRES_OK(ctx, ctx->GetAttr(\"output_shapes\", &output_shapes_));\n }\n\n protected:\n void MakeDataset(OpKernelContext* ctx, DatasetBase* input,\n DatasetBase** output) override {\n *output = new Dataset(ctx, input);\n }\n\n private:\n class Dataset : public GraphDatasetBase {\n public:\n Dataset(OpKernelContext* ctx, const DatasetBase* input)\n : GraphDatasetBase(ctx), input_(input) {\n input_->Ref();\n }\n\n ~Dataset() override { input_->Unref(); }\n\n std::unique_ptr<IteratorBase> MakeIteratorInternal(\n const string& prefix) const override {\n return std::unique_ptr<IteratorBase>(\n new Iterator({this, strings::StrCat(prefix, \"::Identity\")}));\n }\n\n const DataTypeVector& output_dtypes() const override {\n return input_->output_dtypes();\n }\n\n const std::vector<PartialTensorShape>& output_shapes() const override {\n return input_->output_shapes();\n }\n\n string DebugString() const override { return \"IdentityDatasetOp::Dataset\"; }\n\n protected:\n Status AsGraphDefInternal(OpKernelContext* ctx, DatasetGraphDefBuilder* b,\n Node** output) const override {\n Node* input_graph_node = nullptr;\n TF_RETURN_IF_ERROR(b->AddParentDataset(ctx, input_, &input_graph_node));\n TF_RETURN_IF_ERROR(b->AddDataset(this, {input_graph_node}, output));\n return Status::OK();\n }\n\n private:\n class Iterator : public DatasetIterator<Dataset> {\n public:\n explicit Iterator(const Params& params)\n : DatasetIterator<Dataset>(params) {}\n\n Status Initialize(IteratorContext* ctx) override {\n return errors::Unimplemented(strings::StrCat(prefix(), \"::Initialize\"));\n }\n\n Status GetNextInternal(IteratorContext* ctx,\n std::vector<Tensor>* out_tensors,\n bool* end_of_sequence) override {\n return errors::Unimplemented(\n strings::StrCat(prefix(), \"::GetNextInternal\"));\n }\n };\n\n const DatasetBase* const input_;\n };\n\n DataTypeVector output_types_;\n std::vector<PartialTensorShape> output_shapes_;\n};\n\nREGISTER_KERNEL_BUILDER(Name(\"IdentityDataset\").Device(DEVICE_CPU),\n IdentityDatasetOp);\n} // namespace\n} // namespace tensorflow\n" }, { "alpha_fraction": 0.40546363592147827, "alphanum_fraction": 0.5654946565628052, "avg_line_length": 67.24705505371094, "blob_id": "7225a6e3b519933a44eb39775f609c2b6cded778", "content_id": "e6f333aa5bb11449d5bf5d6c60cf77088649df8c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 11604, "license_type": "permissive", "max_line_length": 187, "num_lines": 170, "path": "/tensorflow/contrib/lite/tools/benchmark/README.md", "repo_name": "u2takey/tensorflow", "src_encoding": "UTF-8", "text": "# TFLite Model Benchmark Tool\n\n## Description\n\nA simple C++ binary to benchmark a TFLite model and its individual operators,\nboth on desktop machines and on Android.\n\n## To build/install/run\n\n### On Android:\n\n(0) Refer to https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android to edit the `WORKSPACE` to configure the android NDK/SDK.\n\n(1) Build for your specific platform, e.g.:\n\n```\nbazel build -c opt \\\n --config=android_arm \\\n --cxxopt='--std=c++11' \\\n tensorflow/contrib/lite/tools/benchmark:benchmark_model\n```\n\n(2) Connect your phone. 
Push the binary to your phone with adb push\n (make the directory if required):\n\n```\nadb push bazel-bin/tensorflow/contrib/lite/tools/benchmark/benchmark_model /data/local/tmp\n```\n\n(3) Make the binary executable.\n\n```\nadb shell chmod +x /data/local/tmp/benchmark_model\n```\n\n(4) Push the compute graph that you need to test. For example:\n\n```\nadb push mobilenet_quant_v1_224.tflite /data/local/tmp\n```\n\n(5) Run the benchmark. For example:\n\n```\nadb shell /data/local/tmp/benchmark_model \\\n --graph=/data/local/tmp/mobilenet_quant_v1_224.tflite \\\n --input_layer=\"Placeholder\" \\\n --input_layer_shape=\"1,224,224,3\" \\\n --input_layer_type=\"uint8\" \\\n --output_layer=\"MobilenetV1/Predictions/Reshape_1\" \\\n --num_threads=4\n```\n\n### On desktop:\n(1) build the binary\n\n```\nbazel build -c opt tensorflow/contrib/lite/tools/benchmark:benchmark_model\n```\n\n(2) Run on your compute graph, similar to the Android case but without the need of adb shell.\nFor example:\n\n```\nbazel-bin/tensorflow/contrib/lite/tools/benchmark/benchmark_model \\\n --graph=mobilenet_quant_v1_224.tflite \\\n --input_layer=\"Placeholder\" \\\n --input_layer_shape=\"1,224,224,3\" \\\n --input_layer_type=\"uint8\" \\\n --output_layer=\"MobilenetV1/Predictions/Reshape_1\" \\\n --num_threads=4\n```\n\nThe MobileNet graph used as an example here may be downloaded from\nhttps://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_v1_224_android_quant_2017_11_08.zip\n\n## Profiling model operators\nThe benchmark model binary also allows you to profile operators and give execution times of each operator. To do this,\ncompile the binary with a compiler flag that enables profiling to be compiled in. Pass **--copt=-DTFLITE_PROFILING_ENABLED**\nto compile benchmark with profiling support.\nFor example, to compile with profiling support on Android, add this flag to the previous command:\n\n```\nbazel build -c opt \\\n --config=android_arm \\\n --cxxopt='--std=c++11' \\\n --copt=-DTFLITE_PROFILING_ENABLED \\\n tensorflow/contrib/lite/tools/benchmark:benchmark_model\n```\nThis compiles TFLite with profiling enabled, now you can run the benchmark binary like before. 
The binary will produce detailed statistics for each operation similar to those shown below:\n\n```\n\n============================== Run Order ==============================\n\t [node type]\t [start]\t [first]\t [avg ms]\t [%]\t [cdf%]\t [mem KB]\t[times called]\t[Name]\n\t CONV_2D\t 0.000\t 9.132\t 9.132\t 0.121%\t 0.121%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_0/Relu6]\n\t DEPTHWISE_CONV_2D\t 9.135\t 3.280\t 3.280\t 0.043%\t 0.165%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_1_depthwise/Relu6]\n\t CONV_2D\t 12.419\t 6.877\t 6.877\t 0.091%\t 0.256%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_1_pointwise/Relu6]\n\t DEPTHWISE_CONV_2D\t 19.299\t 1.708\t 1.708\t 0.023%\t 0.278%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_2_depthwise/Relu6]\n\t CONV_2D\t 21.012\t 4.162\t 4.162\t 0.055%\t 0.334%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_2_pointwise/Relu6]\n\t DEPTHWISE_CONV_2D\t 25.177\t 3.520\t 3.520\t 0.047%\t 0.380%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_3_depthwise/Relu6]\n\t CONV_2D\t 28.701\t 10.218\t 10.218\t 0.136%\t 0.516%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_3_pointwise/Relu6]\n\t DEPTHWISE_CONV_2D\t 38.922\t 0.827\t 0.827\t 0.011%\t 0.527%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_4_depthwise/Relu6]\n\t CONV_2D\t 39.752\t 1.401\t 1.401\t 0.019%\t 0.545%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_4_pointwise/Relu6]\n\t DEPTHWISE_CONV_2D\t 41.156\t 1.290\t 1.290\t 0.017%\t 0.563%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_5_depthwise/Relu6]\n\t CONV_2D\t 42.448\t 5.995\t 5.995\t 0.080%\t 0.642%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_5_pointwise/Relu6]\n\t DEPTHWISE_CONV_2D\t 48.445\t 0.409\t 0.409\t 0.005%\t 0.647%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_6_depthwise/Relu6]\n\t CONV_2D\t 48.856\t 6.167\t 6.167\t 0.082%\t 0.729%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_6_pointwise/Relu6]\n\t DEPTHWISE_CONV_2D\t 55.026\t 0.629\t 0.629\t 0.008%\t 0.738%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_7_depthwise/Relu6]\n\t CONV_2D\t 55.656\t 6.464\t 6.464\t 0.086%\t 0.823%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_7_pointwise/Relu6]\n\t DEPTHWISE_CONV_2D\t 62.124\t 0.647\t 0.647\t 0.009%\t 0.832%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_8_depthwise/Relu6]\n\t CONV_2D\t 62.774\t 14.666\t 14.666\t 0.195%\t 1.026%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_8_pointwise/Relu6]\n\t DEPTHWISE_CONV_2D\t 77.444\t 0.635\t 0.635\t 0.008%\t 1.035%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_9_depthwise/Relu6]\n\t CONV_2D\t 78.081\t 7.186\t 7.186\t 0.095%\t 1.130%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_9_pointwise/Relu6]\n\t DEPTHWISE_CONV_2D\t 85.270\t 0.646\t 0.646\t 0.009%\t 1.139%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_10_depthwise/Relu6]\n\t CONV_2D\t 85.918\t 9.529\t 9.529\t 0.126%\t 1.265%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_10_pointwise/Relu6]\n\t DEPTHWISE_CONV_2D\t 95.451\t 0.628\t 0.628\t 0.008%\t 1.273%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_11_depthwise/Relu6]\n\t CONV_2D\t 96.081\t 2.077\t 2.077\t 0.028%\t 1.301%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_11_pointwise/Relu6]\n\t DEPTHWISE_CONV_2D\t 98.162\t 0.168\t 0.168\t 0.002%\t 1.303%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_12_depthwise/Relu6]\n\t CONV_2D\t 98.332\t 1.007\t 1.007\t 0.013%\t 1.317%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_12_pointwise/Relu6]\n\t DEPTHWISE_CONV_2D\t 99.342\t 0.288\t 0.288\t 0.004%\t 1.320%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_13_depthwise/Relu6]\n\t CONV_2D\t 99.632\t 8.197\t 8.197\t 0.109%\t 1.429%\t 
0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_13_pointwise/Relu6]\n\t AVERAGE_POOL_2D\t 107.832\t 0.045\t 0.045\t 0.001%\t 1.430%\t 0.000\t 0\t[MobilenetV1/Logits/AvgPool_1a/AvgPool]\n\t CONV_2D\t 107.878\t 0.325\t 0.325\t 0.004%\t 1.434%\t 0.000\t 0\t[MobilenetV1/Logits/Conv2d_1c_1x1/BiasAdd]\n\t RESHAPE\t 108.206\t 0.003\t 0.003\t 0.000%\t 1.434%\t 0.000\t 0\t[MobilenetV1/Predictions/Reshape]\n\t SOFTMAX\t 108.211\t 0.038\t 0.038\t 0.001%\t 1.434%\t 0.000\t 0\t[MobilenetV1/Predictions/Softmax]\n\n============================== Top by Computation Time ==============================\n\t [node type]\t [start]\t [first]\t [avg ms]\t [%]\t [cdf%]\t [mem KB]\t[times called]\t[Name]\n\t CONV_2D\t 62.774\t 14.666\t 14.666\t 0.195%\t 0.195%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_8_pointwise/Relu6]\n\t CONV_2D\t 28.701\t 10.218\t 10.218\t 0.136%\t 0.330%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_3_pointwise/Relu6]\n\t CONV_2D\t 85.918\t 9.529\t 9.529\t 0.126%\t 0.456%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_10_pointwise/Relu6]\n\t CONV_2D\t 0.000\t 9.132\t 9.132\t 0.121%\t 0.578%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_0/Relu6]\n\t CONV_2D\t 99.632\t 8.197\t 8.197\t 0.109%\t 0.686%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_13_pointwise/Relu6]\n\t CONV_2D\t 78.081\t 7.186\t 7.186\t 0.095%\t 0.782%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_9_pointwise/Relu6]\n\t CONV_2D\t 12.419\t 6.877\t 6.877\t 0.091%\t 0.873%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_1_pointwise/Relu6]\n\t CONV_2D\t 55.656\t 6.464\t 6.464\t 0.086%\t 0.958%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_7_pointwise/Relu6]\n\t CONV_2D\t 48.856\t 6.167\t 6.167\t 0.082%\t 1.040%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_6_pointwise/Relu6]\n\t CONV_2D\t 42.448\t 5.995\t 5.995\t 0.080%\t 1.120%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_5_pointwise/Relu6]\n\n============================== Top by Memory Use ==============================\n\t [node type]\t [start]\t [first]\t [avg ms]\t [%]\t [cdf%]\t [mem KB]\t[times called]\t[Name]\n\t SOFTMAX\t 108.211\t 0.038\t 0.038\t 0.001%\t 0.001%\t 0.000\t 0\t[MobilenetV1/Predictions/Softmax]\n\t RESHAPE\t 108.206\t 0.003\t 0.003\t 0.000%\t 0.001%\t 0.000\t 0\t[MobilenetV1/Predictions/Reshape]\n\t CONV_2D\t 78.081\t 7.186\t 7.186\t 0.095%\t 0.096%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_9_pointwise/Relu6]\n\t DEPTHWISE_CONV_2D\t 77.444\t 0.635\t 0.635\t 0.008%\t 0.104%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_9_depthwise/Relu6]\n\t CONV_2D\t 62.774\t 14.666\t 14.666\t 0.195%\t 0.299%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_8_pointwise/Relu6]\n\t DEPTHWISE_CONV_2D\t 62.124\t 0.647\t 0.647\t 0.009%\t 0.307%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_8_depthwise/Relu6]\n\t CONV_2D\t 55.656\t 6.464\t 6.464\t 0.086%\t 0.393%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_7_pointwise/Relu6]\n\t DEPTHWISE_CONV_2D\t 55.026\t 0.629\t 0.629\t 0.008%\t 0.401%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_7_depthwise/Relu6]\n\t CONV_2D\t 48.856\t 6.167\t 6.167\t 0.082%\t 0.483%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_6_pointwise/Relu6]\n\t DEPTHWISE_CONV_2D\t 48.445\t 0.409\t 0.409\t 0.005%\t 0.489%\t 0.000\t 0\t[MobilenetV1/MobilenetV1/Conv2d_6_depthwise/Relu6]\n\nNumber of nodes executed: 31\n============================== Summary by node type ==============================\n\t [Node type]\t [count]\t [avg ms]\t [avg %]\t [cdf %]\t [mem KB]\t[times called]\n\t CONV_2D\t 15\t 1.861\t 86.679%\t 86.679%\t 0.000\t 0\n\t DEPTHWISE_CONV_2D\t 13\t 0.286\t 13.321%\t 
100.000%\t 0.000\t 0\n\t SOFTMAX\t 1\t 0.000\t 0.000%\t 100.000%\t 0.000\t 0\n\t RESHAPE\t 1\t 0.000\t 0.000%\t 100.000%\t 0.000\t 0\n\t AVERAGE_POOL_2D\t 1\t 0.000\t 0.000%\t 100.000%\t 0.000\t 0\n\nTimings (microseconds): count=50 first=108164 curr=128308 min=102850 max=197072 avg=150805 std=24368\nMemory (bytes): count=0\n31 nodes observed\n\n\nAverage inference timings in us: Warmup: 135310, Init: 12123, no stats: 150988\n\n```\n\n\n" }, { "alpha_fraction": 0.6270560622215271, "alphanum_fraction": 0.6612957119941711, "avg_line_length": 35.32926940917969, "blob_id": "faea569a42c8b4a6199696e93a4656c4ac9ab729", "content_id": "ff2e721423f07889f36746a2889afcc3369f28fc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2979, "license_type": "permissive", "max_line_length": 80, "num_lines": 82, "path": "/tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate_test.cc", "repo_name": "u2takey/tensorflow", "src_encoding": "UTF-8", "text": "/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n==============================================================================*/\n#include \"tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate.h\"\n#include <gtest/gtest.h>\n#include \"tensorflow/contrib/lite/interpreter.h\"\n#include \"tensorflow/contrib/lite/kernels/test_util.h\"\n#include \"tensorflow/contrib/lite/model.h\"\n\nnamespace tflite {\nnamespace {\n\nusing ::testing::ElementsAreArray;\n\nclass FloatAddOpModel : public SingleOpModel {\n public:\n FloatAddOpModel(const TensorData& input1, const TensorData& input2,\n const TensorData& output,\n ActivationFunctionType activation_type) {\n this->SetApplyDelegate([](Interpreter* interpreter) {\n interpreter->ModifyGraphWithDelegate(NnApiDelegate());\n });\n input1_ = AddInput(input1);\n input2_ = AddInput(input2);\n output_ = AddOutput(output);\n SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,\n CreateAddOptions(builder_, activation_type).Union());\n BuildInterpreter({GetShape(input1_), GetShape(input2_)});\n }\n\n int input1() { return input1_; }\n int input2() { return input2_; }\n\n std::vector<float> GetOutput() { return ExtractVector<float>(output_); }\n\n protected:\n int input1_;\n int input2_;\n int output_;\n};\n\n// Do a test with the NN API using no activation.\nTEST(NNAPIDelegate, AddWithNoActivation) {\n FloatAddOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},\n {TensorType_FLOAT32, {1, 2, 2, 1}},\n {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);\n m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});\n m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});\n m.Invoke();\n EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));\n}\n\n// Do a test with the NN api with relu.\nTEST(NNAPIDelegate, AddWithRelu) {\n FloatAddOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},\n {TensorType_FLOAT32, {1, 2, 2, 1}},\n {TensorType_FLOAT32, {}}, ActivationFunctionType_RELU);\n 
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});\n m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});\n m.Invoke();\n EXPECT_THAT(m.GetOutput(), ElementsAreArray({0.0, 0.4, 1.0, 1.3}));\n}\n\n} // namespace\n} // namespace tflite\n\nint main(int argc, char** argv) {\n ::tflite::LogToStderr();\n ::testing::InitGoogleTest(&argc, argv);\n return RUN_ALL_TESTS();\n}\n" } ]
6
jmanndev/Rover-Project
https://github.com/jmanndev/Rover-Project
a97a202b472e8affc74729870294101a50eaa824
bfad32aed286da3c4bcaf9e5fc8bf0a2eee528ef
828ebfe54d2e8fe390fb3570693de04bfe9d2ca6
refs/heads/master
2021-03-27T09:59:15.407225
2018-02-28T08:54:20
2018-02-28T08:54:20
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5619266033172607, "alphanum_fraction": 0.5756880640983582, "avg_line_length": 17.16666603088379, "blob_id": "a76986069f3606b620c4a2162a5454389619ddb5", "content_id": "af37f0dc94b1cae2cd6ca4cb9d9af771f3a1b625", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 436, "license_type": "no_license", "max_line_length": 56, "num_lines": 24, "path": "/Python Scripts/template code/range_sensor.py", "repo_name": "jmanndev/Rover-Project", "src_encoding": "UTF-8", "text": "#Libraries\nimport RPi.GPIO as GPIO\nimport time\n \n#GPIO Mode (BOARD / BCM)\nGPIO.setmode(GPIO.BCM)\n \n#set GPIO Pins\nGPIO_TRIGGER = 23\nGPIO_ECHO = 24\n \n\n \n\nif __name__ == '__main__':\n try:\n while True:\n dist = distance()\n print (\"Measured Distance = %.1f cm\" % dist)\n time.sleep(1)\n \n # Reset by pressing CTRL + C\n except KeyboardInterrupt:\n print(\"Measurement stopped by User\")\n" }, { "alpha_fraction": 0.484375, "alphanum_fraction": 0.4915364682674408, "avg_line_length": 27.44444465637207, "blob_id": "3caafa62b4356b4ff737cb81c190883122a7a600", "content_id": "5113b48b973539379bf4c2d8b11f52b6ebdaa9a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1536, "license_type": "no_license", "max_line_length": 64, "num_lines": 54, "path": "/Final stuff/Rover/server_rover.py", "repo_name": "jmanndev/Rover-Project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# RUNS ON ROVER\nimport socket\nimport sys\nimport Rover\n\n# Create a TCP/IP socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_name = sys.argv[1]\nserver_address = (server_name, 31417)\nprint >>sys.stderr, 'starting up on %s port %s' % server_address\nsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nsock.bind(server_address)\n\nengine = Rover.Engine()\n\n# Listen for incoming connections\nsock.listen(5) \nwhile True: \n try:\n # Wait for a connection\n print >>sys.stderr, 'waiting for a connection'\n connection, client_address = sock.accept()\n while True:\n data = connection.recv(50)\n print(\"received data:\" + data)\n if data:\n if data == 'FORWARD':\n engine.forward()\n elif data == 'BACKWARD':\n engine.backward()\n elif data == 'LEFT':\n engine.left()\n elif data == 'RIGHT':\n engine.right()\n elif data == 'IDLE':\n engine.idle()\n elif data == 'OFF':\n engine.off()\n elif data == 'UP':\n engine.up()\n elif data == 'DOWN':\n engine.down()\n else:\n print('Unknown Command')\n data = ''\n else:\n break\n except KeyBoardInterrupt:\n s.close()\n\n finally:\n connection.close()\n" }, { "alpha_fraction": 0.7251908183097839, "alphanum_fraction": 0.7389312982559204, "avg_line_length": 31.799999237060547, "blob_id": "094a81d32e563ccb436ede60bdcbc99117fd173e", "content_id": "f71c6e4405e7cc4022699cd2bf87cf394c55ed05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 655, "license_type": "no_license", "max_line_length": 164, "num_lines": 20, "path": "/Final stuff/django-rpi/mysite/roverapp/static/js/app.js", "repo_name": "jmanndev/Rover-Project", "src_encoding": "UTF-8", "text": "var video = document.querySelector(\"#videoElement\");\nvar canvas = document.getElementById(\"canvas\");\nvar context = canvas.getContext(\"2d\");\n\nnavigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia || navigator.oGetUserMedia;\n\nif (navigator.getUserMedia) {\n 
navigator.getUserMedia({\n video: true\n }, handleVideo, videoError);\n}\n\nfunction handleVideo(stream) {\n video.src = window.URL.createObjectURL(stream);\n}\n\n// Trigger photo take\ndocument.getElementById(\"snap-btn\").addEventListener(\"click\", function () {\n context.drawImage(video, 0, 0, 350, 280);\n});" }, { "alpha_fraction": 0.635477602481842, "alphanum_fraction": 0.6491228342056274, "avg_line_length": 18.69230842590332, "blob_id": "ff71130e9303c80bcd6eda9e8175c5a8a35626e1", "content_id": "fa6f9eb6aa04e5b26fbb1e538acb33e391ad6150", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 513, "license_type": "no_license", "max_line_length": 64, "num_lines": 26, "path": "/Python Scripts/AP CODE/command scripts/down.py", "repo_name": "jmanndev/Rover-Project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# RUNS ON AP\n\nimport socket\nimport sys\n\n# Create a TCP/IP socket\nserverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_name = sys.argv[1]\nserver_address = (server_name, 31417)\n\ndef sendToServer(message):\n try:\n # Send message\n print >>sys.stderr, 'sending \"%s\"' % message\n serverSocket.sendall(message)\n finally:\n return\n \ndef run():\n serverSocket.connect(server_address)\n sendToServer('DOWN')\n serverSocket.close()\n\nrun()\n\n" }, { "alpha_fraction": 0.6583850979804993, "alphanum_fraction": 0.6583850979804993, "avg_line_length": 31.266666412353516, "blob_id": "d511e1da72f72b9493fb0fc5779db7d50818f2ec", "content_id": "1122d8390fc59f73c023eb36c4928ea00b9d6dba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 483, "license_type": "no_license", "max_line_length": 61, "num_lines": 15, "path": "/Final stuff/django-rpi/mysite/roverapp/urls.py", "repo_name": "jmanndev/Rover-Project", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url('commandforward', views.cforward, name='forward'),\n url('commandbackward', views.cbackward, name='backward'),\n url('commandleft', views.cleft, name='left'),\n url('commandright', views.cright, name='right'),\n url('commandidle', views.cidle, name='idle'),\n url('commandup', views.cup, name='up'),\n url('commanddown', views.cdown, name='down')\n\n]" }, { "alpha_fraction": 0.5533333420753479, "alphanum_fraction": 0.6066666841506958, "avg_line_length": 22.6842098236084, "blob_id": "525dac0d3a048cf77bd68df812ba8b958d15d717", "content_id": "458b2235b19f698824d3b3d81ad499e12f6485c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 450, "license_type": "no_license", "max_line_length": 64, "num_lines": 19, "path": "/Final stuff/django-rpi/mysite/roverapp/migrations/0004_datareceived_propellorstate.py", "repo_name": "jmanndev/Rover-Project", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0 on 2018-02-26 03:38\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('roverapp', '0003_datareceived_distance'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='datareceived',\n name='propellorState',\n field=models.CharField(default=100, max_length=200),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.6861166954040527, "alphanum_fraction": 0.6871227622032166, "avg_line_length": 18.47058868408203, "blob_id": "2186e1dfa8801921d7f51d2b550e6d1febabb33b", "content_id": "4a68363ce127748ec5ca6cc2aab6ead91c22dcd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 994, "license_type": "no_license", "max_line_length": 70, "num_lines": 51, "path": "/Final stuff/django-rpi/mysite/roverapp/views.py", "repo_name": "jmanndev/Rover-Project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom .models import DataReceived\nfrom .directions import forward, backward, left, right, up, down, idle\n\ndef index(request):\n data_list = DataReceived.objects.all()\n context = {\n 'data_list' : data_list,\n }\n return render(request, 'roverapp/index.html', context)\n\n# Create your views here.\n\ndef cforward(request):\n forward()\n return HttpResponseRedirect('/')\n\n\ndef cbackward(request):\n backward()\n return HttpResponseRedirect('/')\n\n\ndef cleft(request):\n left()\n return HttpResponseRedirect('/')\n\n\ndef cright(request):\n right()\n return HttpResponseRedirect('/')\n\n\ndef cidle(request):\n idle()\n return HttpResponseRedirect('/')\n\n\ndef cup(request):\n up()\n return HttpResponseRedirect('/')\n\n\ndef cdown(request):\n down()\n return HttpResponseRedirect('/')\n\n" }, { "alpha_fraction": 0.5623294115066528, "alphanum_fraction": 0.569608747959137, "avg_line_length": 21.4489803314209, "blob_id": "8b7d79be3e848f388c5a630b9029c1b267da1032", "content_id": "f25bb99dbadc843d1412bff6a412e52caa5a878a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1101, "license_type": "no_license", "max_line_length": 72, "num_lines": 49, "path": "/Python Scripts/ROVER CODE/datasend_rover.py", "repo_name": "jmanndev/Rover-Project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport time\nimport Rover\nimport 
socket\nimport sys\nimport json\n\n# Create a TCP/IP socket\nserverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_name = sys.argv[1]\nserver_address = (server_name, 31415)\n\nengine = Rover.Engine()\nphil = Rover.Sensor()\nsonic = Rover.UltraSonic()\n\n\ndef sendToServer(message):\n try:\n # Send message\n print >>sys.stderr, 'sending \"%s\"' % message\n serverSocket.sendall(message)\n finally:\n return\n \n \ndef run():\n serverSocket.connect(server_address)\n print(' ~~~~\\t~~~~\\t~~~~\\t') #makes output pretty :)\n while True:\n phil.readAll()\n \n d = {\n \"time\" : time.strftime(\"%Y-%m-%d - %H:%M:%S\", time.gmtime())\n }\n \n d.update(phil.getDataAsDict().copy())\n d.update(engine.getDataAsDict())\n d.update(sonic.getDataAsDict())\n dataDict = d.copy()\n sendToServer(json.dumps(dataDict) + '☢')\n \n time.sleep(1)\n print(' ~~~~\\t~~~~\\t~~~~\\t') #makes output pretty :)\n serverSocket.close()\n\n\nrun()" }, { "alpha_fraction": 0.5193548202514648, "alphanum_fraction": 0.5612903237342834, "avg_line_length": 31.068965911865234, "blob_id": "d27c0e697fa281649a1c670854caacb8b97aceb0", "content_id": "c29733965eecd1c133624f24eb27cf5ecb19d985", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 930, "license_type": "no_license", "max_line_length": 114, "num_lines": 29, "path": "/Final stuff/django-rpi/mysite/roverapp/migrations/0001_initial.py", "repo_name": "jmanndev/Rover-Project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.10 on 2018-02-25 09:11\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Data',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('sendTime', models.CharField(max_length=200)),\n ('heading', models.CharField(max_length=200)),\n ('roll', models.CharField(max_length=200)),\n ('pitch', models.CharField(max_length=200)),\n ('tempC', models.CharField(max_length=200)),\n ('leftState', models.CharField(max_length=200)),\n ('rightState', models.CharField(max_length=200)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.5698602795600891, "alphanum_fraction": 0.5888223648071289, "avg_line_length": 15.080645561218262, "blob_id": "9428d19355a76953543f98cd28e7206c06293cac", "content_id": "c11447473d8fe18407f92c425ddbbc0bb230a9b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1002, "license_type": "no_license", "max_line_length": 68, "num_lines": 62, "path": "/Final stuff/django-rpi/mysite/roverapp/directions.py", "repo_name": "jmanndev/Rover-Project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# RUNS ON AP\n\nimport socket\nimport sys\nimport time\n\n# Create a TCP/IP socket\nserver_name = '172.19.114.65' #sys.argv[1]\nserver_address = (server_name, 31417)\n\ndef sendToServer(message):\n serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n serverSocket.connect(server_address)\n try:\n # Send message\n serverSocket.sendall(message)\n print >>sys.stderr, 'sending \"%s\"' % message\n finally:\n serverSocket.close()\n return\n \ndef forward():\n sendToServer('FORWARD')\n time.sleep(1)\n return\n \ndef backward():\n sendToServer('BACKWARD')\n time.sleep(1)\n return\n\n \ndef left():\n sendToServer('LEFT')\n return\n \n \ndef right():\n 
sendToServer('RIGHT')\n return\n \n \ndef idle():\n sendToServer('IDLE')\n return\n \n \ndef up():\n sendToServer('UP')\n return\n \n \ndef down():\n sendToServer('DOWN')\n return\n \n \ndef off():\n sendToServer('OFF')\n return\n\n " }, { "alpha_fraction": 0.49210333824157715, "alphanum_fraction": 0.5031900405883789, "avg_line_length": 25.632312774658203, "blob_id": "5fe1cfb3f7f5e14bb7bae56db4ac3f4315fc584d", "content_id": "1a1b1b05694b88f6550f4c06a57fada70621a63b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9561, "license_type": "no_license", "max_line_length": 117, "num_lines": 359, "path": "/Final stuff/Rover/Rover.py", "repo_name": "jmanndev/Rover-Project", "src_encoding": "UTF-8", "text": "# Tags generated with - http://patorjk.com/software/taag/#p=display&f=Big\n\nimport logging\nimport sys\nimport time \nimport os\nimport RPi.GPIO as SONIC_GPIO\nimport pigpio \n\n\n\nRIGHT_MOTOR_GPIO = 13\nLEFT_MOTOR_GPIO = 5\nPROPELLOR_MOTOR_GPIO = 21\n\n\nSONIC_GPIO_TRIGGER = 23\nSONIC_GPIO_ECHO = 24\n\n\n# __ __ _ \n# | \\/ | | | \n# | \\ / | ___ | |_ ___ _ __ \n# | |\\/| |/ _ \\| __/ _ \\| '__|\n# | | | | (_) | || (_) | | \n# |_| |_|\\___/ \\__\\___/|_| \n\n \nclass Motor:\n idle_throttle = 1500\n off_throttle = 0\n clockwise_throttle = 1900\n anticlock_throttle = 1100\n \n def __init__(self, motorName, gpio):\n self.name = motorName\n self.gpioPin = gpio\n self.pi = pigpio.pi()\n self.idle()\n\n \n def off(self):\n self.log('off', self.off_throttle)\n self.adjust_motor(self.off_throttle)\n self.pi.stop()\n return\n\n\n def idle(self):\n self.log('idle', self.idle_throttle)\n self.adjust_motor(self.idle_throttle)\n return\n\n\n def clockwise(self):\n self.log('CW', self.clockwise_throttle)\n self.adjust_motor(self.clockwise_throttle)\n return\n\n\n def anticlock(self):\n self.log('ACW', self.anticlock_throttle)\n self.adjust_motor(self.anticlock_throttle)\n return\n\n \n def adjust_motor(self, rpm):\n self.pi.set_servo_pulsewidth(self.gpioPin, rpm)\n return\n \n def getState(self):\n return self.pi.get_servo_pulsewidth(self.gpioPin)\n\n\n def log(self, direction, rpm):\n print('{0} spinning {1} ({2})'.format(self.name, direction, rpm))\n return\n\n\n \n\n# ______ _ \n# | ____| (_) \n# | |__ _ __ __ _ _ _ __ ___ \n# | __| | '_ \\ / _` | | '_ \\ / _ \\\n# | |____| | | | (_| | | | | | __/\n# |______|_| |_|\\__, |_|_| |_|\\___|\n# __/ | \n# |___/ \n \nclass Engine:\n # Assumes spinning motor in clockwise direction pushes ROVER forward\n \n def __init__(self):\n self.rightMotor = Motor('right', RIGHT_MOTOR_GPIO)\n self.leftMotor = Motor('left', LEFT_MOTOR_GPIO)\n self.propellorMotor = Motor('propellor', PROPELLOR_MOTOR_GPIO)\n \n \n def off(self):\n print('\\tOFF')\n self.rightMotor.off()\n self.leftMotor.off()\n self.propellorMotor.off()\n return\n \n \n def idle(self):\n print('\\tIDLE')\n self.rightMotor.idle()\n self.leftMotor.idle()\n self.propellorMotor.idle()\n return\n \n \n def forward(self):\n print('\\tFORWARD')\n self.leftMotor.idle()\n self.rightMotor.idle()\n time.sleep(3)\n self.rightMotor.anticlock()\n self.leftMotor.anticlock()\n return\n \n \n def backward(self):\n print('\\tBACKWARD')\n self.leftMotor.idle()\n self.rightMotor.idle()\n time.sleep(3)\n self.rightMotor.clockwise()\n self.leftMotor.clockwise()\n return\n \n \n def right(self):\n print('\\tRIGHT')\n self.leftMotor.idle()\n self.rightMotor.idle()\n time.sleep(3)\n self.rightMotor.anticlock()\n self.leftMotor.clockwise()\n return\n \n \n def left(self):\n 
print('\\tLEFT')\n self.leftMotor.idle()\n self.rightMotor.idle()\n time.sleep(3)\n self.rightMotor.clockwise()\n self.leftMotor.anticlock()\n return\n \n \n def up(self):\n print('\\tUP')\n self.propellorMotor.idle()\n time.sleep(3)\n self.propellorMotor.clockwise()\n return\n \n \n def down(self):\n print('\\tDOWN')\n self.propellorMotor.idle()\n time.sleep(3)\n self.propellorMotor.anticlock()\n \n \n def getDataAsDict(self):\n d = {\n \"right\" : self.rightMotor.getState(),\n \"left\" : self.leftMotor.getState(),\n \"propellor\" : self.propellorMotor.getState()\n }\n return d\n \n \n\n\n# _____ \n# / ____| \n# | (___ ___ _ __ ___ ___ _ __ \n# \\___ \\ / _ \\ '_ \\/ __|/ _ \\| '__|\n# ____) | __/ | | \\__ \\ (_) | | \n# |_____/ \\___|_| |_|___/\\___/|_|\n\nfrom Adafruit_BNO055 import BNO055\n\nclass Sensor:\n def __init__(self):\n self.sys = None\n self.gyro = None\n self.accel = None\n self.mag = None\n \n self.heading = None\n self.roll = None\n self.pitch = None\n self.temp_c = None\n \n self.bno = BNO055.BNO055(serial_port='/dev/ttyUSB0', rst=18) # apparently rst value is not needed\n \n if not self.bno.begin():\n raise RuntimeError('Failed to initialize BNO055! Is the sensor connected?')\n # Print system status and self test result.\n \n status, self_test, error = self.bno.get_system_status()\n print('System status: {0}'.format(status))\n print('Self test result (0x0F is normal): 0x{0:02X}'.format(self_test))\n \n # Print out an error if system status is in error mode.\n if status == 0x01:\n print('System error: {0}'.format(error))\n print('See datasheet section 4.3.59 for the meaning')\n print('Reading BNO055 data...')\n print('')\n self.readAll()\n \n \n def readAll(self):\n self.readOrientation();\n # Orientation as a quaternion:\n #x,y,z,w = bno.read_quaterion()\n \n self.readCalibration();\n self.readTemperature();\n \n # Magnetometer data (in micro-Teslas):\n #x,y,z = bno.read_magnetometer()\n \n # Gyroscope data (in degrees per second):\n #x,y,z = bno.read_gyroscope()\n \n # Accelerometer data (in meters per second squared):\n #x,y,z = bno.read_accelerometer()\n \n # Linear acceleration data (i.e. acceleration from movement, not gravity--\n # returned in meters per second squared):\n #x,y,z = bno.read_linear_acceleration()\n \n # Gravity acceleration data (i.e. 
acceleration just from gravity--returned\n # in meters per second squared):\n #x,y,z = bno.read_gravity()\n return\n \n \n def readOrientation(self):\n # Read the Euler angles for heading, roll, pitch (all in degrees).\n self.heading, self.roll, self.pitch = self.bno.read_euler()\n return\n \n \n def readCalibration(self):\n # Read the calibration status, 0=uncalibrated and 3=fully calibrated.\n self.sys, self.gyro, self.accel, self.mag = self.bno.get_calibration_status()\n return\n \n \n def readTemperature(self):\n # Sensor temperature in degrees Celsius:\n self.temp_c = self.bno.read_temp()\n return\n \n \n def updateGetHeading(self):\n self.readOrientation()\n return self.getHeading()\n \n \n def updateGetRoll(self):\n self.readOrientation()\n return self.getRoll()\n \n \n def updateGetPitch(self):\n self.readOrientation()\n return self.getPitch()\n \n \n def updateGetTemperature(self):\n self.readTemperature()\n return self.getTemperature()\n \n \n def displayData(self):\n print(self.getDataAsString())\n return\n \n \n def displayCalibration(self):\n print('Sys_cal={0} Gyro_cal={1} Accel_cal={2} Mag_cal={3}'.format(self.sys, self.gyro, self.accel, self.mag))\n return\n \n \n def getDataAsString(self):\n data = time.strftime(\"%H:%M:%S - %Y-%m-%d\", time.gmtime())\n data += ('\\nHeading={0:0.2f} Roll={1:0.2f} Pitch={2:0.2f}').format(self.heading, self.roll, self.pitch)\n data += '\\nTemp_c={0}'.format(self.temp_c)\n return data\n \n \n def getDataAsDict(self):\n d = {\n \"heading\" : self.heading,\n \"roll\" : self.roll,\n \"pitch\" : self.pitch,\n \"tempC\" : self.temp_c\n }\n return d\n \n\n\n# _ _ _ _ _____ _ \n# | | | | | | / ____| (_) \n# | | | | | |_ _ __ __ _| (___ ___ _ __ _ ___ \n# | | | | | __| '__/ _` |\\___ \\ / _ \\| '_ \\| |/ __|\n# | |__| | | |_| | | (_| |____) | (_) | | | | | (__ \n# \\____/|_|\\__|_| \\__,_|_____/ \\___/|_| |_|_|\\___|\n\nclass UltraSonic:\n def __init__(self):\n SONIC_GPIO.setmode(SONIC_GPIO.BCM)\n #set GPIO direction (IN / OUT)\n SONIC_GPIO.setup(SONIC_GPIO_TRIGGER, SONIC_GPIO.OUT)\n SONIC_GPIO.setup(SONIC_GPIO_ECHO, SONIC_GPIO.IN)\n \n def distance(self):\n # set Trigger to HIGH\n SONIC_GPIO.output(SONIC_GPIO_TRIGGER, True)\n\n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n SONIC_GPIO.output(SONIC_GPIO_TRIGGER, False)\n\n StartTime = time.time()\n StopTime = time.time()\n\n # save StartTime\n while SONIC_GPIO.input(SONIC_GPIO_ECHO) == 0:\n StartTime = time.time()\n\n # save time of arrival\n while SONIC_GPIO.input(SONIC_GPIO_ECHO) == 1:\n StopTime = time.time()\n\n # time difference between start and arrival\n TimeElapsed = StopTime - StartTime\n # multiply with the sonic speed (34300 cm/s)\n # and divide by 2, because there and back\n distance = (TimeElapsed * 34300) / 2\n return distance\n \n def getDataAsDict(self):\n d = {\n \"distance\" : self.distance()\n }\n return d\n" }, { "alpha_fraction": 0.6829971075057983, "alphanum_fraction": 0.7233429551124573, "avg_line_length": 35.52631759643555, "blob_id": "dd77a86f989f4534b1f38626d430427309cd6ea1", "content_id": "79dc073390c23d22fcd317d100321ef71f2d6bed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 694, "license_type": "no_license", "max_line_length": 103, "num_lines": 19, "path": "/Final stuff/django-rpi/mysite/roverapp/models.py", "repo_name": "jmanndev/Rover-Project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\n\n# Create your models 
here.\n\nclass DataReceived(models.Model):\n sendTime = models.CharField(max_length=200)\n heading = models.CharField(max_length=200)\n roll = models.CharField(max_length=200)\n pitch = models.CharField(max_length=200)\n tempC = models.CharField(max_length=200)\n leftState = models.CharField(max_length=200)\n rightState = models.CharField(max_length=200)\n propellorState = models.CharField(max_length=200)\n distance = models.CharField(max_length=200)\n def __str__(self):\n return '%s | %sC | Dist:%s | Heading:%s' % (self.sendTime, self.tempC, self.distance, self.heading)\n" }, { "alpha_fraction": 0.5972894430160522, "alphanum_fraction": 0.6718296408653259, "avg_line_length": 31.28125, "blob_id": "3e491672012efdae3b98935353aa518fc76645a0", "content_id": "760659e9e686aebaa9303390848beb0320ce9a6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1033, "license_type": "no_license", "max_line_length": 93, "num_lines": 32, "path": "/README.md", "repo_name": "jmanndev/Rover-Project", "src_encoding": "UTF-8", "text": "# 41150-Rover-Project\n\n\n\nFor Reference:\n\n\n| Device | Identifier | Code Name |\n| ------------- |:-------------:| -----:|\n| Pi | 156-13 N3 W | Frank |\n| Gryoscope | BNO055 | Phil |\n| Pi | AP | Al |\n\n\n\n# Run process\nFiles on AP are in: \\projects\\django-rpi\\mysite\\\nFiles on ROVER are in: \\ROVER_project\\\nConfirm Django settings.py contains AP IP as an ALLOWED HOST\n1. `sudo motion` on ROVER\n1. `sudo pigpiod` on ROVER\n1. `python server_rover.py 10.0.0.2` on ROVER with ROVER IP as argument\n1. `python server_ap.py 123.1.1.1` on AP with AP IP as argument\n1. `source venv/bin/activate` on AP from projects/django-rpi\n1. Add AP IP to Allowed Hosts for Django\n1. Change IP in `direction.py` to point to ROVER\n1. `python manage.py runserver 123.1.1.1:8000` on AP with AP IP as argument with port 8000\n1. Launch `http://123.1.1.1:8000` in internet browser on a laptop\n\n_Ensure all previous code is running before continuing_\n\n1. 
`python datasend_rover.py 123.1.1.1` on ROVER with AP IP as argument\n" }, { "alpha_fraction": 0.6030855774879456, "alphanum_fraction": 0.6096306443214417, "avg_line_length": 30.455883026123047, "blob_id": "e2d88180075d8f8a124c50887760540502da4ea2", "content_id": "419e38c0bbd52bd713f0bc62d67577992853ef7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2143, "license_type": "no_license", "max_line_length": 278, "num_lines": 68, "path": "/Final stuff/django-rpi/mysite/server_ap.py", "repo_name": "jmanndev/Rover-Project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# RUNS ON AP\nimport socket\nimport sys\nimport json\nimport sqlite3\n\nDATABASE_ENABLED = True \n\n \n# Create a TCP/IP socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_name = sys.argv[1]\nserver_address = (server_name, 31415)\nprint >>sys.stderr, 'starting up on %s port %s' % server_address\nsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nsock.bind(server_address)\n\n# Connecetion to DB\nif DATABASE_ENABLED:\n sqlconnection = sqlite3.connect(\"rover.sqlite3\")\n cursor = sqlconnection.cursor()\n\n\n\n# message must be a json string\ndef saveDataToSQL(message, curs):\n data = json.loads(message)\n print(\"save: \" + message)\n \n format_str = \"\"\"INSERT INTO roverapp_datareceived (sendTime, heading, roll, pitch, tempC, leftState, rightState, propellorState, distance) VALUES (\"{sendTime}\", \"{heading}\", \"{roll}\", \"{pitch}\", \"{tempC}\", \"{leftState}\", \"{rightState}\", \"{propellorState}\", \"{distance}\");\"\"\"\n sql_command = format_str.format(sendTime=data[\"time\"], heading=data[\"heading\"], roll=data[\"roll\"], pitch=data[\"pitch\"], tempC=data[\"tempC\"], leftState=data[\"left\"], rightState=data[\"right\"], propellorState=data[\"propellor\"], distance=data[\"distance\"])\n \n curs.execute(sql_command)\n sqlconnection.commit() # do not delete this line\n\n\n# Listen for incoming connections\nsock.listen(5) \ntry:\n while True:\n # Wait for a connection\n print >>sys.stderr, 'waiting for a connection'\n connection, client_address = sock.accept()\n newData = ''\n\n while True:\n \n data = connection.recv(50)\n \n if data:\n newData += data\n if data.endswith('☢'):\n newData = newData.strip('☢')\n print(\"received data:\" + newData)\n if DATABASE_ENABLED:\n saveDataToSQL(newData, cursor)\n newData = ''\n else:\n break\n\nfinally:\n connection.close()\n if DATABASE_ENABLED:\n sqlconnection.commit()\n cursor.close()\n sqlconnection.close()\n" } ]
14
bsoli/Slang_pos_tagger
https://github.com/bsoli/Slang_pos_tagger
38f7daa309da12b9a2c0bbaaf79be34073da1f4e
32f29ee4254925da3a1af861f3e35a3923f043ea
a1fa98104300ae816aa29e2675fa76e0a093aa99
refs/heads/master
2021-01-19T23:05:11.258196
2017-04-20T23:15:49
2017-04-20T23:15:49
88,918,118
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6798418760299683, "alphanum_fraction": 0.6828063130378723, "avg_line_length": 27.47058868408203, "blob_id": "ea325556d6a4c6463121a7df87b5ae171b6ac62f", "content_id": "7960b2361b13ab478362b47356b5b425af824c75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1012, "license_type": "no_license", "max_line_length": 65, "num_lines": 34, "path": "/TweetTracker.py", "repo_name": "bsoli/Slang_pos_tagger", "src_encoding": "UTF-8", "text": "__author__ = 'benso_000'\r\nimport tweepy\r\nimport codecs\r\n\r\n\r\nconsumer_key = #removed for privacy\r\nconsumer_secret = #removed for privacy\r\naccess_token = #removed for privacy\r\naccess_token_secret = #removed for privacy\r\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\nauth.set_access_token(access_token, access_token_secret)\r\n\r\n\r\napi = tweepy.API(auth)\r\ntweets = []\r\nfriend_ids = []\r\nstatuses = []\r\nuser = api.me()\r\n#finds all the ids for the user's friends\r\nfor friend in api.friends_ids(user):\r\n friend_ids.append(friend)\r\n#adds a list of their most recent statuses to a list \r\nfor i in friend_ids:\r\n statuses.append(api.user_timeline(i))\r\n#adds the text version of each tweet to a list\r\nfor timeline in statuses:\r\n for tweet in timeline:\r\n tweets.append(tweet.text)\r\n \r\nf = open(\"slang.txt\", \"w\")\r\n#converts the unicode text to ascii and writes the text to a file\r\nfor tweet in tweets:\r\n f.write(tweet.encode('ascii', 'ignore').decode('ascii')+'\\n')\r\nf.close()\r\n \r\n" }, { "alpha_fraction": 0.6341463327407837, "alphanum_fraction": 0.6426829099655151, "avg_line_length": 36.19047546386719, "blob_id": "6fbead4620b93817900d5f78bbcda065864d29a4", "content_id": "3c09f0791009acd01fc456723d354a15c638641c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 820, "license_type": "no_license", "max_line_length": 128, "num_lines": 21, "path": "/tweetCleaner.py", "repo_name": "bsoli/Slang_pos_tagger", "src_encoding": "UTF-8", "text": "__author__ = 'benso_000'\r\nimport nltk\r\nimport string\r\nimport re\r\nfrom nltk.corpus import words\r\n\r\ndef remove_punc(s):\r\n #removes the punctuation from before and after a word\r\n return s.strip(string.punctuation)\r\n\r\nf = open(\"slang.txt\", 'r')\r\nf2 = open(\"slangwords.txt\", 'w')\r\ntweets = f.read().split()\r\nslang_words = []\r\nwords = words.words()\r\npattern = re.compile(r'.*[0-9]|RT|.*http|.*#|.*@') #regular expression to elimate numbers, links, hashtags, and names\r\nfor word in tweets:\r\n if remove_punc(word) not in words and not (pattern.match(word)) and remove_punc(word) not in slang_words:\r\n word = remove_punc(word) #loops through each words to determine if the word is slang then writes the word to a file\r\n slang_words.append(word)\r\n f2.write(word+'\\n')\r\n\r\n\r\n\r\n \r\n\r\n" }, { "alpha_fraction": 0.4897777736186981, "alphanum_fraction": 0.4924444556236267, "avg_line_length": 24.690475463867188, "blob_id": "27e8b8412b9ab2e3bae969f973077b977b57575c", "content_id": "3b5847ab8341eb06633122d9afba6303b63a18a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1125, "license_type": "no_license", "max_line_length": 63, "num_lines": 42, "path": "/tagger.py", "repo_name": "bsoli/Slang_pos_tagger", "src_encoding": "UTF-8", "text": "import re\r\n\r\ndef tag(s):\r\n #takes a word and matches the word to a patten\r\n #if no pattern matches, the word is 
assumed to be a noun\r\n for i in range(len(patterns)-1):\r\n pattern = re.compile(patterns[i])\r\n if re.match(pattern, s.lower()):\r\n return '('+s+', '+pos[i]+')'\r\n return '('+s+', '+'NN'+')'\r\nf = open('slangwords.txt', 'r')\r\nf2 = open('taggedslang.txt', 'w')\r\nslang = f.read().split()\r\n# a list of regular expression morphological patterns\r\npatterns = [\r\n r'.*ing$|.*in$', \r\n r'.*ed$', \r\n r'.*es$',\r\n r'.*n\\'t$|.*\\'ve|.*[aioI][nm][nm]a$',\r\n r'.*esque$|^ir.|^un.|.*less$|.*ous$|.*ful$', \r\n r'.*\\'s$',\r\n r'.*sss',\r\n r'.*s$', \r\n r'.*\\'ll$|\\'d', \r\n r'.*ly$' \r\n ] \r\n# a list of POS tags whose indices correspond to regEx patterns\r\npos = [\r\n \"VBG\", #Gerund\r\n \"VBD\", #Past Tense verb\r\n \"VBZ\", #Present tense verb\r\n \"VB\", #Verb\r\n \"ADJ\", #Adjective\r\n \"NN$\", #Possesive\r\n \"INT\", #Interjection\r\n \"NNS\", #Plural noun\r\n \"MOD\", #Modal\r\n \"ADV\" #Adverb\r\n ]\r\n\r\nfor word in slang:\r\n f2.write(tag(word)+'\\n')\r\n\r\n\r\n" }, { "alpha_fraction": 0.7850877046585083, "alphanum_fraction": 0.7850877046585083, "avg_line_length": 73.91666412353516, "blob_id": "66a06600d11963a5968179942fbb772e1b9d7434", "content_id": "72acbe8bea099e272add6afc208e46179dfb51b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 912, "license_type": "no_license", "max_line_length": 86, "num_lines": 12, "path": "/readme.txt", "repo_name": "bsoli/Slang_pos_tagger", "src_encoding": "UTF-8", "text": "This project uses three python class files and three text files. The first class file,\r\nTweetTracker, user the Tweepy Twitter api to go through each of the user's\r\nfriends on Twitter and retrieve the twenty most recent tweet from each friend.\r\nEach tweet is converted to its ascii representation and written to the text\r\nfile, slang.txt. The second class, tweetCleaner, examines every word in\r\nslang.txt and determines if the word is slang by comparing it to the\r\nnltk corpus. The algorithm also removes links, Twitter hashtags, and numbers.\r\nEach slang word is then written to a file, slangwords.txt. The third class,\r\ntagger, uses that files to look for regular expression patterns in each word.\r\nThe first pattern that matches is assumed to be the word's part of speech. If\r\nno matching patter is found, the word is assumed to be a noun. Each word and it's\r\npart of speech is then written into a text file. \r\n" } ]
4
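The tagger.py file in the record above assigns a part of speech by testing ordered suffix regexes and falling back to NN. A minimal, self-contained sketch of that idea follows; the pattern list is a trimmed, illustrative subset (not the repo's full list), and pairing each pattern with its tag in a tuple avoids the index-misalignment bug fixed above.

```python
import re

# Illustrative subset of suffix heuristics in the spirit of tagger.py above.
# Each (pattern, tag) pair is tried in order; the first match wins, and
# unmatched words default to NN, mirroring the repo's fallback.
SUFFIX_TAGS = [
    (re.compile(r".*(?:ing|in)$"), "VBG"),  # gerunds: "vibing", "chillin"
    (re.compile(r".*ed$"), "VBD"),          # past tense: "ghosted"
    (re.compile(r".*ly$"), "ADV"),          # adverbs: "lowly"
]

def tag(word):
    """Return (word, tag) using the first matching suffix pattern."""
    for pattern, pos in SUFFIX_TAGS:
        if pattern.match(word.lower()):
            return (word, pos)
    return (word, "NN")  # default: treat unmatched words as nouns

if __name__ == "__main__":
    print([tag(w) for w in ["flexing", "ghosted", "lowkey"]])
    # [('flexing', 'VBG'), ('ghosted', 'VBD'), ('lowkey', 'NN')]
```

Keeping each pattern and its tag in one tuple means the two can never drift out of sync, which is exactly the failure mode of indexing two parallel lists with an off-by-one bound.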
jinilcs/user_similarity
https://github.com/jinilcs/user_similarity
7e3d2b721eaf3af12adf209b5f10de67017dd280
fdc2be80846367dfc74290fcc6a1959bc455240e
d3b5f22c4cc6db7f7f0538e2126a637f37064ae1
refs/heads/master
2020-03-26T22:35:27.624298
2018-08-24T20:34:27
2018-08-24T20:34:27
145,471,306
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.600350558757782, "alphanum_fraction": 0.600350558757782, "avg_line_length": 33.57575607299805, "blob_id": "28bb6740229a44015747e684d6ba27e13d71a569", "content_id": "5fee91a7140e9753fcbf2885be72c81d7756f3ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1141, "license_type": "no_license", "max_line_length": 72, "num_lines": 33, "path": "/usersim/cache.py", "repo_name": "jinilcs/user_similarity", "src_encoding": "UTF-8", "text": "import collections\n\nclass LRUCache:\n \"\"\"\n Cache Implementation to store recently requested similar users data \n \"\"\"\n def __init__(self, capacity):\n \"\"\"\n capacity defines the number of users to be cached\n \"\"\"\n self._user_table = collections.OrderedDict()\n self._capacity = capacity\n \n def lookup(self, user_handle):\n \"\"\"\n Returns the user list from cache if present and move it to front\n \"\"\"\n if user_handle not in self._user_table:\n return False, None\n similar_users = self._user_table.pop(user_handle)\n self._user_table[user_handle] = similar_users\n return True, similar_users\n \n def insert(self, user_handle, similar_users):\n \"\"\"\n Insert new user list into cache. \n Remove the least recently used entry if cache is full\n \"\"\"\n if user_handle in self._user_table:\n similar_users = self._user_table.pop(user_handle)\n elif self._capacity <= len(self._user_table):\n self._user_table.popitem(last=False)\n self._user_table[user_handle] = similar_users\n" }, { "alpha_fraction": 0.7530446648597717, "alphanum_fraction": 0.7774019241333008, "avg_line_length": 41.228572845458984, "blob_id": "0f72cc5899dc8aade10d50aa3b2d88c0fc1bbb12", "content_id": "2aa02c8a5b1532bc4ee23b16fb0396a74c6a7ba8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2956, "license_type": "no_license", "max_line_length": 242, "num_lines": 70, "path": "/README.md", "repo_name": "jinilcs/user_similarity", "src_encoding": "UTF-8", "text": "## User Similarity\n\nThis project is to find similar users based on user interests on different courses.\nThis consists of data preprocessing steps and the API code for RESTful endpoint to get similar users\n\n### Dependencies\n\nThe below libraries are used to develop this project\n\n- Flask==1.0.2\n- matplotlib==2.2.3\n- numpy==1.15.1\n- pandas==0.23.4\n- scikit-learn==0.19.2\n- scipy==1.1.0\n\n### Source code\n\ngit clone https://github.com/jinilcs/user_similarity.git\n\n### Data Flow Diagram:\n![alt text](dataflow.jpg \"Data Flow\")\n\n#### Preprocessing: \nPreprocessing steps are available as Jupyter notebook (notebook/usersim.ipynb) and as a python script (usersim/preprocess.py)\n\nDuring this step, different datasets are loaded, cleaned and applied required transformations.\n\n#### Generate User Feature Vectors: \nPreprocessed data sets are merged to generate feature vectors for all the users.\n\n#### Dimentionality Reduction:\nUser feature vectors are sparse and has a very large number of dimensions. So using SVD (Singular Value Decomposition), number of dimensions are reduced to make the user feature vector size small. And the result will be persisted to database.\n\n#### Cosine Similarity:\nCosine Similarity function is used to find how similar the users are. This works best on huge sparse data on a positive space. 
\n\n#### RESTful API:\n\nTo call the API to find the similar users,\n\nhttp://hostname/similarusers/<userhandle\\> -- This will return a list of 10 users similar to the user handle\nhttp://hostname/similarusers/<userhandle\\>?numusers=100 -- This will return a list of 100 users similar to the user handle\n\n\nThis is deployed in AWS EC2 instance and currently available for use.\n\nhttp://ec2-18-212-6-232.compute-1.amazonaws.com/similarusers/156 => Returns 10 similar users of user handle 156\nhttp://ec2-18-212-6-232.compute-1.amazonaws.com/similarusers/156?numusers=200 => Returns 200 similar users of user handle 156\n\n#### LRUCache:\n\nThis is included as part of API call. Recently accessed user handle responses will be cached. And least recently used will be removed from the cache. Implementation is available in usersim/cache.py\n\n### Notes:\n\n#### Similarity calculation:\nCosine similarity function has been used in this project to find the similar users. The data set becomes a huge sparse matrix after preprocessing. Cosine similarity works really good on big sparse dataset.\n\n#### Big data recommendations:\nDistributed file systems like HDFS for storage\nSpark SQL and Dataframe to preprocess the data\nSpark Mllib will be really good for machine learning models on big data stored in distributed file systems\nMemory cache to reduce the latency of responses (LRUCache is implemented in this project)\n\n#### Other data to collect:\nThe below set of data would be more useful to find the similar users\n- User personal details (age, sex, location, occupation etc)\n- Course ratings given by users\n- User review comments on courses (for sentiment analysis)\n" }, { "alpha_fraction": 0.7093291282653809, "alphanum_fraction": 0.7164866924285889, "avg_line_length": 37.877357482910156, "blob_id": "52f14c6d48464e4907837bcfbdfb4110cf709d0e", "content_id": "f5b4539fa30a9fce0c17b148076c9849ba762821", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8243, "license_type": "no_license", "max_line_length": 137, "num_lines": 212, "path": "/usersim/preprocess.py", "repo_name": "jinilcs/user_similarity", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\nimport matplotlib.pyplot as plt\nimport sqlite3\nfrom sklearn.externals import joblib\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.decomposition import TruncatedSVD\nimport collections\n\n\ndef load_data():\n\t\"\"\"\n\tLoads data from the csv files and writes to sqlite database for later use\n\t\"\"\"\n\tscores = pd.read_csv('../data/user_assessment_scores.csv')\n\tviews = pd.read_csv('../data/user_course_views.csv')\n\ttags = pd.read_csv('../data/course_tags.csv')\n\tinterests = pd.read_csv('../data/user_interests.csv')\n\n\tdb_file = '../db/usersim.sqlite'\n\ttry:\n\t\tengine = sqlite3.connect(db_file, timeout=10)\n\t\tscores.to_sql('scores', engine, if_exists='replace', index=False, index_label='user_handle')\n\t\tviews.to_sql('views', engine, if_exists='replace', index=False, index_label='user_handle')\n\t\ttags.to_sql('tags', engine, if_exists='replace', index=False, index_label='course_id')\n\t\tinterests.to_sql('interests', engine, if_exists='replace', index=False, index_label='user_handle')\n\texcept:\n\t\tprint('Error occured while inserting into database')\n\tfinally:\n\t\tif engine:\n\t\t\tengine.close()\n\treturn scores, views, tags, interests\n\ndef expand_scores(row):\n tags = 
row['assessment_tag']\n decays = row['user_assessment_decay']\n for tag, decay in zip(tags,decays):\n row[tag] = decay\n return row\n\ndef preprocess_scores(scores):\n\t\"\"\"\n\tProcess the scores dataframe\n\t\"\"\"\n\tscores['user_assessment_date'] = pd.to_datetime(scores['user_assessment_date'])\n\tscores['user_assessment_age'] = pd.to_datetime(scores['user_assessment_date'].max() + pd.DateOffset(1)) - scores['user_assessment_date']\n\n\t#converting Date to days\n\tscores['user_assessment_age'] = scores['user_assessment_age'].apply(lambda x: x.days)\n\n\t#Decay factor\n\tscores['user_assessment_decay'] = 1/(scores['user_assessment_age']//30 + 1)\n\n\tscores['user_assessment_decay'] = scores['user_assessment_score'] * scores['user_assessment_decay']\n\tscores_decay_scaler = MinMaxScaler()\n\tscores['user_assessment_decay'] = scores_decay_scaler.fit_transform(scores['user_assessment_decay'].values.reshape(-1,1))\n\tscores.drop(['user_assessment_date', 'user_assessment_score', 'user_assessment_age'], axis=1, inplace=True)\n\tscores_tags = scores.groupby(by='user_handle')['assessment_tag'].apply(list).reset_index()\n\tscores_decay = scores.groupby(by='user_handle')['user_assessment_decay'].apply(list).reset_index()\n\tscores_final = pd.merge(scores_tags, scores_decay, on='user_handle')\n\n\tscores_final = scores_final.apply(expand_scores, axis=1)\n\tscores_final.fillna(value=0, inplace=True)\n\tscores_final.drop(['assessment_tag', 'user_assessment_decay'], axis=1, inplace=True)\n\treturn scores_final\n\ndef expand_views_record(row):\n course_ids = row['course_id']\n view_stregths = row['view_stregth']\n tags = row['course_tags']\n \n tag_strengths = collections.defaultdict(list)\n for course, strength, ctags in zip(course_ids, view_stregths, tags):\n row[course] = strength\n for tag in ctags:\n tag_strengths[tag].append(strength)\n\n for tag, values in tag_strengths.items():\n row[tag] = np.max(values)\n\n return row\n\ndef preprocess_views_tags(views, tags):\n\t\"\"\"\n\tProcess the views and tags dataframe\n\t\"\"\"\n\tmax_val = views['view_time_seconds'].quantile(0.995)\n\tviews['view_time_seconds'] = np.clip(views['view_time_seconds'], 0 , max_val)\n\n\t#Finding view strength using a decay factor based on view date\n\tviews['view_date'] = pd.to_datetime(views['view_date'])\n\tviews['view_age'] = pd.to_datetime(views['view_date'].max() + pd.DateOffset(1)) - views['view_date']\n\tviews['view_age'] = views['view_age'].apply(lambda x: x.days)\n\tviews['view_decay'] = 1/(views['view_age']//30 + 1)\n\tviews['view_stregth'] = views['view_time_seconds'] * views['view_decay']\n\t#Log Transformation for normal distribution from skewed distribution\n\tviews['view_stregth'] = np.log2(views['view_stregth']+3)\n\tviews_strength_scaler = MinMaxScaler()\n\tviews['view_stregth'] = views_strength_scaler.fit_transform(views['view_stregth'].values.reshape(-1,1))\n\tviews.drop(['view_date', 'view_time_seconds', 'view_age', 'view_decay' ], axis=1, inplace=True)\n\tviews = views.groupby(by=['user_handle', 'course_id']).max()['view_stregth'].reset_index()\n\n\t#Removing missing data\n\ttags.dropna(inplace=True)\n\n\t#Grouping tags for each course id\n\ttags = tags.groupby(by='course_id')['course_tags'].apply(set).reset_index()\n\n\tviews = pd.merge(views, tags, on='course_id')\n\tviews = views.sort_values(by=['user_handle', 'view_stregth'])\n\tviews_course = views.groupby(by='user_handle')['course_id'].apply(list).reset_index()\n\tviews_strength = 
views.groupby(by='user_handle')['view_stregth'].apply(list).reset_index()\n\tviews_tags= views.groupby(by='user_handle')['course_tags'].apply(list).reset_index()\n\tviews_df = pd.merge(views_course, views_strength, on='user_handle')\n\tviews_df = pd.merge(views_df, views_tags, on='user_handle')\n\tviews_final = views_df.apply(expand_views_record, axis=1)\n\tviews_final.fillna(value=0, inplace=True)\n\tviews_final.drop(['course_id','view_stregth','course_tags'], axis=1, inplace=True)\n\treturn views_final\n\n\ndef expand_interests(row):\n    tags = row['interest_tag']\n    decays = row['interest_decay']\n    \n    for tag, decay in zip(tags, decays):\n        row[tag] = decay\n    \n    return row\n\n\ndef preprocess_interests(interests):\n\t\"\"\"\n\tProcess the interests dataframe\n\t\"\"\"\n\tinterests['date_followed']= pd.to_datetime(interests['date_followed'])\n\tinterests['interest_age'] = pd.to_datetime(interests['date_followed'].max() + pd.DateOffset(1)) - interests['date_followed']\n\tinterests['interest_age'] = interests['interest_age'].apply(lambda x: x.days)\n\tinterests['interest_decay'] = 1/(interests['interest_age']//30 + 1)\n\n\tinterests.drop(['date_followed','interest_age'], axis=1, inplace=True)\n\n\tinterests_tag = interests.groupby(by='user_handle')['interest_tag'].apply(list).reset_index()\n\tinterests_decay = interests.groupby(by='user_handle')['interest_decay'].apply(list).reset_index()\n\tinterests = pd.merge(interests_tag, interests_decay, on='user_handle')\n\n\tinterests_final = interests.apply(expand_interests, axis=1)\n\tinterests_final.fillna(value=0, axis=1, inplace=True)\n\tinterests_final.drop(['interest_tag', 'interest_decay'], axis=1, inplace=True)\n\treturn interests_final\n\n\ndef generate_feature_vectors(scores_final, views_final, interests_final):\n\t\"\"\"\n\tGenerate sparse feature vectors for all users from preprocessed dataframes\n\t\"\"\"\n\tusers = pd.merge(scores_final, views_final, how='outer', on='user_handle')\n\tusers = pd.merge(users, interests_final, how='outer', on='user_handle')\n\tusers.fillna(value=0, inplace=True)\n\tusers.set_index('user_handle', inplace=True)\n\treturn users\n\ndef reduce_dimentions(users):\n\t\"\"\"\n\tDimensionality reduction using SVD. 
TruncatedSVD is used since the data is sparse.\n\t300 features can retain 80% of the information from the data\n\t\"\"\"\n\tsvd = TruncatedSVD(n_components=300, n_iter=10, random_state=42)\n\tsvd.fit(users)\n\tusers_svd = svd.transform(users)\n\tusers_svd = pd.DataFrame(users_svd, index=users.index)\n\treturn users_svd\n\ndef insert_to_database(users_svd):\n\t\"\"\"\n\tWrites the user feature vectors into the database for later use\n\tThe RESTful API uses these feature vectors to find similar users\n\t\"\"\"\n\tdb_file = '../db/usersim.sqlite'\n\ttry:\n\t\tengine = sqlite3.connect(db_file, timeout=10)\n\t\tusers_svd.to_sql('users', engine, if_exists='replace', index=True)\n\texcept:\n\t\tprint('Error occurred while inserting into database')\n\tfinally:\n\t\tengine.close()\n\nif __name__ == \"__main__\":\n\n\tprint('Loading the file to dataframe and database')\n\tscores, views, tags, interests = load_data()\n\n\tprint('Processing scores')\n\tscores_final = preprocess_scores(scores)\n\n\tprint('Processing views and tags')\n\tviews_final = preprocess_views_tags(views, tags)\n\n\tprint('Processing interests')\n\tinterests_final = preprocess_interests(interests)\n\n\tprint('Generating user feature vectors')\n\tusers = generate_feature_vectors(scores_final, views_final, interests_final)\n\n\tprint('Reducing dimensions')\n\tusers_svd = reduce_dimentions(users)\n\n\tprint('Writing to database')\n\tinsert_to_database(users_svd)\n\n\tprint('Preprocessing completed successfully')\n\n" }, { "alpha_fraction": 0.5515304803848267, "alphanum_fraction": 0.5672563910484314, "avg_line_length": 28.180328369140625, "blob_id": "687c03e55d058ce084ff6752388f2d147b12f01a", "content_id": "05e38cf52611910aa1cf9c18c24b68bd27e95ee4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3561, "license_type": "no_license", "max_line_length": 85, "num_lines": 122, "path": "/usersim/user.py", "repo_name": "jinilcs/user_similarity", "src_encoding": "UTF-8", "text": "from flask import Flask, request\nfrom flask import jsonify\nfrom flask import Response\nimport numpy as np\nimport sqlite3\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport heapq\nfrom cache import LRUCache\n\napp = Flask(__name__)\ndb_file = '../db/usersim.sqlite'\ncache = LRUCache(capacity=50)\nusers_cache_size = 100\nsql_batch_size = 1000\n\ndef send_error(message, status):\n    \"\"\"\n    Send an error response to the client\n    \"\"\"\n    data = {}\n    data['error_text'] = message\n    data['users'] = []\n    response = jsonify(data)\n    response.status_code = status\n    return response\n\n@app.route('/similarusers/<int:user_handle>')\ndef get_similar_users(user_handle):\n    \"\"\"\n    Returns the list of similar users in JSON format\n    \n    Parameters\n    ----------\n    user_handle : int value, mandatory\n        Unique identifier for a user. 
\n    \n    num_users : int value, optional\n        Number of similar users in the result\n    \n    Returns\n    ----------\n    JSON response containing the list of similar users,\n    or error text and an HTTP status code on failure. 
\n    Sample response:\n    {\"users\":[6744,8740]}\n    \"\"\"\n    num_users = request.args.get('numusers')\n    if num_users:\n        try:\n            num_users = int(num_users)\n        except:\n            num_users = 10\n    else:\n        num_users = 10\n    minheap = []\n    conn = None\n    cur = None\n    \n    similar_users = {}\n    \n    if num_users <= users_cache_size:\n        is_found, user_list = cache.lookup(user_handle)\n        if is_found:\n            similar_users['users'] = user_list[:num_users]\n            return jsonify(similar_users)\n    \n    try:\n        conn = sqlite3.connect(db_file)\n        cur = conn.cursor()\n        cur.execute('select * from users where user_handle=?', (str(user_handle),)) \n        input_user = cur.fetchone()\n        if not input_user:\n            return send_error('User handle not found', 404)\n        \n        input_user = np.array(input_user).reshape(1,-1)[:,1:]\n        cur.execute('select * from users')\n        fetch_size = max(num_users, users_cache_size) + 1\n        users = cur.fetchmany(fetch_size)\n        if not users:\n            return send_error('No similar users', 404)\n        \n        users = np.array(users)\n        user_ids = users[:,0]\n        users = users[:,1:]\n        \n        similarities = cosine_similarity(input_user, users)[0]\n        \n        for user_id, sim in zip(user_ids, similarities):\n            if user_id != user_handle:\n                minheap.append((sim, int(user_id)))\n        \n        heapq.heapify(minheap)\n\n        while True:\n            users = cur.fetchmany(sql_batch_size)\n            if not users:\n                break\n            \n            users = np.array(users)\n            user_ids = users[:,0]\n            users = users[:,1:]\n            similarities = cosine_similarity(input_user, users)[0]\n\n            for user_id, sim in zip(user_ids, similarities):\n                if user_id != user_handle:\n                    heapq.heappushpop(minheap, (sim, int(user_id)))\n        \n    except Exception as e:\n        return send_error(str(e), 500)\n    finally:\n        if cur is not None:\n            cur.close()\n        if conn is not None:\n            conn.close()\n    \n    user_list = [s[1] for s in heapq.nlargest(fetch_size-1, minheap)]\n    cache.insert(user_handle, user_list[:users_cache_size])\n    similar_users['users'] = user_list[:num_users]\n    return jsonify(similar_users)\n\nif __name__ == \"__main__\":\n    app.run(host='0.0.0.0', port=9080)\n\n" } ]
4
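user.py in the record above streams rows out of SQLite in batches and keeps only the best candidates in a bounded min-heap. Below is a minimal sketch of that top-k pattern, assuming an in-memory NumPy matrix of the SVD features instead of a database cursor; the matrix, ids, and batch size here are made up for illustration.

```python
import heapq
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

def top_k_similar(target_vec, user_ids, user_matrix, k=10, batch=1000, skip_id=None):
    """Return the k user ids most cosine-similar to target_vec.

    A bounded min-heap keeps memory at O(k) while batches stream through,
    the same trick user.py applies to rows fetched from SQLite; skip_id
    excludes the target user from their own results.
    """
    heap = []  # (similarity, user_id); the weakest candidate sits on top
    for start in range(0, len(user_ids), batch):
        rows = user_matrix[start:start + batch]
        sims = cosine_similarity(target_vec.reshape(1, -1), rows)[0]
        for uid, sim in zip(user_ids[start:start + batch], sims):
            if uid == skip_id:
                continue
            if len(heap) < k:
                heapq.heappush(heap, (sim, uid))
            else:
                heapq.heappushpop(heap, (sim, uid))
    return [uid for _, uid in heapq.nlargest(k, heap)]

if __name__ == "__main__":
    # Toy stand-ins for the 300-dimensional SVD features from preprocess.py.
    rng = np.random.default_rng(42)
    matrix = rng.random((500, 300))
    ids = list(range(500))
    print(top_k_similar(matrix[0], ids, matrix, k=5, skip_id=0))
```

Streaming batches through a fixed-size heap is what lets the endpoint scale with the number of users without ever materializing a full similarity matrix.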
ltbird259/web-scraping-challenge
https://github.com/ltbird259/web-scraping-challenge
d09d817351055c0eaefad88ea9f22986b0b5272f
ef30b88bbd52cfbb1738f1882892c9afd39cf5ba
b4c32881ff01efbad8425bee040cb529ce4d3d92
refs/heads/master
2021-01-03T14:43:04.111161
2020-04-06T23:01:11
2020-04-06T23:01:11
240,113,127
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6398352384567261, "alphanum_fraction": 0.6432048082351685, "avg_line_length": 23.657407760620117, "blob_id": "7a5a9bb6d4e9043304f17a787aca1004b81a5bb9", "content_id": "05012b309f7d414324ad6ee5bc1f392ac1bb6efd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2671, "license_type": "no_license", "max_line_length": 108, "num_lines": 108, "path": "/scrape_mars.py", "repo_name": "ltbird259/web-scraping-challenge", "src_encoding": "UTF-8", "text": "\n\n\n\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup as bs\nimport pandas as pd\nimport requests\nimport time\nimport re\n\n\n\ndef scrape():\n #browser start\n executable_path = {'executable_path': 'chromedriver.exe'}\n browser = Browser('chrome', **executable_path, headless=False)\n\n\n\n #nasa info\n urlnasa = 'https://mars.nasa.gov/news/'\n\n browser.visit(urlnasa)\n htmlnasa = bs(browser.html, \"html.parser\")\n time.sleep(8)\n\n\n newssummary = htmlnasa.find('div', class_='article_teaser_body')\n newsp = newssummary.get_text()\n\n newstitle = newssummary.previous_sibling.get_text()\n\n\n\n # nasa picture\n urlpicture = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\n browser.visit(urlpicture)\n browser.find_by_id('full_image').click()\n\n\n\n # getting img url\n time.sleep(5)\n imgsoup = bs(browser.html, \"html.parser\")\n\n imgtag = imgsoup.find('img', class_='fancybox-image')\n\n imgsource = imgtag.get('src')\n\n imgsourcefinal = 'https://www.jpl.nasa.gov/' + imgsource\n\n\n\n\n url = \"https://twitter.com/marswxreport?lang=en\"\n browser.visit(url)\n time.sleep(5)\n html = browser.html\n weather_soup = bs(html, \"html.parser\")\n pattern = re.compile(r'sol')\n mars_weather = weather_soup.find('span', text=pattern).text\n\n\n\n spacefacts = \"https://space-facts.com/mars/\"\n tables = pd.read_html(spacefacts)\n facts = tables[1]\n facts_html = facts.to_html()\n\n\n hemispheres_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n browser.visit(hemispheres_url)\n time.sleep(6)\n\n\n\n html_hemispheres = browser.html\n\n soup = bs(html_hemispheres, 'html.parser')\n\n hemispheres = soup.find_all('div', class_='item')\n\n hemispheresbase = 'https://astrogeology.usgs.gov'\n\n hemisphereimgs = []\n\n for hem in hemispheres: \n title = hem.find('h3').text\n \n partial_img_url = hem.find('a', class_='itemLink product-item')['href']\n \n browser.visit(hemispheresbase + partial_img_url)\n \n partial_img_html = browser.html\n \n soup = bs( partial_img_html, 'html.parser')\n \n img_url = hemispheresbase + soup.find('img', class_='wide-image')['src']\n \n hemisphereimgs.append({\"title\" : title, \"img_url\" : img_url})\n \n\n browser.quit()\n\n output = {'mars_news_title' : newstitle, 'mars_news_info' : newsp, 'mars_Image' : imgsourcefinal, \n 'mars_weather' : mars_weather, 'mars_facts' : facts_html, 'hemisphere_images' : hemisphereimgs}\n\n\n return output\n\n# print(newstitle, newssummary, imgsourcefinal, mars_weather, facts, hemisphereimgs)\n\n\n\n\n" } ]
1
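scrape_mars.py in the record above drives a real Chrome browser through Splinter because its target pages render content with JavaScript. For static pages, the same scrape can be done without a browser at all; here is a minimal sketch using requests and BeautifulSoup, where the URL and CSS selector are placeholders rather than targets from the repo.

```python
import requests
from bs4 import BeautifulSoup

def scrape_first(url, selector):
    """Fetch a static page and return the text of the first CSS-selector match."""
    resp = requests.get(url, timeout=10)
    resp.raise_for_status()  # surface HTTP errors instead of parsing error pages
    soup = BeautifulSoup(resp.text, "html.parser")
    node = soup.select_one(selector)
    return node.get_text(strip=True) if node else None

if __name__ == "__main__":
    # example.com is a stand-in target; JavaScript-rendered sites like the
    # NASA news feed still need a real browser (hence Splinter plus the
    # time.sleep calls in the repo's script).
    print(scrape_first("https://example.com", "h1"))
```

The trade-off is speed and simplicity versus fidelity: a plain HTTP fetch cannot see anything a page builds client-side, which is why the repo's script waits for the browser to render before handing the HTML to BeautifulSoup.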
fagan2888/Python
https://github.com/fagan2888/Python
425654b18055233949aa7e6181e0b2652975e185
1b125fbdf54efb390afe12aaa966f093218c4387
fae94c0dcb251ea5854e33e81369140ca75cfaf5
refs/heads/master
2020-12-13T14:17:59.452500
2018-06-18T16:16:30
2018-06-18T16:16:30
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6519434452056885, "alphanum_fraction": 0.6625441908836365, "avg_line_length": 24.68181800842285, "blob_id": "68ce384b7ee6b36064b00d14ed76079f29ba5c02", "content_id": "cee22dcc47379fdf96391100f01d360ad2c0942d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 566, "license_type": "permissive", "max_line_length": 67, "num_lines": 22, "path": "/src/bloombox/schema/__init__.py", "repo_name": "fagan2888/Python", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\n bloombox: schema\n ~~~~~~~~~~~~~~~~\n :copyright: (c) Momentum Ideas Co., 2018\n :license: This software makes use of the Apache License v2.\n A copy of this license is included as ``LICENSE.md`` in\n the root of the project.\n\"\"\"\n\nimport sys, os\n\n\n## calculate schema path and add to sys.path\nschema_path = os.path.dirname(os.path.abspath(__file__))\nif schema_path not in sys.path: sys.path.append(schema_path)\n\n## preload some of the proto machinery\nimport grpc\nfrom google import api\nfrom google import protobuf\n\n" }, { "alpha_fraction": 0.7143802642822266, "alphanum_fraction": 0.7260696887969971, "avg_line_length": 44.34000015258789, "blob_id": "a8685afd4c3c89ea76d6ce2e7cbd95a1ed5c816e", "content_id": "0c0620aa5d78babd13b81ea3f790c36104daa3ca", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4534, "license_type": "permissive", "max_line_length": 119, "num_lines": 100, "path": "/src/bloombox/schema/services/platform/v1/PlatformService_v1_pb2_grpc.py", "repo_name": "fagan2888/Python", "src_encoding": "UTF-8", "text": "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\nimport grpc\n\nfrom google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2\nfrom platform.v1 import PlatformService_v1_pb2 as platform_dot_v1_dot_PlatformService__v1__pb2\n\n\nclass PlatformStub(object):\n \"\"\"Specifies the platform service, which provides utility/low-level platform methods, employed in health check probes\n and other observability tools.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Ping = channel.unary_unary(\n '/bloombox.schema.services.platform.v1.Platform/Ping',\n request_serializer=platform_dot_v1_dot_PlatformService__v1__pb2.Ping.Request.SerializeToString,\n response_deserializer=platform_dot_v1_dot_PlatformService__v1__pb2.Ping.Response.FromString,\n )\n self.Health = channel.unary_unary(\n '/bloombox.schema.services.platform.v1.Platform/Health',\n request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.Resolve = channel.unary_unary(\n '/bloombox.schema.services.platform.v1.Platform/Resolve',\n request_serializer=platform_dot_v1_dot_PlatformService__v1__pb2.DomainResolve.Request.SerializeToString,\n response_deserializer=platform_dot_v1_dot_PlatformService__v1__pb2.DomainResolve.Response.FromString,\n )\n self.Domains = channel.unary_unary(\n '/bloombox.schema.services.platform.v1.Platform/Domains',\n request_serializer=platform_dot_v1_dot_PlatformService__v1__pb2.DomainInfo.Request.SerializeToString,\n response_deserializer=platform_dot_v1_dot_PlatformService__v1__pb2.DomainInfo.Response.FromString,\n )\n\n\nclass PlatformServicer(object):\n \"\"\"Specifies the platform service, which provides utility/low-level platform methods, employed 
in health check probes\n and other observability tools.\n \"\"\"\n\n def Ping(self, request, context):\n \"\"\"Ping the platform server.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Health(self, request, context):\n \"\"\"Run a health check, returning a status code indicating overall service health.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Resolve(self, request, context):\n \"\"\"Retrieve ownership information for a hosted domain.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Domains(self, request, context):\n \"\"\"Retrieve domain info for a given partner/location.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_PlatformServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'Ping': grpc.unary_unary_rpc_method_handler(\n servicer.Ping,\n request_deserializer=platform_dot_v1_dot_PlatformService__v1__pb2.Ping.Request.FromString,\n response_serializer=platform_dot_v1_dot_PlatformService__v1__pb2.Ping.Response.SerializeToString,\n ),\n 'Health': grpc.unary_unary_rpc_method_handler(\n servicer.Health,\n request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n 'Resolve': grpc.unary_unary_rpc_method_handler(\n servicer.Resolve,\n request_deserializer=platform_dot_v1_dot_PlatformService__v1__pb2.DomainResolve.Request.FromString,\n response_serializer=platform_dot_v1_dot_PlatformService__v1__pb2.DomainResolve.Response.SerializeToString,\n ),\n 'Domains': grpc.unary_unary_rpc_method_handler(\n servicer.Domains,\n request_deserializer=platform_dot_v1_dot_PlatformService__v1__pb2.DomainInfo.Request.FromString,\n response_serializer=platform_dot_v1_dot_PlatformService__v1__pb2.DomainInfo.Response.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'bloombox.schema.services.platform.v1.Platform', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n" }, { "alpha_fraction": 0.7224975228309631, "alphanum_fraction": 0.735579788684845, "avg_line_length": 44.65610885620117, "blob_id": "382bcbbc0c52b9970d0c414e73c9d7121ea5f64f", "content_id": "0f5959c9eab084587b96c2f73fe1083f85d0e2bb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10090, "license_type": "permissive", "max_line_length": 127, "num_lines": 221, "path": "/src/bloombox/schema/services/telemetry/v1beta3/TelemetryService_Beta3_pb2_grpc.py", "repo_name": "fagan2888/Python", "src_encoding": "UTF-8", "text": "# Generated by the gRPC Python protocol compiler plugin. 
DO NOT EDIT!\nimport grpc\n\nfrom google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2\nfrom telemetry.v1beta3 import GenericEvents_Beta3_pb2 as telemetry_dot_v1beta3_dot_GenericEvents__Beta3__pb2\nfrom telemetry.v1beta3 import TelemetryService_Beta3_pb2 as telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2\n\n\nclass EventTelemetryStub(object):\n \"\"\"Provides support for transmission of operational and experiential telemetry data from first and second-party devices.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Ping = channel.unary_unary(\n '/bloombox.schema.services.telemetry.v1beta3.EventTelemetry/Ping',\n request_serializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.TelemetryPing.Request.SerializeToString,\n response_deserializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.TelemetryPing.Response.FromString,\n )\n self.Event = channel.unary_unary(\n '/bloombox.schema.services.telemetry.v1beta3.EventTelemetry/Event',\n request_serializer=telemetry_dot_v1beta3_dot_GenericEvents__Beta3__pb2.Event.Request.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.Batch = channel.unary_unary(\n '/bloombox.schema.services.telemetry.v1beta3.EventTelemetry/Batch',\n request_serializer=telemetry_dot_v1beta3_dot_GenericEvents__Beta3__pb2.Event.BatchRequest.SerializeToString,\n response_deserializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.TelemetryResponse.FromString,\n )\n self.Error = channel.unary_unary(\n '/bloombox.schema.services.telemetry.v1beta3.EventTelemetry/Error',\n request_serializer=telemetry_dot_v1beta3_dot_GenericEvents__Beta3__pb2.Exception.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n\n\nclass EventTelemetryServicer(object):\n \"\"\"Provides support for transmission of operational and experiential telemetry data from first and second-party devices.\n \"\"\"\n\n def Ping(self, request, context):\n \"\"\"Ping the server.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Event(self, request, context):\n \"\"\"Submit a generic event.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Batch(self, request, context):\n \"\"\"Submit one or more generic events via the batch interface.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Error(self, request, context):\n \"\"\"Submit one or more exception events.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_EventTelemetryServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'Ping': grpc.unary_unary_rpc_method_handler(\n servicer.Ping,\n request_deserializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.TelemetryPing.Request.FromString,\n response_serializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.TelemetryPing.Response.SerializeToString,\n ),\n 'Event': grpc.unary_unary_rpc_method_handler(\n servicer.Event,\n 
request_deserializer=telemetry_dot_v1beta3_dot_GenericEvents__Beta3__pb2.Event.Request.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n 'Batch': grpc.unary_unary_rpc_method_handler(\n servicer.Batch,\n request_deserializer=telemetry_dot_v1beta3_dot_GenericEvents__Beta3__pb2.Event.BatchRequest.FromString,\n response_serializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.TelemetryResponse.SerializeToString,\n ),\n 'Error': grpc.unary_unary_rpc_method_handler(\n servicer.Error,\n request_deserializer=telemetry_dot_v1beta3_dot_GenericEvents__Beta3__pb2.Exception.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'bloombox.schema.services.telemetry.v1beta3.EventTelemetry', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n\n\nclass CommercialTelemetryStub(object):\n \"\"\"Provides support for tailored analytics payloads w.r.t. interactions between end-users and commercial models, like\n menu sections, products, and user orders.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Impression = channel.unary_unary(\n '/bloombox.schema.services.telemetry.v1beta3.CommercialTelemetry/Impression',\n request_serializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.CommercialEvent.Impression.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.View = channel.unary_unary(\n '/bloombox.schema.services.telemetry.v1beta3.CommercialTelemetry/View',\n request_serializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.CommercialEvent.View.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.Action = channel.unary_unary(\n '/bloombox.schema.services.telemetry.v1beta3.CommercialTelemetry/Action',\n request_serializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.CommercialEvent.Action.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n\n\nclass CommercialTelemetryServicer(object):\n \"\"\"Provides support for tailored analytics payloads w.r.t. 
interactions between end-users and commercial models, like\n menu sections, products, and user orders.\n \"\"\"\n\n def Impression(self, request, context):\n \"\"\"Register that a menu section was presented to a user, regardless of whether they acted on it or not.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def View(self, request, context):\n \"\"\"Register that a menu section was viewed, browsed-to, or otherwise served to a user.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Action(self, request, context):\n \"\"\"Register that an end-user elected to take action within a section in some way.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_CommercialTelemetryServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'Impression': grpc.unary_unary_rpc_method_handler(\n servicer.Impression,\n request_deserializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.CommercialEvent.Impression.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n 'View': grpc.unary_unary_rpc_method_handler(\n servicer.View,\n request_deserializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.CommercialEvent.View.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n 'Action': grpc.unary_unary_rpc_method_handler(\n servicer.Action,\n request_deserializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.CommercialEvent.Action.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'bloombox.schema.services.telemetry.v1beta3.CommercialTelemetry', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n\n\nclass IdentityTelemetryStub(object):\n \"\"\"Provides support for recording telemetry information about user events and actions related to their own identity,\n account, profile, preferences, and so on.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Action = channel.unary_unary(\n '/bloombox.schema.services.telemetry.v1beta3.IdentityTelemetry/Action',\n request_serializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.IdentityEvent.Action.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n\n\nclass IdentityTelemetryServicer(object):\n \"\"\"Provides support for recording telemetry information about user events and actions related to their own identity,\n account, profile, preferences, and so on.\n \"\"\"\n\n def Action(self, request, context):\n \"\"\"Register affirmative action taken by an end-user on their own account or identity.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_IdentityTelemetryServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'Action': grpc.unary_unary_rpc_method_handler(\n servicer.Action,\n request_deserializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.IdentityEvent.Action.FromString,\n 
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'bloombox.schema.services.telemetry.v1beta3.IdentityTelemetry', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n" }, { "alpha_fraction": 0.6627299785614014, "alphanum_fraction": 0.6677265763282776, "avg_line_length": 24.888235092163086, "blob_id": "41ee19aa77d6f5d84bd90538f53cfe3bfb050eb5", "content_id": "85d1b8880658f4f58895747779b819ff8e3e9123", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 4403, "license_type": "permissive", "max_line_length": 139, "num_lines": 170, "path": "/Makefile", "repo_name": "fagan2888/Python", "src_encoding": "UTF-8", "text": "\n#\n## Bloombox: Python API Client\n#\n\nVERBOSE ?= no\nTESTS ?= yes\nCOVERAGE ?= yes\nVERSION ?= 0.0.1-alpha6\nSTAGING ?= yes\nSERVICES ?= auth:v1beta1 platform:v1 checkin:v1beta1 devices:v1beta1 marketing:v1beta1 menu:v1beta1 media:v1beta1 shop:v1 telemetry:v1beta3\n\nENV_PATH ?= .env\nBUILDBOT ?= no\n\nSERVICE_NAMES ?= $(foreach svc,$(SERVICES),$(firstword $(subst :, ,$(svc))))\n\nBASE_TEST_FLAGS = --no-byte-compile \\\n\t\t--traverse-namespace \\\n\t\t--with-xunit --xunit-file=build/tests.xml --xunit-testsuite-name=bloombox\n\nCOVERAGE_FLAGS = --with-coverage \\\n --cover-package=bloombox \\\n --cover-branches \\\n --cover-html --cover-html-dir=build/coverage-html \\\n --cover-xml --cover-xml-file=build/coverage.xml\n\nTEST_FLAGS ?=\n\nifeq ($(STAGING),no)\nPYPI ?= pypi\nelse\nPYPI ?= pypitest\nendif\n\nifeq ($(BUILDBOT),yes)\nPIP ?= pip\nPYTHON ?= python\nENVPYTHON ?= python\nNOSE ?= nosetests\nelse\nPIP ?= $(ENV_PATH)/bin/pip\nPYTHON ?= $(shell which python2.7)\nENVPYTHON ?= $(ENV_PATH)/bin/python\nNOSE ?= $(ENV_PATH)/bin/nosetests\nendif\n\nifeq ($(COVERAGE),yes)\nTEST_FLAGS += $(COVERAGE_FLAGS)\nendif\n\nifeq ($(VERBOSE),yes)\nCP_FLAGS ?= v\nRM_FLAGS ?= v\nTEST_FLAGS += -v\nelse\nCP_FLAGS ?=\nRM_FLAGS ?=\nendif\n\n_TEST_FLAGS = $(BASE_TEST_FLAGS) $(TEST_FLAGS)\n\nPYTHON_BUILD_TARGETS ?= build build_py\nPYTHON_DIST_TARGETS ?= sdist bdist_egg bdist_wheel\nPYTHON_TARGETS ?= $(PYTHON_BUILD_TARGETS) $(PYTHON_DIST_TARGETS)\nSCHEMA_PATH ?= src/bloombox/schema\n\n\nall: env build test\n\t@echo \"Done.\"\n\ninstall: install-egg-info install-lib\n\t@echo \"Installation done.\"\n\nenv: $(ENV_PATH)\n\nclean-schema:\n\t@echo \"Cleaning embedded schema...\"\n\t@mv $(SCHEMA_PATH)/__init__.py ./__schema_init__.py\n\t@rm -fr$(RM_FLAGS) $(SCHEMA_PATH)/*\n\t@mv ./__schema_init__.py $(SCHEMA_PATH)/__init__.py\n\t@$(MAKE) -C schema clean\n\nclean:\n\t@echo \"Cleaning PYC files...\"\n\t@find . 
-name '*.py[c,o]' -delete\n\t@echo \"Cleaning build...\"\n\t@rm -fr$(RM_FLAGS) build dist schema/languages/python\n\ndistclean: clean\n\t@echo \"Cleaning environment...\"\n\t@rm -fr$(RM_FLAGS) $(ENV_PATH) schema/languages\n\nifneq ($(BUILDBOT),yes)\n$(ENV_PATH):\n\t@echo \"Setting up environment...\"\n\t@mkdir -p $(ENV_PATH)\n\t@virtualenv $(ENV_PATH) -p $(PYTHON)\n\t@$(PIP) install -r requirements.txt\n\t@echo \"Environment ready.\"\nelse\n$(ENV_PATH):\n\t@echo \"Setting up environment for CI...\"\n\t@mkdir -p $(ENV_PATH)\n\t@$(PIP) install -r requirements.txt\n\t@echo \"Environment ready.\"\nendif\n\nsubmodules:\n\t@echo \"Fetching submodules...\"\n\t@git submodule update --init --recursive\n\nupdate-schema: submodules\n\t@echo \"Updating schema...\"\n\t@git submodule update --init --remote\n\nsync-schema: update-schema clean-schema embedded-schema\n\t@echo \"Sync done.\"\n\nschema/languages/python:\n\t@echo \"Building schema...\"\n\t@$(MAKE) -C schema LANGUAGES=\"python pygrpc c cpp\"\n\n$(SCHEMA_PATH)/__init__.py:\n\t@echo \"Installing Schema...\"\n\t@mkdir -p $(SCHEMA_PATH) $(SCHEMA_PATH)/services\n\t@cd schema/languages/python && cp -fr$(CP_FLAGS) ./* ../../../$(SCHEMA_PATH)/\n\n$(SCHEMA_PATH)/services/descriptor.py:\n\t@echo \"Installing services...\"\n\t@for service in $(SERVICE_NAMES); do \\\n\t\techo \"- Installing '$$service'...\"; \\\n\t\tmkdir -p $(SCHEMA_PATH)/services/$$service; \\\n\t\tcp -fr$(CP_FLAGS) schema/languages/pygrpc/$$service/* $(SCHEMA_PATH)/services/$$service; done\n\nembedded-schema: schema/languages/python $(SCHEMA_PATH)/__init__.py $(SCHEMA_PATH)/services/descriptor.py\n\t@echo \"Fixing up modules...\"\n\t@cd $(SCHEMA_PATH)/services && for directory in `find -s -x . -type d | xargs`; do touch $$directory/__init__.py; done\n\t@echo \"Installing schema...\"\n\t@mkdir -p $(SCHEMA_PATH) $(SCHEMA_PATH)/services\n\t@mv $(SCHEMA_PATH)/__init__.py $(SCHEMA_PATH)/__init_loader__.py\n\t@cd schema/languages/python && cp -fr$(CP_FLAGS) ./* ../../../$(SCHEMA_PATH)/\n\t@rm -f $(SCHEMA_PATH)/__init__.py\n\t@mv $(SCHEMA_PATH)/__init_loader__.py $(SCHEMA_PATH)/__init__.py\n\t@echo \"Embedded schema ready.\"\n\nbuild:\n\t@$(ENVPYTHON) setup.py $(PYTHON_TARGETS)\n\nrelease: clean build test\n\t@$(ENVPYTHON) setup.py $(PYTHON_TARGETS) check upload -r $(PYPI)\n\nifeq ($(TESTS),yes)\ntest: build\n\t@echo \"Running testsuite...\"\n\t@$(NOSE) $(_TEST_FLAGS) bloombox_tests\nelse\ntest:\n\t@echo \"Skipping testsuite.\"\nendif\n\ninstall-egg-info:\n\t@echo \"Installing egg info...\"\n\t@$(ENVPYTHON) setup.py install_egg_info\n\ninstall-lib:\n\t@echo \"Installing library...\"\n\t@$(ENVPYTHON) setup.py install_lib\n\ninteractive: all\n\t@PYTHONPATH=src $(ENVPYTHON) -B\n\n" }, { "alpha_fraction": 0.7222440838813782, "alphanum_fraction": 0.7346762418746948, "avg_line_length": 44.06737518310547, "blob_id": "a81f5591dda1604a22512aed48d45e95bf8cffbf", "content_id": "452ec200866e2cd4ff5967e4fa94fc1e1b13738e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12709, "license_type": "permissive", "max_line_length": 127, "num_lines": 282, "path": "/src/bloombox/schema/services/telemetry/v1beta4/TelemetryService_Beta4_pb2_grpc.py", "repo_name": "fagan2888/Python", "src_encoding": "UTF-8", "text": "# Generated by the gRPC Python protocol compiler plugin. 
DO NOT EDIT!\nimport grpc\n\nfrom google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2\nfrom telemetry.v1beta4 import GenericEvents_Beta4_pb2 as telemetry_dot_v1beta4_dot_GenericEvents__Beta4__pb2\nfrom telemetry.v1beta4 import TelemetryService_Beta4_pb2 as telemetry_dot_v1beta4_dot_TelemetryService__Beta4__pb2\n\n\nclass EventTelemetryStub(object):\n \"\"\"Provides support for transmission of operational and experiential telemetry data from first and second-party devices.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Ping = channel.unary_unary(\n '/bloombox.schema.services.telemetry.v1beta4.EventTelemetry/Ping',\n request_serializer=telemetry_dot_v1beta4_dot_TelemetryService__Beta4__pb2.TelemetryPing.Request.SerializeToString,\n response_deserializer=telemetry_dot_v1beta4_dot_TelemetryService__Beta4__pb2.TelemetryPing.Response.FromString,\n )\n self.Event = channel.unary_unary(\n '/bloombox.schema.services.telemetry.v1beta4.EventTelemetry/Event',\n request_serializer=telemetry_dot_v1beta4_dot_GenericEvents__Beta4__pb2.Event.Request.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.Batch = channel.unary_unary(\n '/bloombox.schema.services.telemetry.v1beta4.EventTelemetry/Batch',\n request_serializer=telemetry_dot_v1beta4_dot_GenericEvents__Beta4__pb2.Event.BatchRequest.SerializeToString,\n response_deserializer=telemetry_dot_v1beta4_dot_TelemetryService__Beta4__pb2.TelemetryResponse.FromString,\n )\n self.Error = channel.unary_unary(\n '/bloombox.schema.services.telemetry.v1beta4.EventTelemetry/Error',\n request_serializer=telemetry_dot_v1beta4_dot_GenericEvents__Beta4__pb2.Exception.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n\n\nclass EventTelemetryServicer(object):\n \"\"\"Provides support for transmission of operational and experiential telemetry data from first and second-party devices.\n \"\"\"\n\n def Ping(self, request, context):\n \"\"\"Ping the server.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Event(self, request, context):\n \"\"\"Submit a generic event.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Batch(self, request, context):\n \"\"\"Submit one or more generic events via the batch interface.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Error(self, request, context):\n \"\"\"Submit one or more exception events.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_EventTelemetryServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'Ping': grpc.unary_unary_rpc_method_handler(\n servicer.Ping,\n request_deserializer=telemetry_dot_v1beta4_dot_TelemetryService__Beta4__pb2.TelemetryPing.Request.FromString,\n response_serializer=telemetry_dot_v1beta4_dot_TelemetryService__Beta4__pb2.TelemetryPing.Response.SerializeToString,\n ),\n 'Event': grpc.unary_unary_rpc_method_handler(\n servicer.Event,\n 
request_deserializer=telemetry_dot_v1beta4_dot_GenericEvents__Beta4__pb2.Event.Request.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n 'Batch': grpc.unary_unary_rpc_method_handler(\n servicer.Batch,\n request_deserializer=telemetry_dot_v1beta4_dot_GenericEvents__Beta4__pb2.Event.BatchRequest.FromString,\n response_serializer=telemetry_dot_v1beta4_dot_TelemetryService__Beta4__pb2.TelemetryResponse.SerializeToString,\n ),\n 'Error': grpc.unary_unary_rpc_method_handler(\n servicer.Error,\n request_deserializer=telemetry_dot_v1beta4_dot_GenericEvents__Beta4__pb2.Exception.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'bloombox.schema.services.telemetry.v1beta4.EventTelemetry', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n\n\nclass CommercialTelemetryStub(object):\n \"\"\"Provides support for tailored analytics payloads w.r.t. interactions between end-users and commercial models, like\n menu sections, products, and user orders.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Impression = channel.unary_unary(\n '/bloombox.schema.services.telemetry.v1beta4.CommercialTelemetry/Impression',\n request_serializer=telemetry_dot_v1beta4_dot_TelemetryService__Beta4__pb2.CommercialEvent.Impression.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.View = channel.unary_unary(\n '/bloombox.schema.services.telemetry.v1beta4.CommercialTelemetry/View',\n request_serializer=telemetry_dot_v1beta4_dot_TelemetryService__Beta4__pb2.CommercialEvent.View.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.Action = channel.unary_unary(\n '/bloombox.schema.services.telemetry.v1beta4.CommercialTelemetry/Action',\n request_serializer=telemetry_dot_v1beta4_dot_TelemetryService__Beta4__pb2.CommercialEvent.Action.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n\n\nclass CommercialTelemetryServicer(object):\n \"\"\"Provides support for tailored analytics payloads w.r.t. 
interactions between end-users and commercial models, like\n menu sections, products, and user orders.\n \"\"\"\n\n def Impression(self, request, context):\n \"\"\"Register that a menu section was presented to a user, regardless of whether they acted on it or not.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def View(self, request, context):\n \"\"\"Register that a menu section was viewed, browsed-to, or otherwise served to a user.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Action(self, request, context):\n \"\"\"Register that an end-user elected to take action within a section in some way.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_CommercialTelemetryServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'Impression': grpc.unary_unary_rpc_method_handler(\n servicer.Impression,\n request_deserializer=telemetry_dot_v1beta4_dot_TelemetryService__Beta4__pb2.CommercialEvent.Impression.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n 'View': grpc.unary_unary_rpc_method_handler(\n servicer.View,\n request_deserializer=telemetry_dot_v1beta4_dot_TelemetryService__Beta4__pb2.CommercialEvent.View.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n 'Action': grpc.unary_unary_rpc_method_handler(\n servicer.Action,\n request_deserializer=telemetry_dot_v1beta4_dot_TelemetryService__Beta4__pb2.CommercialEvent.Action.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'bloombox.schema.services.telemetry.v1beta4.CommercialTelemetry', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n\n\nclass IdentityTelemetryStub(object):\n \"\"\"Provides support for recording telemetry information about user events and actions related to their own identity,\n account, profile, preferences, and so on.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Action = channel.unary_unary(\n '/bloombox.schema.services.telemetry.v1beta4.IdentityTelemetry/Action',\n request_serializer=telemetry_dot_v1beta4_dot_TelemetryService__Beta4__pb2.IdentityEvent.Action.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n\n\nclass IdentityTelemetryServicer(object):\n \"\"\"Provides support for recording telemetry information about user events and actions related to their own identity,\n account, profile, preferences, and so on.\n \"\"\"\n\n def Action(self, request, context):\n \"\"\"Register affirmative action taken by an end-user on their own account or identity.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_IdentityTelemetryServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'Action': grpc.unary_unary_rpc_method_handler(\n servicer.Action,\n request_deserializer=telemetry_dot_v1beta4_dot_TelemetryService__Beta4__pb2.IdentityEvent.Action.FromString,\n 
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'bloombox.schema.services.telemetry.v1beta4.IdentityTelemetry', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n\n\nclass SearchTelemetryStub(object):\n \"\"\"Provides support for recording telemetry information specific to user-submitted search queries, the resultsets they\n produce, and the user's response to those resultsets.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Query = channel.unary_unary(\n '/bloombox.schema.services.telemetry.v1beta4.SearchTelemetry/Query',\n request_serializer=telemetry_dot_v1beta4_dot_TelemetryService__Beta4__pb2.SearchEvent.Query.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.Result = channel.unary_unary(\n '/bloombox.schema.services.telemetry.v1beta4.SearchTelemetry/Result',\n request_serializer=telemetry_dot_v1beta4_dot_TelemetryService__Beta4__pb2.SearchEvent.Result.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n\n\nclass SearchTelemetryServicer(object):\n \"\"\"Provides support for recording telemetry information specific to user-submitted search queries, the resultsets they\n produce, and the user's response to those resultsets.\n \"\"\"\n\n def Query(self, request, context):\n \"\"\"Record a search performed by a user.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Result(self, request, context):\n \"\"\"Record a search result selected by a user after performing a search.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_SearchTelemetryServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'Query': grpc.unary_unary_rpc_method_handler(\n servicer.Query,\n request_deserializer=telemetry_dot_v1beta4_dot_TelemetryService__Beta4__pb2.SearchEvent.Query.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n 'Result': grpc.unary_unary_rpc_method_handler(\n servicer.Result,\n request_deserializer=telemetry_dot_v1beta4_dot_TelemetryService__Beta4__pb2.SearchEvent.Result.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'bloombox.schema.services.telemetry.v1beta4.SearchTelemetry', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n" }, { "alpha_fraction": 0.6501210927963257, "alphanum_fraction": 0.6622276306152344, "avg_line_length": 25.645160675048828, "blob_id": "67a834699a3b04c82f7b9bf2b52878de83114e07", "content_id": "054796fb38131b08b7efda51114633d16690325e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 826, "license_type": "permissive", "max_line_length": 68, "num_lines": 31, "path": "/bloombox_tests/schema_tests.py", "repo_name": "fagan2888/Python", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\n bloombox testsuite: schema tests\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n :copyright: (c) Momentum Ideas Co., 2018\n :license: This software makes use of the Apache License v2.\n 
A copy of this license is included as ``LICENSE.md`` in\n the root of the project.\n\"\"\"\n\nimport unittest\n\n\nclass LibrarySchemaTests(unittest.TestCase):\n\n \"\"\" Schema object tests. \"\"\"\n\n def test_schemas_import(self):\n\n \"\"\" Schemas: 'schema.base' objects should be importable. \"\"\"\n\n from bloombox.schema.base import ProductKey_pb2\n from bloombox.schema.base import ProductKind_pb2\n from bloombox.schema.base import ProductType_pb2\n\n def test_products_import(self):\n\n \"\"\" Schemas: 'schema.products' objects should be importable. \"\"\"\n\n from bloombox.schema.products import Flower_pb2\n" }, { "alpha_fraction": 0.7020330429077148, "alphanum_fraction": 0.7161372303962708, "avg_line_length": 46.12574768066406, "blob_id": "9dc702c6c263dfc828ce2521985ab64523b297a5", "content_id": "90e72abebdac20d4f45604e66582e3de16f7537c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7870, "license_type": "permissive", "max_line_length": 121, "num_lines": 167, "path": "/src/bloombox/schema/services/shop/v1/ShopService_v1_pb2_grpc.py", "repo_name": "fagan2888/Python", "src_encoding": "UTF-8", "text": "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\nimport grpc\n\nfrom shop.v1 import ShopService_v1_pb2 as shop_dot_v1_dot_ShopService__v1__pb2\n\n\nclass ShopStub(object):\n \"\"\"Specifies the retail shop service, which provides functionality for pickup and delivery orders, member verification,\n member enrollment, and more.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Ping = channel.unary_unary(\n '/bloombox.schema.services.shop.v1.Shop/Ping',\n request_serializer=shop_dot_v1_dot_ShopService__v1__pb2.Ping.Request.SerializeToString,\n response_deserializer=shop_dot_v1_dot_ShopService__v1__pb2.Ping.Response.FromString,\n )\n self.ShopInfo = channel.unary_unary(\n '/bloombox.schema.services.shop.v1.Shop/ShopInfo',\n request_serializer=shop_dot_v1_dot_ShopService__v1__pb2.ShopInfo.Request.SerializeToString,\n response_deserializer=shop_dot_v1_dot_ShopService__v1__pb2.ShopInfo.Response.FromString,\n )\n self.EnrollMember = channel.unary_unary(\n '/bloombox.schema.services.shop.v1.Shop/EnrollMember',\n request_serializer=shop_dot_v1_dot_ShopService__v1__pb2.EnrollMember.Request.SerializeToString,\n response_deserializer=shop_dot_v1_dot_ShopService__v1__pb2.EnrollMember.Response.FromString,\n )\n self.CheckZipcode = channel.unary_unary(\n '/bloombox.schema.services.shop.v1.Shop/CheckZipcode',\n request_serializer=shop_dot_v1_dot_ShopService__v1__pb2.CheckZipcode.Request.SerializeToString,\n response_deserializer=shop_dot_v1_dot_ShopService__v1__pb2.CheckZipcode.Response.FromString,\n )\n self.VerifyMember = channel.unary_unary(\n '/bloombox.schema.services.shop.v1.Shop/VerifyMember',\n request_serializer=shop_dot_v1_dot_ShopService__v1__pb2.VerifyMember.Request.SerializeToString,\n response_deserializer=shop_dot_v1_dot_ShopService__v1__pb2.VerifyMember.Response.FromString,\n )\n self.SubmitOrder = channel.unary_unary(\n '/bloombox.schema.services.shop.v1.Shop/SubmitOrder',\n request_serializer=shop_dot_v1_dot_ShopService__v1__pb2.SubmitOrder.Request.SerializeToString,\n response_deserializer=shop_dot_v1_dot_ShopService__v1__pb2.SubmitOrder.Response.FromString,\n )\n self.GetOrder = channel.unary_unary(\n '/bloombox.schema.services.shop.v1.Shop/GetOrder',\n 
request_serializer=shop_dot_v1_dot_ShopService__v1__pb2.GetOrder.Request.SerializeToString,\n response_deserializer=shop_dot_v1_dot_ShopService__v1__pb2.GetOrder.Response.FromString,\n )\n self.ShareOrder = channel.unary_unary(\n '/bloombox.schema.services.shop.v1.Shop/ShareOrder',\n request_serializer=shop_dot_v1_dot_ShopService__v1__pb2.ShareOrder.Request.SerializeToString,\n response_deserializer=shop_dot_v1_dot_ShopService__v1__pb2.ShareOrder.Response.FromString,\n )\n\n\nclass ShopServicer(object):\n \"\"\"Specifies the retail shop service, which provides functionality for pickup and delivery orders, member verification,\n member enrollment, and more.\n \"\"\"\n\n def Ping(self, request, context):\n \"\"\"Ping the server.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def ShopInfo(self, request, context):\n \"\"\"Query info and status of a particular shop.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def EnrollMember(self, request, context):\n \"\"\"Enroll a new member.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def CheckZipcode(self, request, context):\n \"\"\"Check if a given USPS zip code is supported for delivery.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def VerifyMember(self, request, context):\n \"\"\"Verify a member by their email address.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def SubmitOrder(self, request, context):\n \"\"\"Submit an order for delivery or pickup.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetOrder(self, request, context):\n \"\"\"Retrieve an existing order.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def ShareOrder(self, request, context):\n \"\"\"Share a commercial order with a given email address or phone number.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_ShopServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'Ping': grpc.unary_unary_rpc_method_handler(\n servicer.Ping,\n request_deserializer=shop_dot_v1_dot_ShopService__v1__pb2.Ping.Request.FromString,\n response_serializer=shop_dot_v1_dot_ShopService__v1__pb2.Ping.Response.SerializeToString,\n ),\n 'ShopInfo': grpc.unary_unary_rpc_method_handler(\n servicer.ShopInfo,\n request_deserializer=shop_dot_v1_dot_ShopService__v1__pb2.ShopInfo.Request.FromString,\n response_serializer=shop_dot_v1_dot_ShopService__v1__pb2.ShopInfo.Response.SerializeToString,\n ),\n 'EnrollMember': grpc.unary_unary_rpc_method_handler(\n servicer.EnrollMember,\n request_deserializer=shop_dot_v1_dot_ShopService__v1__pb2.EnrollMember.Request.FromString,\n 
response_serializer=shop_dot_v1_dot_ShopService__v1__pb2.EnrollMember.Response.SerializeToString,\n ),\n 'CheckZipcode': grpc.unary_unary_rpc_method_handler(\n servicer.CheckZipcode,\n request_deserializer=shop_dot_v1_dot_ShopService__v1__pb2.CheckZipcode.Request.FromString,\n response_serializer=shop_dot_v1_dot_ShopService__v1__pb2.CheckZipcode.Response.SerializeToString,\n ),\n 'VerifyMember': grpc.unary_unary_rpc_method_handler(\n servicer.VerifyMember,\n request_deserializer=shop_dot_v1_dot_ShopService__v1__pb2.VerifyMember.Request.FromString,\n response_serializer=shop_dot_v1_dot_ShopService__v1__pb2.VerifyMember.Response.SerializeToString,\n ),\n 'SubmitOrder': grpc.unary_unary_rpc_method_handler(\n servicer.SubmitOrder,\n request_deserializer=shop_dot_v1_dot_ShopService__v1__pb2.SubmitOrder.Request.FromString,\n response_serializer=shop_dot_v1_dot_ShopService__v1__pb2.SubmitOrder.Response.SerializeToString,\n ),\n 'GetOrder': grpc.unary_unary_rpc_method_handler(\n servicer.GetOrder,\n request_deserializer=shop_dot_v1_dot_ShopService__v1__pb2.GetOrder.Request.FromString,\n response_serializer=shop_dot_v1_dot_ShopService__v1__pb2.GetOrder.Response.SerializeToString,\n ),\n 'ShareOrder': grpc.unary_unary_rpc_method_handler(\n servicer.ShareOrder,\n request_deserializer=shop_dot_v1_dot_ShopService__v1__pb2.ShareOrder.Request.FromString,\n response_serializer=shop_dot_v1_dot_ShopService__v1__pb2.ShareOrder.Response.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'bloombox.schema.services.shop.v1.Shop', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n" }, { "alpha_fraction": 0.4886363744735718, "alphanum_fraction": 0.6761363744735718, "avg_line_length": 18.55555534362793, "blob_id": "175be18b3d7cc54466dea7832ef147c9d55fb475", "content_id": "1b261223885e3f1a6fc35091ab364000473277d6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 176, "license_type": "permissive", "max_line_length": 38, "num_lines": 9, "path": "/requirements.txt", "repo_name": "fagan2888/Python", "src_encoding": "UTF-8", "text": "coverage==4.5.1\nenum34==1.1.6\nfutures==3.2.0; python_version < '3.0'\ngoogleapis-common-protos==1.5.3\ngrpcio==1.8.4\ngrpcio-tools==1.12.1\nnose==1.3.7\nprotobuf==3.5.2\nsix==1.11.0\n" }, { "alpha_fraction": 0.5432525873184204, "alphanum_fraction": 0.5640138387680054, "avg_line_length": 27.899999618530273, "blob_id": "a52b087e8301e18d17ed56bedddac50ab31925fc", "content_id": "f071992664d4ce9964490a23e68660d4e166983b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 289, "license_type": "permissive", "max_line_length": 67, "num_lines": 10, "path": "/src/bloombox/client/__init__.py", "repo_name": "fagan2888/Python", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\n bloombox: API client\n ~~~~~~~~~~~~~~~~~~~~\n :copyright: (c) Momentum Ideas Co., 2018\n :license: This software makes use of the Apache License v2.\n A copy of this license is included as ``LICENSE.md`` in\n the root of the project.\n\"\"\"\n" }, { "alpha_fraction": 0.7177727222442627, "alphanum_fraction": 0.7332189083099365, "avg_line_length": 48.94285583496094, "blob_id": "6bc1faf3a0d91d04f2e05fbd990991469caf90d8", "content_id": "d299c417b28cb353db9638c5938f797b64295fc2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5244, 
"license_type": "permissive", "max_line_length": 120, "num_lines": 105, "path": "/src/bloombox/schema/services/media/v1beta1/MediaService_Beta1_pb2_grpc.py", "repo_name": "fagan2888/Python", "src_encoding": "UTF-8", "text": "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\nimport grpc\n\nfrom google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2\nfrom media.v1beta1 import MediaService_Beta1_pb2 as media_dot_v1beta1_dot_MediaService__Beta1__pb2\n\n\nclass MediaStub(object):\n \"\"\"Specifies the media service, which provides tools for resolving, uploading/updating, and managing rich media data\n associated with various system data points.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.List = channel.unary_unary(\n '/bloombox.schema.services.media.v1beta1.Media/List',\n request_serializer=media_dot_v1beta1_dot_MediaService__Beta1__pb2.ListMedia.Request.SerializeToString,\n response_deserializer=media_dot_v1beta1_dot_MediaService__Beta1__pb2.ListMedia.Response.FromString,\n )\n self.Retrieve = channel.unary_unary(\n '/bloombox.schema.services.media.v1beta1.Media/Retrieve',\n request_serializer=media_dot_v1beta1_dot_MediaService__Beta1__pb2.GetMedia.Request.SerializeToString,\n response_deserializer=media_dot_v1beta1_dot_MediaService__Beta1__pb2.GetMedia.Response.FromString,\n )\n self.Upload = channel.unary_unary(\n '/bloombox.schema.services.media.v1beta1.Media/Upload',\n request_serializer=media_dot_v1beta1_dot_MediaService__Beta1__pb2.UploadMedia.Request.SerializeToString,\n response_deserializer=media_dot_v1beta1_dot_MediaService__Beta1__pb2.UploadMedia.Response.FromString,\n )\n self.Attach = channel.unary_unary(\n '/bloombox.schema.services.media.v1beta1.Media/Attach',\n request_serializer=media_dot_v1beta1_dot_MediaService__Beta1__pb2.AttachMedia.Request.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n\n\nclass MediaServicer(object):\n \"\"\"Specifies the media service, which provides tools for resolving, uploading/updating, and managing rich media data\n associated with various system data points.\n \"\"\"\n\n def List(self, request, context):\n \"\"\"List media items for a given ownership scope (usually a partner or partner location). Only media items owned by\n the invoking partner/location are listed.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Retrieve(self, request, context):\n \"\"\"Retrieve an individual media item, addressable by its media key. If it cannot be found or the invoking user does\n not have permission to access it, a 404 is returned.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Upload(self, request, context):\n \"\"\"Provision a record for a new media item, and update the data attached to that record with an initial value. That\n is, upload and store a new, individual media item.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Attach(self, request, context):\n \"\"\"Notify the platform that a piece of recently uploaded/provisioned media is ready to be attached to the underlying\n subject parent (i.e. 
the product the media is depicting, or the partner/location the media is branding for, and so\n on), in cases where a client must perform followup to upload media to a separate endpoint.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_MediaServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'List': grpc.unary_unary_rpc_method_handler(\n servicer.List,\n request_deserializer=media_dot_v1beta1_dot_MediaService__Beta1__pb2.ListMedia.Request.FromString,\n response_serializer=media_dot_v1beta1_dot_MediaService__Beta1__pb2.ListMedia.Response.SerializeToString,\n ),\n 'Retrieve': grpc.unary_unary_rpc_method_handler(\n servicer.Retrieve,\n request_deserializer=media_dot_v1beta1_dot_MediaService__Beta1__pb2.GetMedia.Request.FromString,\n response_serializer=media_dot_v1beta1_dot_MediaService__Beta1__pb2.GetMedia.Response.SerializeToString,\n ),\n 'Upload': grpc.unary_unary_rpc_method_handler(\n servicer.Upload,\n request_deserializer=media_dot_v1beta1_dot_MediaService__Beta1__pb2.UploadMedia.Request.FromString,\n response_serializer=media_dot_v1beta1_dot_MediaService__Beta1__pb2.UploadMedia.Response.SerializeToString,\n ),\n 'Attach': grpc.unary_unary_rpc_method_handler(\n servicer.Attach,\n request_deserializer=media_dot_v1beta1_dot_MediaService__Beta1__pb2.AttachMedia.Request.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'bloombox.schema.services.media.v1beta1.Media', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n" }, { "alpha_fraction": 0.7260476350784302, "alphanum_fraction": 0.7415543794631958, "avg_line_length": 51.59223175048828, "blob_id": "707c78f8e41205bd72bb3728d4e8ae80ce4cabe0", "content_id": "99146de0eaa43dd0d62233ee2027b96aaf615185", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10834, "license_type": "permissive", "max_line_length": 127, "num_lines": 206, "path": "/src/bloombox/schema/services/marketing/v1beta1/MarketingService_Beta1_pb2_grpc.py", "repo_name": "fagan2888/Python", "src_encoding": "UTF-8", "text": "# Generated by the gRPC Python protocol compiler plugin. 
DO NOT EDIT!\nimport grpc\n\nfrom google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2\nfrom marketing.v1beta1 import MarketingService_Beta1_pb2 as marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2\n\n\nclass MarketingStub(object):\n \"\"\"Provides support for marketing campaign management, and outreach via arbitrary mediums (SMS, email, etc) to end-\n customers identified by Bloombox user accounts.\n -- API: Campaigns -- //\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.CreateCampaign = channel.unary_unary(\n '/bloombox.schema.services.marketing.v1beta1.Marketing/CreateCampaign',\n request_serializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.CampaignCreate.Request.SerializeToString,\n response_deserializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.CampaignCreate.Response.FromString,\n )\n self.GetCampaign = channel.unary_unary(\n '/bloombox.schema.services.marketing.v1beta1.Marketing/GetCampaign',\n request_serializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.CampaignGet.Request.SerializeToString,\n response_deserializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.CampaignGet.Response.FromString,\n )\n self.ListCampaigns = channel.unary_unary(\n '/bloombox.schema.services.marketing.v1beta1.Marketing/ListCampaigns',\n request_serializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.CampaignList.Request.SerializeToString,\n response_deserializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.CampaignList.Response.FromString,\n )\n self.UpdateCampaign = channel.unary_unary(\n '/bloombox.schema.services.marketing.v1beta1.Marketing/UpdateCampaign',\n request_serializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.CampaignUpdate.Request.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.ValidateCampaign = channel.unary_unary(\n '/bloombox.schema.services.marketing.v1beta1.Marketing/ValidateCampaign',\n request_serializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.CampaignValidate.Request.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.SendCampaign = channel.unary_unary(\n '/bloombox.schema.services.marketing.v1beta1.Marketing/SendCampaign',\n request_serializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.CampaignSend.Request.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.Adgroups = channel.unary_unary(\n '/bloombox.schema.services.marketing.v1beta1.Marketing/Adgroups',\n request_serializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.AdGroupList.Request.SerializeToString,\n response_deserializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.AdGroupList.Response.FromString,\n )\n self.CreateAdgroup = channel.unary_unary(\n '/bloombox.schema.services.marketing.v1beta1.Marketing/CreateAdgroup',\n request_serializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.AdGroupCreate.Request.SerializeToString,\n response_deserializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.AdGroupCreate.Response.FromString,\n )\n self.Adgroup = channel.unary_unary(\n '/bloombox.schema.services.marketing.v1beta1.Marketing/Adgroup',\n request_serializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.AdGroupGet.Request.SerializeToString,\n 
response_deserializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.AdGroupGet.Response.FromString,\n )\n self.UpdateAdgroup = channel.unary_unary(\n '/bloombox.schema.services.marketing.v1beta1.Marketing/UpdateAdgroup',\n request_serializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.AdGroupUpdate.Request.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n\n\nclass MarketingServicer(object):\n \"\"\"Provides support for marketing campaign management, and outreach via arbitrary mediums (SMS, email, etc) to end-\n customers identified by Bloombox user accounts.\n -- API: Campaigns -- //\n \"\"\"\n\n def CreateCampaign(self, request, context):\n \"\"\"Create a new marketing campaign from scratch.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetCampaign(self, request, context):\n \"\"\"Retrieve data for a given marketing campaign, addressed by its ID.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def ListCampaigns(self, request, context):\n \"\"\"Fetch a list of marketing campaigns for a given partner/location scope.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def UpdateCampaign(self, request, context):\n \"\"\"Update underlying data for an existing marketing campaign.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def ValidateCampaign(self, request, context):\n \"\"\"Validate a campaign's readiness before sending.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def SendCampaign(self, request, context):\n \"\"\"Trigger a marketing campaign to be sent.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Adgroups(self, request, context):\n \"\"\"-- API: Ad Groups -- //\n\n List ad groups for a given campaign.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def CreateAdgroup(self, request, context):\n \"\"\"Create a new marketing campaign ad group from scratch.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Adgroup(self, request, context):\n \"\"\"Retrieve a marketing campaign's ad group by its ID.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def UpdateAdgroup(self, request, context):\n \"\"\"Update underlying data or content for a given set of campaign ad parameters.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_MarketingServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'CreateCampaign': 
grpc.unary_unary_rpc_method_handler(\n servicer.CreateCampaign,\n request_deserializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.CampaignCreate.Request.FromString,\n response_serializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.CampaignCreate.Response.SerializeToString,\n ),\n 'GetCampaign': grpc.unary_unary_rpc_method_handler(\n servicer.GetCampaign,\n request_deserializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.CampaignGet.Request.FromString,\n response_serializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.CampaignGet.Response.SerializeToString,\n ),\n 'ListCampaigns': grpc.unary_unary_rpc_method_handler(\n servicer.ListCampaigns,\n request_deserializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.CampaignList.Request.FromString,\n response_serializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.CampaignList.Response.SerializeToString,\n ),\n 'UpdateCampaign': grpc.unary_unary_rpc_method_handler(\n servicer.UpdateCampaign,\n request_deserializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.CampaignUpdate.Request.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n 'ValidateCampaign': grpc.unary_unary_rpc_method_handler(\n servicer.ValidateCampaign,\n request_deserializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.CampaignValidate.Request.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n 'SendCampaign': grpc.unary_unary_rpc_method_handler(\n servicer.SendCampaign,\n request_deserializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.CampaignSend.Request.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n 'Adgroups': grpc.unary_unary_rpc_method_handler(\n servicer.Adgroups,\n request_deserializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.AdGroupList.Request.FromString,\n response_serializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.AdGroupList.Response.SerializeToString,\n ),\n 'CreateAdgroup': grpc.unary_unary_rpc_method_handler(\n servicer.CreateAdgroup,\n request_deserializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.AdGroupCreate.Request.FromString,\n response_serializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.AdGroupCreate.Response.SerializeToString,\n ),\n 'Adgroup': grpc.unary_unary_rpc_method_handler(\n servicer.Adgroup,\n request_deserializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.AdGroupGet.Request.FromString,\n response_serializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.AdGroupGet.Response.SerializeToString,\n ),\n 'UpdateAdgroup': grpc.unary_unary_rpc_method_handler(\n servicer.UpdateAdgroup,\n request_deserializer=marketing_dot_v1beta1_dot_MarketingService__Beta1__pb2.AdGroupUpdate.Request.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'bloombox.schema.services.marketing.v1beta1.Marketing', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n" }, { "alpha_fraction": 0.7119507193565369, "alphanum_fraction": 0.7288317084312439, "avg_line_length": 48.105262756347656, "blob_id": "c012b68e060b4d338ce565a9f75598942809c70f", "content_id": "0125bd37c197db4fc2be146f40f56edfe7c27710", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7464, 
"license_type": "permissive", "max_line_length": 120, "num_lines": 152, "path": "/src/bloombox/schema/services/auth/v1beta1/AuthService_Beta1_pb2_grpc.py", "repo_name": "fagan2888/Python", "src_encoding": "UTF-8", "text": "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\nimport grpc\n\nfrom auth.v1beta1 import AuthService_Beta1_pb2 as auth_dot_v1beta1_dot_AuthService__Beta1__pb2\nfrom google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2\n\n\nclass AuthStub(object):\n \"\"\"Specifies the Auth service, which is responsible for authenticating and authorizing users. It also provides limited\n profile information for UI purposes.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Authenticate = channel.unary_unary(\n '/bloombox.schema.services.auth.v1beta1.Auth/Authenticate',\n request_serializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.AuthenticateUser.Request.SerializeToString,\n response_deserializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.AuthenticateUser.Response.FromString,\n )\n self.Consent = channel.unary_unary(\n '/bloombox.schema.services.auth.v1beta1.Auth/Consent',\n request_serializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.ConsentFlow.Request.SerializeToString,\n response_deserializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.ConsentFlow.Response.FromString,\n )\n self.Token = channel.unary_unary(\n '/bloombox.schema.services.auth.v1beta1.Auth/Token',\n request_serializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.ResolveToken.Request.SerializeToString,\n response_deserializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.ResolveToken.Response.FromString,\n )\n self.Accept = channel.unary_unary(\n '/bloombox.schema.services.auth.v1beta1.Auth/Accept',\n request_serializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.ConsentDecision.Accept.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.Reject = channel.unary_unary(\n '/bloombox.schema.services.auth.v1beta1.Auth/Reject',\n request_serializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.ConsentDecision.Reject.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.Context = channel.unary_unary(\n '/bloombox.schema.services.auth.v1beta1.Auth/Context',\n request_serializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.UserContext.Request.SerializeToString,\n response_deserializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.UserContext.Response.FromString,\n )\n self.Profile = channel.unary_unary(\n '/bloombox.schema.services.auth.v1beta1.Auth/Profile',\n request_serializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.GetProfile.Request.SerializeToString,\n response_deserializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.GetProfile.Response.FromString,\n )\n\n\nclass AuthServicer(object):\n \"\"\"Specifies the Auth service, which is responsible for authenticating and authorizing users. It also provides limited\n profile information for UI purposes.\n \"\"\"\n\n def Authenticate(self, request, context):\n \"\"\"Authenticate an identity assertion of some kind from a user. 
Decide whether to grant them access to the subject\n account.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Consent(self, request, context):\n \"\"\"Retrieve consent flow information by ID.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Token(self, request, context):\n \"\"\"Retrieve an access token, given a completed ID and authorization ticket.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Accept(self, request, context):\n \"\"\"Indicates an affirmative consent decision from a user, during a consent flow.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Reject(self, request, context):\n \"\"\"Indicates declined consent from a user, during a consent flow.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Context(self, request, context):\n \"\"\"Generate a full user context after a successful ID and authorization flow.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Profile(self, request, context):\n \"\"\"Retrieve a user's profile by key.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_AuthServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'Authenticate': grpc.unary_unary_rpc_method_handler(\n servicer.Authenticate,\n request_deserializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.AuthenticateUser.Request.FromString,\n response_serializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.AuthenticateUser.Response.SerializeToString,\n ),\n 'Consent': grpc.unary_unary_rpc_method_handler(\n servicer.Consent,\n request_deserializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.ConsentFlow.Request.FromString,\n response_serializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.ConsentFlow.Response.SerializeToString,\n ),\n 'Token': grpc.unary_unary_rpc_method_handler(\n servicer.Token,\n request_deserializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.ResolveToken.Request.FromString,\n response_serializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.ResolveToken.Response.SerializeToString,\n ),\n 'Accept': grpc.unary_unary_rpc_method_handler(\n servicer.Accept,\n request_deserializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.ConsentDecision.Accept.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n 'Reject': grpc.unary_unary_rpc_method_handler(\n servicer.Reject,\n request_deserializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.ConsentDecision.Reject.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n 'Context': grpc.unary_unary_rpc_method_handler(\n servicer.Context,\n request_deserializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.UserContext.Request.FromString,\n 
response_serializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.UserContext.Response.SerializeToString,\n ),\n 'Profile': grpc.unary_unary_rpc_method_handler(\n servicer.Profile,\n request_deserializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.GetProfile.Request.FromString,\n response_serializer=auth_dot_v1beta1_dot_AuthService__Beta1__pb2.GetProfile.Response.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'bloombox.schema.services.auth.v1beta1.Auth', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n" }, { "alpha_fraction": 0.7051486372947693, "alphanum_fraction": 0.722029447555542, "avg_line_length": 48.138248443603516, "blob_id": "2d8365e41eb429ef55e45fd9f51fb4450f5439a5", "content_id": "1e662f8704a827334a1a27a16ab6de406cefe3f5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10663, "license_type": "permissive", "max_line_length": 116, "num_lines": 217, "path": "/src/bloombox/schema/services/menu/v1beta1/MenuService_Beta1_pb2_grpc.py", "repo_name": "fagan2888/Python", "src_encoding": "UTF-8", "text": "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\nimport grpc\n\nfrom google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2\nfrom menu.v1beta1 import MenuService_Beta1_pb2 as menu_dot_v1beta1_dot_MenuService__Beta1__pb2\n\n\nclass MenuStub(object):\n \"\"\"Specifies the menu service, which provides tools for consuming, updating, and subscribing to menu data.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Retrieve = channel.unary_unary(\n '/bloombox.schema.services.menu.v1beta1.Menu/Retrieve',\n request_serializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.GetMenu.Request.SerializeToString,\n response_deserializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.GetMenu.Response.FromString,\n )\n self.Section = channel.unary_unary(\n '/bloombox.schema.services.menu.v1beta1.Menu/Section',\n request_serializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.GetMenu.Request.SerializeToString,\n response_deserializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.GetMenu.Response.FromString,\n )\n self.Featured = channel.unary_unary(\n '/bloombox.schema.services.menu.v1beta1.Menu/Featured',\n request_serializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.GetFeatured.Request.SerializeToString,\n response_deserializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.GetFeatured.Response.FromString,\n )\n self.Products = channel.unary_unary(\n '/bloombox.schema.services.menu.v1beta1.Menu/Products',\n request_serializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.GetProduct.Request.SerializeToString,\n response_deserializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.GetProduct.Response.FromString,\n )\n self.Search = channel.unary_unary(\n '/bloombox.schema.services.menu.v1beta1.Menu/Search',\n request_serializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.SearchMenu.Request.SerializeToString,\n response_deserializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.SearchMenu.Response.FromString,\n )\n self.Create = channel.unary_unary(\n '/bloombox.schema.services.menu.v1beta1.Menu/Create',\n request_serializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.CreateProduct.Request.SerializeToString,\n response_deserializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.CreateProduct.Response.FromString,\n )\n self.Update = channel.unary_unary(\n 
'/bloombox.schema.services.menu.v1beta1.Menu/Update',\n request_serializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.UpdateProduct.Request.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.Remove = channel.unary_unary(\n '/bloombox.schema.services.menu.v1beta1.Menu/Remove',\n request_serializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.DeleteProduct.Request.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.ProductStatus = channel.unary_unary(\n '/bloombox.schema.services.menu.v1beta1.Menu/ProductStatus',\n request_serializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.ProductStock.Request.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.InStock = channel.unary_unary(\n '/bloombox.schema.services.menu.v1beta1.Menu/InStock',\n request_serializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.ProductStock.Request.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n self.OutOfStock = channel.unary_unary(\n '/bloombox.schema.services.menu.v1beta1.Menu/OutOfStock',\n request_serializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.ProductStock.Request.SerializeToString,\n response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n )\n\n\nclass MenuServicer(object):\n \"\"\"Specifies the menu service, which provides tools for consuming, updating, and subscribing to menu data.\n \"\"\"\n\n def Retrieve(self, request, context):\n \"\"\"Specifies an operation to read a full menu.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Section(self, request, context):\n \"\"\"Specifies an operation to read a sectioned menu.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Featured(self, request, context):\n \"\"\"Specifies an operation to read the list of featured products.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Products(self, request, context):\n \"\"\"Specifies an operation to read data for product(s) by key.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Search(self, request, context):\n \"\"\"Specifies an operation to read a full or sectioned menu.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Create(self, request, context):\n \"\"\"Create a new product record from scratch.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Update(self, request, context):\n \"\"\"Update an existing product record with new data.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Remove(self, request, context):\n \"\"\"Mark a product as deleted.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not 
implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def ProductStatus(self, request, context):\n \"\"\"Retrieve a single product's stock status.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def InStock(self, request, context):\n \"\"\"Mark a product as currently in-stock.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def OutOfStock(self, request, context):\n \"\"\"Mark a product as currently out-of-stock.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_MenuServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'Retrieve': grpc.unary_unary_rpc_method_handler(\n servicer.Retrieve,\n request_deserializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.GetMenu.Request.FromString,\n response_serializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.GetMenu.Response.SerializeToString,\n ),\n 'Section': grpc.unary_unary_rpc_method_handler(\n servicer.Section,\n request_deserializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.GetMenu.Request.FromString,\n response_serializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.GetMenu.Response.SerializeToString,\n ),\n 'Featured': grpc.unary_unary_rpc_method_handler(\n servicer.Featured,\n request_deserializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.GetFeatured.Request.FromString,\n response_serializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.GetFeatured.Response.SerializeToString,\n ),\n 'Products': grpc.unary_unary_rpc_method_handler(\n servicer.Products,\n request_deserializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.GetProduct.Request.FromString,\n response_serializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.GetProduct.Response.SerializeToString,\n ),\n 'Search': grpc.unary_unary_rpc_method_handler(\n servicer.Search,\n request_deserializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.SearchMenu.Request.FromString,\n response_serializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.SearchMenu.Response.SerializeToString,\n ),\n 'Create': grpc.unary_unary_rpc_method_handler(\n servicer.Create,\n request_deserializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.CreateProduct.Request.FromString,\n response_serializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.CreateProduct.Response.SerializeToString,\n ),\n 'Update': grpc.unary_unary_rpc_method_handler(\n servicer.Update,\n request_deserializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.UpdateProduct.Request.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n 'Remove': grpc.unary_unary_rpc_method_handler(\n servicer.Remove,\n request_deserializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.DeleteProduct.Request.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n 'ProductStatus': grpc.unary_unary_rpc_method_handler(\n servicer.ProductStatus,\n request_deserializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.ProductStock.Request.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n 'InStock': grpc.unary_unary_rpc_method_handler(\n servicer.InStock,\n request_deserializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.ProductStock.Request.FromString,\n 
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n 'OutOfStock': grpc.unary_unary_rpc_method_handler(\n servicer.OutOfStock,\n request_deserializer=menu_dot_v1beta1_dot_MenuService__Beta1__pb2.ProductStock.Request.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'bloombox.schema.services.menu.v1beta1.Menu', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n" }, { "alpha_fraction": 0.717930793762207, "alphanum_fraction": 0.7340090870857239, "avg_line_length": 42.34848403930664, "blob_id": "8523f152a8cd3ba0b91b381bd5910e45033d9aa1", "content_id": "55fb46ee1813e2c980cdc6a6a49ca860bf41a84e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2861, "license_type": "permissive", "max_line_length": 119, "num_lines": 66, "path": "/src/bloombox/schema/services/devices/v1beta1/DevicesService_Beta1_pb2_grpc.py", "repo_name": "fagan2888/Python", "src_encoding": "UTF-8", "text": "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\nimport grpc\n\nfrom devices.v1beta1 import DevicesService_Beta1_pb2 as devices_dot_v1beta1_dot_DevicesService__Beta1__pb2\n\n\nclass DevicesStub(object):\n \"\"\"Specifies the devices service, which enables managed devices to check-in, authorize themselves, and discover their\n identity/role.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Ping = channel.unary_unary(\n '/bloombox.schema.services.devices.v1beta1.Devices/Ping',\n request_serializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Ping.Request.SerializeToString,\n response_deserializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Ping.Response.FromString,\n )\n self.Activate = channel.unary_unary(\n '/bloombox.schema.services.devices.v1beta1.Devices/Activate',\n request_serializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Activation.Request.SerializeToString,\n response_deserializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Activation.Response.FromString,\n )\n\n\nclass DevicesServicer(object):\n \"\"\"Specifies the devices service, which enables managed devices to check-in, authorize themselves, and discover their\n identity/role.\n \"\"\"\n\n def Ping(self, request, context):\n \"\"\"Ping the device server.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Activate(self, request, context):\n \"\"\"Setup and enable a device for live use. 
If this is the first time the subject device has activated itself,\n initialize or otherwise provision any requisite objects or resources.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_DevicesServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'Ping': grpc.unary_unary_rpc_method_handler(\n servicer.Ping,\n request_deserializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Ping.Request.FromString,\n response_serializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Ping.Response.SerializeToString,\n ),\n 'Activate': grpc.unary_unary_rpc_method_handler(\n servicer.Activate,\n request_deserializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Activation.Request.FromString,\n response_serializer=devices_dot_v1beta1_dot_DevicesService__Beta1__pb2.Activation.Response.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'bloombox.schema.services.devices.v1beta1.Devices', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n" }, { "alpha_fraction": 0.7187582850456238, "alphanum_fraction": 0.7357389330863953, "avg_line_length": 44.96341323852539, "blob_id": "ba16b82e0fadca7a8c7ffbadf4b3cf313c8d2c1c", "content_id": "2ebfbbb760bf460830b5883b587e462066055d1f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3769, "license_type": "permissive", "max_line_length": 120, "num_lines": 82, "path": "/src/bloombox/schema/services/checkin/v1beta1/CheckinService_Beta1_pb2_grpc.py", "repo_name": "fagan2888/Python", "src_encoding": "UTF-8", "text": "# Generated by the gRPC Python protocol compiler plugin. 
DO NOT EDIT!\nimport grpc\n\nfrom checkin.v1beta1 import CheckinService_Beta1_pb2 as checkin_dot_v1beta1_dot_CheckinService__Beta1__pb2\n\n\nclass CheckinStub(object):\n  \"\"\"Specifies the checkin service, which accepts opaque identification information for the purpose of checking users in\n  to a physical brick-and-mortar retail location.\n  \"\"\"\n\n  def __init__(self, channel):\n    \"\"\"Constructor.\n\n    Args:\n      channel: A grpc.Channel.\n    \"\"\"\n    self.Ping = channel.unary_unary(\n        '/bloombox.schema.services.checkin.v1beta1.Checkin/Ping',\n        request_serializer=checkin_dot_v1beta1_dot_CheckinService__Beta1__pb2.Ping.Request.SerializeToString,\n        response_deserializer=checkin_dot_v1beta1_dot_CheckinService__Beta1__pb2.Ping.Response.FromString,\n        )\n    self.Identification = channel.unary_unary(\n        '/bloombox.schema.services.checkin.v1beta1.Checkin/Identification',\n        request_serializer=checkin_dot_v1beta1_dot_CheckinService__Beta1__pb2.IDCheckin.Request.SerializeToString,\n        response_deserializer=checkin_dot_v1beta1_dot_CheckinService__Beta1__pb2.CheckinResponse.FromString,\n        )\n    self.Card = channel.unary_unary(\n        '/bloombox.schema.services.checkin.v1beta1.Checkin/Card',\n        request_serializer=checkin_dot_v1beta1_dot_CheckinService__Beta1__pb2.CardCheckin.Request.SerializeToString,\n        response_deserializer=checkin_dot_v1beta1_dot_CheckinService__Beta1__pb2.CheckinResponse.FromString,\n        )\n\n\nclass CheckinServicer(object):\n  \"\"\"Specifies the checkin service, which accepts opaque identification information for the purpose of checking users in\n  to a physical brick-and-mortar retail location.\n  \"\"\"\n\n  def Ping(self, request, context):\n    \"\"\"Ping the checkin server.\n    \"\"\"\n    context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n    context.set_details('Method not implemented!')\n    raise NotImplementedError('Method not implemented!')\n\n  def Identification(self, request, context):\n    \"\"\"Specifies an operation to check a user in via their government ID.\n    \"\"\"\n    context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n    context.set_details('Method not implemented!')\n    raise NotImplementedError('Method not implemented!')\n\n  def Card(self, request, context):\n    \"\"\"Specifies an operation to check a user in via their card.\n    \"\"\"\n    context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n    context.set_details('Method not implemented!')\n    raise NotImplementedError('Method not implemented!')\n\n\ndef add_CheckinServicer_to_server(servicer, server):\n  rpc_method_handlers = {\n      'Ping': grpc.unary_unary_rpc_method_handler(\n          servicer.Ping,\n          request_deserializer=checkin_dot_v1beta1_dot_CheckinService__Beta1__pb2.Ping.Request.FromString,\n          response_serializer=checkin_dot_v1beta1_dot_CheckinService__Beta1__pb2.Ping.Response.SerializeToString,\n      ),\n      'Identification': grpc.unary_unary_rpc_method_handler(\n          servicer.Identification,\n          request_deserializer=checkin_dot_v1beta1_dot_CheckinService__Beta1__pb2.IDCheckin.Request.FromString,\n          response_serializer=checkin_dot_v1beta1_dot_CheckinService__Beta1__pb2.CheckinResponse.SerializeToString,\n      ),\n      'Card': grpc.unary_unary_rpc_method_handler(\n          servicer.Card,\n          request_deserializer=checkin_dot_v1beta1_dot_CheckinService__Beta1__pb2.CardCheckin.Request.FromString,\n          response_serializer=checkin_dot_v1beta1_dot_CheckinService__Beta1__pb2.CheckinResponse.SerializeToString,\n      ),\n  }\n  generic_handler = grpc.method_handlers_generic_handler(\n      'bloombox.schema.services.checkin.v1beta1.Checkin', rpc_method_handlers)\n  server.add_generic_rpc_handlers((generic_handler,))\n" }, {
"alpha_fraction": 0.621082603931427, "alphanum_fraction": 0.6296296119689941, "avg_line_length": 22.399999618530273, "blob_id": "1fcfdaf63059ae5bab611324cbd2b389c04e13cb", "content_id": "8fcef13ab08a82dbe1fc9b234c1f7d33c7c03459", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 702, "license_type": "permissive", "max_line_length": 84, "num_lines": 30, "path": "/bloombox_tests/basic_tests.py", "repo_name": "fagan2888/Python", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\n bloombox testsuite: basic tests\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n :copyright: (c) Momentum Ideas Co., 2018\n :license: This software makes use of the Apache License v2.\n A copy of this license is included as ``LICENSE.md`` in\n the root of the project.\n\"\"\"\n\nimport unittest\n\n\nclass BasicLibraryTests(unittest.TestCase):\n\n \"\"\" Basic library tests. \"\"\"\n\n def test_basic_import(self):\n\n \"\"\" Top-level: 'bloombox' should be importable. \"\"\"\n\n import bloombox\n\n def test_toplevel_import(self):\n\n \"\"\" Top-level: 'bloombox.schema' and 'bloombox.client' should be importable. \"\"\"\n\n from bloombox import schema\n from bloombox import client\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.7083333134651184, "avg_line_length": 15, "blob_id": "45f310eded1702566835be0a2a71bb9346c20bad", "content_id": "437d1b357a7f1fcf147fee3e041555af5a3821d2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 48, "license_type": "permissive", "max_line_length": 21, "num_lines": 3, "path": "/tox.ini", "repo_name": "fagan2888/Python", "src_encoding": "UTF-8", "text": "[pep8]\nignore = E111,E121\nmax-line-length = 120\n" } ]
17
inehosa1/logindjango
https://github.com/inehosa1/logindjango
7acd9e9dc3187a02e090f92ff93ddf7746773461
366e77f1e639b767dcb971163cebc2ef959aac83
3eeca88d469c91dccad6d2f8b05d888d2ad6ca65
refs/heads/master
2020-03-28T06:41:42.072443
2018-09-07T22:40:28
2018-09-07T22:40:28
147,853,410
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7267267107963562, "alphanum_fraction": 0.7267267107963562, "avg_line_length": 35, "blob_id": "cda326d6bfbbd29ca45b669782ff77b35af7cde2", "content_id": "5ea49cad33b26db390dbf7a85a3d7c51dbaf7bb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 333, "license_type": "no_license", "max_line_length": 74, "num_lines": 9, "path": "/login/apps/user/urls.py", "repo_name": "inehosa1/logindjango", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\r\nfrom apps.user.views import RegisterUser, UpdateUser\r\nfrom django.contrib.auth.decorators import login_required\r\n\r\napp_name = 'user'\r\nurlpatterns = [\r\n\turl(r'^registrar/', RegisterUser.as_view(), name=\"user_register\"),\r\n\turl(r'^perfil/', login_required(UpdateUser.as_view()), name='user_edit'),\r\n]\r\n" }, { "alpha_fraction": 0.7370078563690186, "alphanum_fraction": 0.7370078563690186, "avg_line_length": 33.33333206176758, "blob_id": "ade34bd42697c5579469e9d9301ccceaa0f9b501", "content_id": "2a0aaf53a16470d37952c609bb404622f0090250", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1270, "license_type": "no_license", "max_line_length": 67, "num_lines": 36, "path": "/login/apps/user/views.py", "repo_name": "inehosa1/logindjango", "src_encoding": "UTF-8", "text": "import json\r\nfrom django.http import HttpResponse\r\nfrom django.contrib.auth.models import User\r\nfrom django.contrib.auth.forms import UserCreationForm\r\nfrom django.views.generic import CreateView, UpdateView\r\nfrom django.core.urlresolvers import reverse_lazy\r\nfrom apps.user.forms import RegisterForm\r\n\r\nclass RegisterUser(CreateView):\r\n\tmodel = User\r\n\ttemplate_name = \"user/register.html\"\r\n\tform_class = RegisterForm\r\n\tsuccess_url = reverse_lazy('login')\r\n\r\nclass UpdateUser(UpdateView):\r\n\tmodel = User\r\n\ttemplate_name = \"user/edit.html\"\r\n\tform_class = RegisterForm\r\n\tsuccess_url = reverse_lazy('user:user_edit')\r\n\r\n\tdef get(self, request, **kwargs):\r\n\t\tself.object = User.objects.get(pk=self.request.user.pk)\r\n\t\tform_class = self.get_form_class()\r\n\t\tform = self.get_form(form_class)\r\n\t\tcontext = self.get_context_data(object=self.object, form=form)\r\n\t\treturn self.render_to_response(context)\r\n\r\n\tdef post(self, request, *args, **kwargs):\r\n\t\tuser = User.objects.get(pk=self.request.user.pk)\r\n\t\tself.object = self.get_object\r\n\t\tform = self.form_class(request.POST)\r\n\t\tif form.is_valid():\r\n\t\t\tform = form.save(request.POST,instance=user)\r\n\t\t\treturn HttpResponseRedirect(self.get_success_url())\r\n\t\telse:\r\n\t\t\treturn self.render_to_response(self.get_context_data(form=form))" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.7589285969734192, "avg_line_length": 13, "blob_id": "6bb402e050e7890c72688ba2e17593896345efe9", "content_id": "6f400bd269e1e2c52c8ff9839de67f81156e63c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 337, "license_type": "no_license", "max_line_length": 30, "num_lines": 24, "path": "/README.md", "repo_name": "inehosa1/logindjango", "src_encoding": "UTF-8", "text": "# requirements\n- django==1.11\n\n# Login\n\n# Ejecutar migraciones \n\n- Ejecucion de migraciones\n\n- python manage.py migrate \n\n# Ejecutar aplicacion:\n- python manage.py runserver \n\n# Funciones del sistema\n\n- formulario de registro\n- formulario de logueo\n- formulario de perfil 
usuario\n\n# Funcionabilidades faltantes\n\n- validaciones\n- diseño\n" } ]
3
bartoszpogoda/my-public-github-profile-tests
https://github.com/bartoszpogoda/my-public-github-profile-tests
fb061a4dfac1994874230d852cc9aef429a2e2a5
f870a3ef433e538b1562e344bd0cef6893d96a56
11e308abaee2a9a28d6e48d101df2da22345d583
refs/heads/master
2020-03-18T02:05:16.585544
2018-05-20T18:10:56
2018-05-20T18:10:56
134,174,027
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6522625088691711, "alphanum_fraction": 0.6571545004844666, "avg_line_length": 38.58064651489258, "blob_id": "9806b72b94fbd9ed778f9a961b86cd8d6c59ce03", "content_id": "53acaad5d5550fe3a4531c9174186d74ecaafacf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2453, "license_type": "no_license", "max_line_length": 116, "num_lines": 62, "path": "/my-github-profile-tests.py", "repo_name": "bartoszpogoda/my-public-github-profile-tests", "src_encoding": "UTF-8", "text": "import unittest\nfrom selenium import webdriver\nfrom collections import Counter\nfrom datetime import datetime, timedelta\n\n\nclass MyPublicGithubProfileTests(unittest.TestCase):\n def setUp(self):\n # create a new Firefox session\n self.driver = webdriver.Firefox()\n self.driver.implicitly_wait(30)\n self.driver.maximize_window()\n\n # navigate to my github page\n self.driver.get('https://github.com/bartoszpogoda')\n\n # Checks if my first and last name are set correctly\n def test_my_name(self):\n name_container = self.driver.find_element_by_css_selector('.vcard-fullname')\n self.assertTrue('Bartosz Pogoda' in name_container.text, 'Hey! Did you change your name recently?')\n\n # Checks if I am popular enough\n def test_more_than_5_followers(self):\n number_of_followers = int(self.driver.find_element_by_css_selector('[title=\"Followers\"] span.Counter').text)\n self.assertGreater(number_of_followers, 5, 'You\\'re not popular enough!')\n\n # Checks if Java is still my favorite language\n def test_java_dominance(self):\n # Navigate to repositories page\n self.driver.find_element_by_partial_link_text('Repositories').click()\n\n # Find most frequently used language\n languages = self.driver.find_elements_by_css_selector('span[itemprop=\"programmingLanguage\"]')\n languages = map(lambda x: x.text, languages)\n counted_languages = Counter(languages)\n\n most_common = counted_languages.most_common(1)\n\n # Assert it is Java\n self.assertEqual('Java', most_common[0][0], 'Seems like you should program more in Java.')\n\n # Checks if I've performed more than 5 commits in the last week\n def test_commits_last_week(self):\n last_week_dates = map(lambda date: date.strftime(\"%Y-%m-%d\"),\n (map(lambda diff: datetime.today() - timedelta(days=diff), range(0, 7))))\n\n def get_commits_for_date(date):\n return int(self.driver.find_element_by_css_selector\\\n ('svg.js-calendar-graph-svg rect.day[data-date=\"'\n + date + '\"').get_attribute('data-count'))\n\n commit_counts = list(map(get_commits_for_date, last_week_dates))\n\n self.assertGreater(sum(commit_counts), 5, 'Commit more to committing!')\n\n def tearDown(self):\n # close the browser window\n self.driver.quit()\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)" }, { "alpha_fraction": 0.7549669146537781, "alphanum_fraction": 0.7582781314849854, "avg_line_length": 29.25, "blob_id": "72bb0f35a372e80ecd8eeee8df961e7053f4fe95", "content_id": "969a66605e330f0855ef948777bab35b301585f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 604, "license_type": "no_license", "max_line_length": 120, "num_lines": 20, "path": "/README.md", "repo_name": "bartoszpogoda/my-public-github-profile-tests", "src_encoding": "UTF-8", "text": "## Project background\n\nTests were created for academic classes, the goal was to test chosen web application using Selenium Webdriver in Python.\n\n## Usage\n\nTo run the script you will need the **geckodriver** on your 
system PATH and a Python environment with Selenium installed.\n\n```\npip install -U selenium\n```\n\n## Test coverage\n\nMy tests check the following assertions:\n\n- If my name is set up correctly,\n- if I have more than 5 followers (popularity check),\n- if Java is the most frequent language for my public repos (Java dominance check),\n- if I've committed more than 5 times in the last week (laziness check)." } ]
2
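The tests in the record above drive the browser through Selenium 3's `find_element_by_*` helpers, which were removed in Selenium 4. A minimal sketch of the same follower-count check against the Selenium 4 `By` locator API; the profile URL and CSS selector are taken from the record, the rest is illustrative:

```python
# Sketch assuming Selenium 4+, where find_element_by_css_selector no longer exists.
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Firefox()
try:
    driver.get('https://github.com/bartoszpogoda')
    # Same selector as the record's test_more_than_5_followers
    counter = driver.find_element(By.CSS_SELECTOR, '[title="Followers"] span.Counter')
    assert int(counter.text) > 5, 'Not popular enough!'
finally:
    driver.quit()
```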
boyagm/fetch-repo-list
https://github.com/boyagm/fetch-repo-list
200cecd7a7daa7a1dc2e88be4612fdcea8cdd3e0
90e021a833c5cc9ca7b563677290b17a27b1495c
4dbefd2e1f668036c62973dfaa9db071605208d0
refs/heads/main
2023-01-27T13:45:22.709496
2020-12-12T07:28:53
2020-12-12T07:28:53
320,762,754
0
0
MIT
2020-12-12T06:15:46
2020-12-12T06:15:47
2020-12-12T07:28:53
null
[ { "alpha_fraction": 0.5690425634384155, "alphanum_fraction": 0.5709244608879089, "avg_line_length": 25.911392211914062, "blob_id": "b0adbee7127fef2aa8ecea2f518e8da98b38ddfb", "content_id": "0f6375215885a2207135e448d4099627f869f9ce", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4251, "license_type": "permissive", "max_line_length": 87, "num_lines": 158, "path": "/main.py", "repo_name": "boyagm/fetch-repo-list", "src_encoding": "UTF-8", "text": "import json \nimport argparse\nfrom datetime import datetime, timedelta\nfrom functools import partial\nfrom gql import gql, Client\nfrom gql.transport.aiohttp import AIOHTTPTransport\n\nclass Repo(object):\n \"\"\"Class store repo information\"\"\"\n def __init__(self, data):\n self.name = data['nameWithOwner']\n self.last_updated = datetime.fromisoformat(data[\"pushedAt\"][:-1])\n template = data['templateRepository']\n if template:\n self.template = template['nameWithOwner']\n else:\n self.template = None\n\n\ndef create_client(token):\n \"\"\"Create a Graph DB query client\"\"\"\n headers = {\n \"Authorization\": f\"token {token}\", \n }\n # Select your transport with a defined url endpoint\n transport = AIOHTTPTransport(url=\"https://api.github.com/graphql\", headers=headers)\n\n # Create a GraphQL client using the defined transport\n client = Client(transport=transport, fetch_schema_from_transport=True)\n return client\n\n\ndef generate_query(cursor, org=None):\n \"\"\"\n Construct a Github GraphQL API query\n \"\"\"\n if org:\n \n return gql(f'''\n {{\n viewer {{\n organization(login: {org}) {{\n {generate_repo_query(cursor)}\n }}\n }}\n }}\n '''\n )\n else:\n return gql(f'''\n {{\n viewer {{\n {generate_repo_query(cursor)}\n }}\n }}\n '''\n )\n\n\ndef generate_repo_query(cursor):\n \"\"\"\n Construct a sub-query to fetch all information\n \"\"\"\n return f'''\n repositories(first: 100 {next_cursor(cursor)}) {{\n edges {{\n cursor\n node {{\n nameWithOwner\n pushedAt\n templateRepository {{\n nameWithOwner\n }}\n }}\n }}\n }}\n '''\n\ndef next_cursor(cursor):\n '''Get after string for query'''\n if cursor:\n return f'after: \"{cursor}\"'\n else:\n return \"\"\n\n\ndef time_filters(repo, last_n_day):\n \"\"\"Return True if the repo is active in last N days\"\"\"\n if repo.last_updated < datetime.today() - timedelta(days=last_n_day):\n return False\n return True\n\n\ndef template_filters(repo, template_name):\n \"\"\"Return True if the repo is created using the template\"\"\"\n if repo.template != template_name:\n return False\n return True\n\n\ndef parse_results(result_json, org=None):\n if org:\n results = result_json['viewer']['organization']['repositories']['edges']\n else:\n results = result_json['viewer']['repositories']['edges']\n \n repo_list = [Repo(x['node']) for x in results]\n last_cursor = results[-1]['cursor']\n return repo_list, last_cursor\n\ndef main():\n parser = argparse.ArgumentParser(description='Process inputs.')\n parser.add_argument('--last_active', type=int, default=None)\n parser.add_argument('--org_name', type=str, default=None)\n parser.add_argument(\n '--template_name', \n type=str, \n default=None)\n parser.add_argument(\n '--token', \n type=str, \n required=True)\n\n args = parser.parse_args()\n\n client = create_client(args.token)\n current_cursor = None\n repo_list = []\n while True:\n # Construct a GraphQL query\n query = generate_query(current_cursor, args.org_name)\n # Execute the query on the transport\n result_json = 
client.execute(query)\n        repos, current_cursor = parse_results(result_json, args.org_name)\n        repo_list.extend(repos)\n        if len(repos) < 100:\n            break\n\n    if args.last_active:\n        repo_filter = partial(time_filters, last_n_day=args.last_active)\n        repo_list = list(filter(repo_filter, repo_list))\n\n    if args.template_name:\n        repo_filter = partial(template_filters, template_name=args.template_name)\n        repo_list = list(filter(repo_filter, repo_list))\n\n    repo_names = [x.name for x in repo_list]\n    for x in repo_list:\n        print(x.name, x.template, x.last_updated)\n\n    with open(\"repos.txt\", \"w\") as f:\n        json.dump({\"repo\": repo_names}, f)\n\n    return\n\n\nif __name__ == \"__main__\":\n    main()" }, { "alpha_fraction": 0.660516619682312, "alphanum_fraction": 0.6863468885421753, "avg_line_length": 37.71428680419922, "blob_id": "76c8c08c64f042754488fdc90618883980bf6014", "content_id": "7343c691e5f10aa1ae870a3b8fc50efb98c1aa2d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 271, "license_type": "permissive", "max_line_length": 91, "num_lines": 7, "path": "/entrypoint.sh", "repo_name": "boyagm/fetch-repo-list", "src_encoding": "UTF-8", "text": "#!/bin/sh -l\n\necho \"Repos active on GitHub in the last $2 days.\"\necho \"The template getting checked is $3.\"\necho \"The organization name is $4.\"\nrepo_list=$(python /src/main.py --token $1 --last_active $2 --template_name $3 --org_name $4)\necho \"::set-output name=repo_list::$repo_list\"\n" } ]
2
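The record's main.py binds filter arguments with `functools.partial` before applying `filter`. A self-contained sketch of that pattern; the plain dicts below stand in for the script's `Repo` objects and are purely illustrative:

```python
# Illustrative sketch of the partial-based filtering used in main.py above;
# the dicts are stand-ins for the script's Repo class, not its real data.
from datetime import datetime, timedelta
from functools import partial

def time_filter(repo, last_n_day):
    # True if the repo was pushed to within the last N days
    return repo['pushedAt'] >= datetime.today() - timedelta(days=last_n_day)

repos = [
    {'name': 'org/live',  'pushedAt': datetime.today()},
    {'name': 'org/stale', 'pushedAt': datetime.today() - timedelta(days=90)},
]
recent = list(filter(partial(time_filter, last_n_day=30), repos))
assert [r['name'] for r in recent] == ['org/live']
```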
gaojk/Python_Study
https://github.com/gaojk/Python_Study
a5e6da4fab10fe91c70809d8ee107647e2ff1667
f6980b5b913da9e567fa9747d46ea45306750d8a
5a909652fe5530fc32d0aa3ecf074a8b1d051df0
refs/heads/master
2020-12-22T17:01:59.520413
2018-01-05T15:59:23
2018-01-05T15:59:23
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6401515007019043, "alphanum_fraction": 0.6654040217399597, "avg_line_length": 22.294116973876953, "blob_id": "b276c765b510e927738ccac5632bb39b418a4ffb", "content_id": "51b61349f444ff4ed6790d864f6a2c5b28b55948", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1110, "license_type": "no_license", "max_line_length": 64, "num_lines": 34, "path": "/list/IterationTest.py", "repo_name": "gaojk/Python_Study", "src_encoding": "UTF-8", "text": "# Author:Jing Lv\n# 迭代\nfrom collections import Iterable\n\n# 定义一个字典\nd = {\"May\": 18, \"Jack\": 20, \"Jean\": 16, \"Abby\": 25}\n\n# 因为dict的存储不是按照list的方式顺序排列,所以,迭代出的结果顺序很可能不一样\n# 迭代key,默认情况下,dict迭代的是key\nfor key in d:\n print(key)\n\n# 迭代value\nfor value in d.values():\n print(value)\n\n# 同时迭代key和value\nfor key, value in d.items():\n print(\"%s : %s\" % (key, value))\n\n# 如何判断一个对象是可迭代对象呢?方法是通过collections模块的Iterable类型判断:\nprint(isinstance('abc', Iterable)) # str是否可迭代 True\nprint(isinstance([1, 2, 3], Iterable)) # list是否可迭代 True\nprint(isinstance(123, Iterable)) # 整数是否可迭代 False\n\n# Python内置的enumerate函数可以把一个list变成索引-元素对,这样就可以在for循环中同时迭代索引和元素本身:\nl = ['a', 'b', 'c', 'd', 'e']\nfor i, value in enumerate(l):\n print(i, value)\n\n# for循环里,同时引用了两个变量\nlt = [(4, 2), (6, 3), (2, 1)]\nfor x, y in lt:\n print(x, y)\n" }, { "alpha_fraction": 0.584725558757782, "alphanum_fraction": 0.6181384325027466, "avg_line_length": 22.27777862548828, "blob_id": "35cf5b57bc90644644b75f96c88fb0798475d3cd", "content_id": "e696329aeacced33e6f6319674a23d96dce3ecf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 886, "license_type": "no_license", "max_line_length": 55, "num_lines": 36, "path": "/excel/ReadExcel.py", "repo_name": "gaojk/Python_Study", "src_encoding": "UTF-8", "text": "# Author:Jing Lv\n# 读取Excel\n\n# 读2003 excel\nimport xlrd\n# 读2007 excel\nimport openpyxl\n\n\ndef read03Excel(path):\n data = xlrd.open_workbook(path)\n sheets = data.sheet_names()\n sheet = data.sheet_by_name(sheets[0])\n for i in range(0, sheet.nrows):\n row = sheet.row(i)\n for j in range(0, sheet.ncols):\n print(sheet.cell_value(i, j), \"\\t\", end=\"\")\n print()\n\n\ndef read07Excel(path, sheetname):\n data = openpyxl.load_workbook(path) # 打开excel文件\n print(data.get_sheet_names()) # 获取工作簿所有工作表名\n sheet = data.get_sheet_by_name(sheetname) # 获取工作表\n print(sheet.title)\n\n for row in sheet.rows:\n for cell in row:\n print(cell.value, \"\\t\", end=\"\")\n print()\n\n\n# path03 = '../dataFile/case1.xls'\n# read03Excel(path03)\npath07 = '../dataFile/api_case.xlsx'\nread07Excel(path07)\n" }, { "alpha_fraction": 0.553954005241394, "alphanum_fraction": 0.5605765581130981, "avg_line_length": 23.216981887817383, "blob_id": "95c019a34a00ae955bd95e5e166634626ba16c81", "content_id": "8f7c6e8f17df1e5ec973573293ce2787799fdffc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3607, "license_type": "no_license", "max_line_length": 100, "num_lines": 106, "path": "/decorator/decoratorTest.py", "repo_name": "gaojk/Python_Study", "src_encoding": "UTF-8", "text": "# Author:Jing Lv\nfrom datetime import datetime\nimport functools\n\n\"\"\"\n装饰器\n在代码运行期间动态增加功能的方式,称之为“装饰器”(Decorator)\n装饰器其实也就是一个函数,一个用来包装函数的函数,装饰器在函数申明完成的时候被调用,\n调用之后申明的函数被换成一个被装饰器装饰过后的函数\n\"\"\"\n\n\n# 无参装饰器\n# 定义一个能打印日志的decorator\ndef log(func):\n def wrapper(*args, **kw):\n print(\"Hello, %s():\" % func.__name__) # 函数对象有一个__name__属性,可以拿到函数的名字\n return func(*args, 
**kw)\n\n return wrapper\n\n\n# log是一个decorator,接受一个函数作为参数,并返回一个函数。借助Python的@语法,把decorator置于函数的定义处\n@log\ndef now():\n print(datetime.now())\n\n\n# 调用now()函数,不仅会运行now()函数本身,还会在运行now()函数前打印一行日志\nnow() # 把@log放到now()函数的定义处,相当于执行了语句:now = log(now)\n\"\"\"\n1.log()是一个decorator,返回一个函数,原来的now()函数仍然存在\n2.现在同名的now变量指向了新的函数,于是调用now()将执行新函数,即在log()函数中返回的wrapper()函数\n3.wrapper()函数的参数定义是(*args, **kw),因此,wrapper()函数可以接受任意参数的调用\n4.在wrapper()函数内,首先打印日志,再紧接着调用原始函数。\n\"\"\"\nprint(now.__name__) # 运行结果:wrapper\nprint('-----------------------------------------------------------------------------------------')\n\n\n# 带参装饰器\ndef log(text):\n def decorator(func):\n def wrapper(*args, **kw):\n print(\"Hello, %s, %s():\" % (text, func.__name__))\n return func(*args, **kw)\n\n return wrapper\n\n return decorator\n\n\n@log(\"action\")\ndef now1():\n print(datetime.now())\n\n\nnow1() # 和两层嵌套的decorator相比,3层嵌套的效果是这样的:now = log('execute')(now)\n# 首先执行log('execute'),返回的是decorator函数,再调用返回的函数,参数是now函数,返回值最终是wrapper函数\nprint(now1.__name__) # 运行结果:wrapper\nprint('-----------------------------------------------------------------------------------------')\n\nprint('-----------------------------------------------------------------------------------------')\n\n\n# 经过decorator装饰之后的函数,它们的__name__已经从原来的'now'变成了'wrapper'\n# 因为返回的那个wrapper()函数名字就是'wrapper',所以,需要把原始函数的__name__等属性复制到wrapper()函数中,否则,有些依赖函数签名的代码执行就会出错\n# 不需要编写wrapper.__name__ = func.__name__这样的代码,Python内置的functools.wraps就是干这个事的,所以,一个完整的decorator的写法如下:\ndef log3(func):\n @functools.wraps(func)\n def wrapper(*args, **kw):\n print(\"Hello, %s():\" % func.__name__) # 函数对象有一个__name__属性,可以拿到函数的名字\n return func(*args, **kw)\n\n return wrapper\n\n\n@log3\ndef now3():\n print(datetime.now())\n\n\nnow3()\nprint(now3.__name__)\nprint('-----------------------------------------------------------------------------------------')\n\n\ndef log4(text):\n def decorator(func):\n @functools.wraps(func)\n def wrapper(*args, **kw):\n print(\"Hello, %s, %s():\" % (text, func.__name__))\n return func(*args, **kw)\n\n return wrapper\n\n return decorator\n\n\n@log(\"WaO\")\ndef now4():\n print(datetime.now())\n\n\nnow4()\nprint(now4.__name__)\n" }, { "alpha_fraction": 0.5649582743644714, "alphanum_fraction": 0.619785487651825, "avg_line_length": 22.33333396911621, "blob_id": "60eb7c18f5b094e18b29b5cf80d18bee9b4c2d98", "content_id": "46a6ad4326d0150e92ee26d56583b0280516c151", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1173, "license_type": "no_license", "max_line_length": 67, "num_lines": 36, "path": "/list/ListComprehensionsTest.py", "repo_name": "gaojk/Python_Study", "src_encoding": "UTF-8", "text": "# Author:Jing Lv\n# 列表生成式\nimport os\n\n# 举个例子,要生成list [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]可以用list(range(1, 11))\nprint(list(range(1, 11)))\n\n# 如果要生成[1x1, 2x2, 3x3, ..., 10x10]\n# 方法一:\nL = []\nfor i in range(1, 11):\n L.append(i * i)\nprint(L)\n\n# 方法二:\nprint([i * i for i in range(1, 11)])\n\n# for循环后面还可以加上if判断,这样我们就可以筛选出仅偶数的平方\nprint([i * i for i in range(1, 11) if i % 2 == 0])\n\n# 还可以使用两层循环,可以生成全排列\nprint([m + n for m in 'ABCDEFG' for n in \"HIJKLMN\"])\n\n# 运用列表生成式,可以写出非常简洁的代码。例如,列出当前目录下的所有文件和目录名,可以通过一行代码实现:\nprint([d for d in os.listdir()])\n\n# for循环其实可以同时使用两个甚至多个变量,比如dict的items()可以同时迭代key和value\nd = {\"May\": 18, \"Jack\": 20, \"Jean\": 16, \"Abby\": 25}\nfor key, value in d.items():\n print(key, \"=\", value)\n\nprint([key + \"=\" + str(value) for key, value in d.items()])\n\n# 把一个list中所有的字符串变成小写\nl = [\"HELLO\", \"WORLD\", 
\"APPLE\", \"WA\"]\nprint([s.lower() for s in l])" }, { "alpha_fraction": 0.5141414403915405, "alphanum_fraction": 0.5717171430587769, "avg_line_length": 23.75, "blob_id": "d6baf8d906a8b5b742193534fc9cdbf9917393b3", "content_id": "ac2a2c2b771cab65073ba80b60a6944c783863c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1130, "license_type": "no_license", "max_line_length": 71, "num_lines": 40, "path": "/excel/WriteExcel.py", "repo_name": "gaojk/Python_Study", "src_encoding": "UTF-8", "text": "# Author:Jing Lv\n# 写excel\n\n# 写2003 excel\nimport xlwt\n# 写2007 excel\nimport openpyxl\n\n\ndef write03Excel(path, sheetName, value):\n wb = xlwt.Workbook(path)\n sheet = wb.add_sheet(sheetName)\n for i in range(0, 4):\n for j in range(0, len(value[i])):\n sheet.write(i, j, value[i][j])\n wb.save(path)\n print(\"写入数据成功\")\n\n\ndef write07Excel(path, sheetName, value):\n wb = openpyxl.Workbook()\n sheet = wb.active\n sheet.title = sheetName\n\n for i in range(0, 4):\n for j in range(0, len(value[i])):\n sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j]))\n wb.save(filename=path)\n print(\"写入数据成功\")\n\nvalue = [[\"名称\", \"价格\", \"出版社\", \"语言\"],\n [\"如何高效读懂一本书\", \"22.3\", \"机械工业出版社\", \"中文\"],\n [\"暗时间\", \"32.4\", \"人民邮电出版社\", \"中文\"],\n [\"拆掉思维里的墙\", \"26.7\", \"机械工业出版社\", \"中文\"]]\n\nfile_2003 = '../dataFile/2003.xls'\nfile_2007 = '../dataFile/2007.xlsx'\n\nwrite03Excel(file_2003, \"book\", value)\nwrite07Excel(file_2007, \"book\", value)\n" }, { "alpha_fraction": 0.5882353186607361, "alphanum_fraction": 0.5882353186607361, "avg_line_length": 15, "blob_id": "b5bbf699f6c17b54240efff2288f911f691a5126", "content_id": "b997ac38e3f52dd458f75b0804e928e09eeca8fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17, "license_type": "no_license", "max_line_length": 15, "num_lines": 1, "path": "/excel/ExcelUtils.py", "repo_name": "gaojk/Python_Study", "src_encoding": "UTF-8", "text": "# Author:Jing Lv\n\n" }, { "alpha_fraction": 0.6379690766334534, "alphanum_fraction": 0.6412803530693054, "avg_line_length": 26.454545974731445, "blob_id": "813da7355a1f36362fcaaf2b22010319bb617e5d", "content_id": "ce7f2f133c08abd1a71b9bc6a0203cde6a3092aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1078, "license_type": "no_license", "max_line_length": 87, "num_lines": 33, "path": "/spider/Beautifulsoup4Demo01.py", "repo_name": "gaojk/Python_Study", "src_encoding": "UTF-8", "text": "# Author:Jing Lv\n# 爬取博客内容\n\nfrom bs4 import BeautifulSoup\nimport requests\n\n# 请求首页后获取整个html页面\nr = requests.get(\"https://www.cnblogs.com/nbkhic/\")\nblog_content = r.content\n# print(blog_content) # 获取页面html的所有内容\n\n# 用html.parser解析html\nsoup = BeautifulSoup(blog_content, \"html.parser\")\n# 获取所有的class属性为dayTitle,返回Tag类\ntimes = soup.find_all(class_=\"dayTitle\")\n# for time in times:\n# print(time.a.string) # 获取a标签的文本\n# 获取博客的标题\ntitles = soup.find_all(class_=\"postTitle\")\n# for title in titles:\n# print(title.a.string)\n# 获取博客的内容摘要\ndescs = soup.find_all(class_=\"postCon\")\n# for desc in descs:\n# # tag的contents属性可以将tag的子节点以列表的方式输出\n# c = desc.div.contents[0] # 取第一个\n# print(c)\n\nfor time, title, desc in zip(times, titles, descs):\n print(time.a.string)\n print(title.a.string)\n print(desc.div.contents[0])\n print('--------------------------------------------------------------------------')\n" }, { "alpha_fraction": 0.6216704249382019, 
"alphanum_fraction": 0.6293453574180603, "avg_line_length": 33.07692337036133, "blob_id": "21f24e83dc46d1119a8e4a98f7646e901b6e3412", "content_id": "d23ca651472eeeb6b308f1cecf239e41f22600ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3283, "license_type": "no_license", "max_line_length": 97, "num_lines": 65, "path": "/list/IterableTest.py", "repo_name": "gaojk/Python_Study", "src_encoding": "UTF-8", "text": "# Author:Jing Lv\n# 迭代器\n\"\"\"\n可以直接作用于for循环的数据类型有以下几种:\n一类是集合数据类型,如list、tuple、dict、set、str等;\n一类是generator,包括生成器和带yield的generator function。\n这些可以直接作用于for循环的对象统称为可迭代对象:Iterable。\n\"\"\"\nfrom collections import Iterable, Iterator\n\n# 使用isinstance()判断一个对象是否是Iterable对象\nprint(isinstance([], Iterable)) # True\nprint(isinstance({}, Iterable)) # True\nprint(isinstance('abc', Iterable)) # True\nprint(isinstance((x for x in range(10)), Iterable)) # True\nprint(isinstance(100, Iterable)) # False\n\nprint(\"--------------------------------------------------------------------------------------\")\n\"\"\"\n而生成器不但可以作用于for循环,还可以被next()函数不断调用并返回下一个值,直到最后抛出StopIteration错误表示无法继续返回下一个值了。\n可以被next()函数调用并不断返回下一个值的对象称为迭代器:Iterator。\n\"\"\"\n# 可以使用isinstance()判断一个对象是否是Iterator对象:\nprint(isinstance((x for x in range(10)), Iterator)) # True\nprint(isinstance([], Iterator)) # False\nprint(isinstance({}, Iterator)) # False\nprint(isinstance('abc', Iterator)) # False\n\nprint(\"--------------------------------------------------------------------------------------\")\n# 生成器都是Iterator对象,但list、dict、str虽然是Iterable,却不是Iterator。\n# 把list、dict、str等Iterable变成Iterator可以使用iter()函数:\nprint(isinstance(iter([]), Iterator)) # True\nprint(isinstance(iter({}), Iterator)) # True\nprint(isinstance(iter('abc'), Iterator)) # True\n\n\"\"\"\n为什么list、dict、str等数据类型不是Iterator?\n这是因为Python的Iterator对象表示的是一个数据流,Iterator对象可以被next()函数调用并不断返回下一个数据,直到没有数据时抛出StopIteration错误。\n可以把这个数据流看做是一个有序序列,但我们却不能提前知道序列的长度,只能不断通过next()函数实现按需计算下一个数据,所以Iterator的计算是惰性的,只有在需要返回下一个数据时它才会计算。\nIterator甚至可以表示一个无限大的数据流,例如全体自然数。而使用list是永远不可能存储全体自然数的。\n\"\"\"\nprint(\"--------------------------------------------------------------------------------------\")\n\"\"\"\n凡是可作用于for循环的对象都是Iterable类型;\n凡是可作用于next()函数的对象都是Iterator类型,它们表示一个惰性计算的序列;\n集合数据类型如list、dict、str等是Iterable但不是Iterator,不过可以通过iter()函数获得一个Iterator对象。\n\"\"\"\n# Python的for循环本质上就是通过不断调用next()函数实现的,例如:\nfor x in [1, 2, 3, 4, 5]:\n print(x)\n\n# 实际上完全等价于:\nprint(\"--------------------------------------------------------------------------------------\")\n# 首先获得Iterator对象:\nit = iter([1, 2, 3, 4, 5])\n# 循环:\nwhile True:\n try:\n # 获得下一个值:\n x = next(it)\n print(x)\n except StopIteration:\n # 遇到StopIteration就退出循环\n print(\"done\")\n break\n" }, { "alpha_fraction": 0.6014026999473572, "alphanum_fraction": 0.627410888671875, "avg_line_length": 22.930070877075195, "blob_id": "66b2bcdf9c65ccd0aaf6492bf91c6997c69fe194", "content_id": "e0dd3edfc7a9ed79210dc541728a422a23c3ddc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5526, "license_type": "no_license", "max_line_length": 105, "num_lines": 143, "path": "/list/GeneratorTest.py", "repo_name": "gaojk/Python_Study", "src_encoding": "UTF-8", "text": "# Author:Jing Lv\n# 通过列表生成式,我们可以直接创建一个列表。但是,受到内存限制,列表容量肯定是有限的。\n# 而且,创建一个包含100万个元素的列表,不仅占用很大的存储空间,如果我们仅仅需要访问前面几个元素,那后面绝大多数元素占用的空间都白白浪费了\n# 如果列表元素可以按照某种算法推算出来,那我们是否可以在循环的过程中不断推算出后续的元素呢?这样就不必创建完整的list,从而节省大量的空间\n# 在Python中,这种一边循环一边计算的机制,称为生成器:generator\n# yield关键字\n\n# 
要创建一个generator,有很多种方法。第一种方法很简单,只要把一个列表生成式的[]改成(),就创建了一个generator:\nL = [x for x in range(1, 11)]\nprint(L)\nprint(type(L)) # <class 'list'>\n\nG = (x for x in range(1, 11))\nprint(type(G)) # <class 'generator'>\n\n# generator保存的是算法,每次调用next(g),就计算出g的下一个元素的值,直到计算到最后一个元素,没有更多的元素时,抛出StopIteration的错误\nprint(next(G)) # 1\nprint(next(G)) # 2\nprint(next(G)) # 3\nprint(next(G)) # 4\n\nprint('--------------------------------------------')\n\n# 创建了一个generator后,基本上永远不会调用next(),而是通过for循环来迭代它,并且不需要关心StopIteration的错误\nfor n in G:\n print(n)\n\nprint('--------------------------------------------')\n\n\n# generator非常强大。如果推算的算法比较复杂,用类似列表生成式的for循环无法实现的时候,还可以用函数来实现。\n# 比如,著名的斐波拉契数列(Fibonacci),除第一个和第二个数外,任意一个数都可由前两个数相加得到:\n# 1, 1, 2, 3, 5, 8, 13, 21, 34, ...\n# 斐波拉契数列用列表生成式写不出来,但是,用函数把它打印出来却很容易\ndef fib(max):\n n, a, b = 0, 0, 1\n while n < max:\n print(b)\n a, b = b, a + b\n n = n + 1\n return 'done'\n\n\n'''\n赋值语句:\na, b = b, a + b\n相当于:\nt = (b, a + b) # t是一个tuple\na = t[0]\nb = t[1]\n'''\nprint(fib(6))\n\nprint('--------------------------------------------')\n\n'''\nfib函数实际上是定义了斐波拉契数列的推算规则,可以从第一个元素开始,推算出后续任意的元素,这种逻辑其实非常类似generator。\n\n也就是说,上面的函数和generator仅一步之遥。要把fib函数变成generator,只需要把print(b)改为yield b就可以了:\n'''\n\n\ndef fib_yield(max):\n n, a, b = 0, 0, 1\n while n < max:\n yield b\n a, b = b, a + b\n n = n + 1\n return 'done'\n\n\n# 这就是定义generator的另一种方法。如果一个函数定义中包含yield关键字,那么这个函数就不再是一个普通函数,而是一个generator:\nprint(fib_yield(6)) # <generator object fib_yield at 0x000002626867B3B8>\n\n\n# 最难理解的就是generator和函数的执行流程不一样。函数是顺序执行,遇到return语句或者最后一行函数语句就返回。\n# 而变成generator的函数,在每次调用next()的时候执行,遇到yield语句返回,再次执行时从上次返回的yield语句处继续执行\n# 举个简单的例子,定义一个generator,依次返回数字1,3,5:\ndef odd():\n print('step1')\n yield 1\n print('step2')\n yield 2\n print('step3')\n yield 3\n\n\n# 调用该generator时,首先要生成一个generator对象,然后用next()函数不断获得下一个返回值:\no = odd()\nprint(next(o))\n# step1\n# 1\nprint(next(o))\n# step2\n# 2\nprint(next(o))\n# step3\n# 3\n# print(next(o)) # 抛出异常StopIteration\n'''\n可以看到,odd不是普通函数,而是generator,在执行过程中,遇到yield就中断,下次又继续执行。\n执行3次yield后,已经没有yield可以执行了,所以,第4次调用next(o)就报错。\n'''\nprint('--------------------------------------------')\n# 使用for循环来迭代fib_yield\nfor i in fib_yield(6):\n print(i)\n\nprint('--------------------------------------------')\n# 但是用for循环调用generator时,发现拿不到generator的return语句的返回值。\n# 如果想要拿到返回值,必须捕获StopIteration错误,返回值包含在StopIteration的value中:\ng = fib_yield(6)\nwhile True:\n try:\n x = next(g)\n print('g:', x)\n except StopIteration as e:\n print('Generator return value:', e.value)\n break\n\n\n# 将杨辉三角的每一行看成一个list,写一个生成器(generator),不断输出下一行list\ndef triangel(n):\n L = [1] # 定义一个list[1]\n while True:\n yield L # 打印出该list\n L = [L[x] + L[x + 1] for x in range(len(L) - 1)] # 计算下一行中间的值(除去两边的1)\n L.insert(0, 1) # 在开头插入1\n L.append(1) # 在结尾添加1\n if len(L) > 10: # 仅输出10行\n break\n\nprint('--------------------------------------------')\n# 生成一个generator对象,然后通过for循环迭代输出每一行\na = triangel(10)\nfor i in a:\n print(i)\n\n'''\n注:普通函数和generator生成器的区别:\n1.普通函数调用直接返回结果,generator函数的调用,返回一个generator对象;(调用generator时可以先创建一个对象,再用next()方法不断获得下一个返回值,但实际中通常用for循环实现)\n2.generator在执行过程中,遇到yield就中断,下次又继续执行\n'''\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 15, "blob_id": "ca9314dcb6b5e8b8b29b7632140a44e7eedd5082", "content_id": "f4bf6228bcb0131f62ecdb973bd7f5461a0d11ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15, "license_type": "no_license", "max_line_length": 15, "num_lines": 1, "path": "/list/__init__.py", "repo_name": 
"gaojk/Python_Study", "src_encoding": "UTF-8", "text": "# Author:Jing Lv" }, { "alpha_fraction": 0.6083333492279053, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 12.84615421295166, "blob_id": "815d4e5fc867f5699d8c3a76e66e093d3b02eba0", "content_id": "c4d87041ce6cbb6f096bb0c47d7b7d599857b536", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 598, "license_type": "no_license", "max_line_length": 40, "num_lines": 26, "path": "/lambda/lambdaTest.py", "repo_name": "gaojk/Python_Study", "src_encoding": "UTF-8", "text": "# Author:Jing Lv\n\n\"\"\"\n匿名函数,无函数名\n关键字lambda表示匿名函数\nlambda(关键字) x(函数参数)\nlambda x: x*x\n等价于\ndef f(x):\n return x*x\n用匿名函数有个好处,因为函数没有名字,不必担心函数名冲突。\n匿名函数也是一个函数对象,也可以把匿名函数赋值给一个变量,再利用变量来调用该函数\n也可以把匿名函数作为返回值返回\n\"\"\"\nf1 = lambda n: n * 2\nf2 = lambda x, y: x ** y\n\nprint(f1(5)) # 执行结果10\nprint(f2(2, 2)) # 执行结果4\n\n\ndef f3(x, y):\n return lambda: x * y\n\na = f3(5, 7)\nprint(a)\n" } ]
11
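GeneratorTest.py in the record above caps its Fibonacci generator with a `max` argument. A companion sketch (not from the repo): the same generator left unbounded and consumed with `itertools.islice`, the usual way to take a finite slice of an infinite generator:

```python
# Companion sketch, not taken from the record: an unbounded version of the
# record's Fibonacci generator, sliced with itertools.islice by the consumer.
from itertools import islice

def fib():
    a, b = 0, 1
    while True:      # infinite generator; callers slice off what they need
        yield b
        a, b = b, a + b

assert list(islice(fib(), 6)) == [1, 1, 2, 3, 5, 8]
```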
nobuto-m/layer-canal
https://github.com/nobuto-m/layer-canal
51f5dd7b222869ddefc64eb6b397dd284f695d76
084c6839008dfaa2e949ff6ddded8b79802d9124
c121c4ddf4c2252485415773a1cbc4120de6f0c4
refs/heads/master
2020-07-13T09:17:27.878899
2019-08-27T15:07:14
2019-08-27T15:07:14
205,054,603
0
0
NOASSERTION
2019-08-29T01:25:29
2019-08-27T15:07:17
2019-08-27T15:07:18
null
[ { "alpha_fraction": 0.651860773563385, "alphanum_fraction": 0.6662665009498596, "avg_line_length": 36.022220611572266, "blob_id": "13088e742d86ac83094b6ce7c03d4ce0c18c1bdd", "content_id": "0b8be4402a5be43a81f0abe2f99bc4d0e15c4063", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1666, "license_type": "permissive", "max_line_length": 101, "num_lines": 45, "path": "/fetch-charm-store-resources.sh", "repo_name": "nobuto-m/layer-canal", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -eux\n\n# This script will download the resources associated with each charm that\n# canal cares about (ie, flannel and calico) from the charm store. By default,\n# it will pull charm resources from the edge channel. Call it with\n# edge|beta|candidate|stable as the first arg to specify the channel.\n#\n# If you need to construct new resources from upstream binaries, see the\n# build-canal-resources.sh script in this repository.\n\nchannel=${1:-}\nif [ -z ${channel} ]; then\n channel=\"edge\"\nfi\n\n# 'charm' and 'wget' are required\ncommand -v charm >/dev/null 2>&1 || { echo 'charm: command not found'; exit 1; }\ncommand -v wget >/dev/null 2>&1 || { echo 'wget: command not found'; exit 1; }\n\n# list of namespaced charms from which to fetch resources\ncharms=\"~containers/flannel ~containers/calico\"\nfor charm in ${charms}; do\n # get the id (charm-revision), stripping any 'cs:' prefix\n charm_id=$(charm show ${charm} -c ${channel} id | grep -o \"${charm}.*\" | sed -e 's/^cs://')\n\n # get our resources, skipping potential header rows. list looks like:\n # [Service]\n # RESOURCE REVISION\n # flannel-amd64 3\n # flannel-arm64 1\n # flannel-s390x 3\n resources=$(charm list-resources ${charm_id} | grep -v \"\\[Service\\]\" | tail -n +2 | sed -e '/^$/d')\n\n # construct a url and wget each resource. each resource line looks like:\n # flannel-amd64 3\n IFS=$'\\n'\n for res in $resources; do\n res_name=$(echo $res | awk '{print $1}')\n res_rev=$(echo $res | awk '{print $2}')\n res_url=\"https://api.jujucharms.com/charmstore/v5/${charm_id}/resource/${res_name}/${res_rev}\"\n wget ${res_url} -O ${res_name}.tar.gz\n done\n unset IFS\ndone\n" }, { "alpha_fraction": 0.7710622549057007, "alphanum_fraction": 0.7875458002090454, "avg_line_length": 44.5, "blob_id": "af2ae95ff84abee569bb17f4d70be063f43c41c0", "content_id": "47de475c443966601e30f3399b468075cb4fef2f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 546, "license_type": "permissive", "max_line_length": 78, "num_lines": 12, "path": "/versioning.md", "repo_name": "nobuto-m/layer-canal", "src_encoding": "UTF-8", "text": "# layer-canal devs: How to bump Calico release versions\n\nTODO: fix this lousy process\n\n1. Check the component versions: https://docs.projectcalico.org/v2.6/releases/\n (substitute v2.6 for latest version)\n2. Update calicoctl and calico-cni versions in build-calico-resource.sh\n3. Update calico-node image in config.yaml\n (used in templates/calico-node.service)\n4. Update calico-policy-controller image in config.yaml\n (used in templates/calico-policy-controller.yaml)\n5. 
Update calico_version in reactive/canal.py set_canal_version function\n" }, { "alpha_fraction": 0.6195363998413086, "alphanum_fraction": 0.6274834275245667, "avg_line_length": 26.962963104248047, "blob_id": "951b6990e28f8c13cc492b1301d399b008abec43", "content_id": "b737cf80b3f05b47618da5b8fc438faa27a94be4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3020, "license_type": "permissive", "max_line_length": 71, "num_lines": 108, "path": "/lib/calico_upgrade.py", "repo_name": "nobuto-m/layer-canal", "src_encoding": "UTF-8", "text": "import os\nimport shutil\nimport yaml\nfrom subprocess import check_call, check_output, CalledProcessError\nfrom charms.layer.canal import arch\nfrom charms.reactive import endpoint_from_flag\nfrom charmhelpers.core.hookenv import resource_get, status_set, log\n\nCALICOCTL_PATH = '/opt/calicoctl'\nETCD_KEY_PATH = os.path.join(CALICOCTL_PATH, 'etcd-key')\nETCD_CERT_PATH = os.path.join(CALICOCTL_PATH, 'etcd-cert')\nETCD_CA_PATH = os.path.join(CALICOCTL_PATH, 'etcd-ca')\nCALICO_UPGRADE_DIR = '/opt/calico-upgrade'\nETCD2_DATA_PATH = CALICO_UPGRADE_DIR + '/etcd2.yaml'\nETCD3_DATA_PATH = CALICO_UPGRADE_DIR + '/etcd3.yaml'\n\n\nclass ResourceMissing(Exception):\n pass\n\n\nclass DryRunFailed(Exception):\n pass\n\n\ndef cleanup():\n shutil.rmtree(CALICO_UPGRADE_DIR, ignore_errors=True)\n\n\ndef configure():\n cleanup()\n os.makedirs(CALICO_UPGRADE_DIR)\n\n # Extract calico-upgrade resource\n architecture = arch()\n if architecture == 'amd64':\n resource_name = 'calico-upgrade'\n else:\n resource_name = 'calico-upgrade-' + architecture\n archive = resource_get(resource_name)\n\n if not archive:\n message = 'Missing calico-upgrade resource'\n status_set('blocked', message)\n raise ResourceMissing(message)\n\n check_call(['tar', '-xvf', archive, '-C', CALICO_UPGRADE_DIR])\n\n # Configure calico-upgrade, etcd2 (data source)\n etcd = endpoint_from_flag('etcd.available')\n etcd_endpoints = etcd.get_connection_string()\n etcd2_data = {\n 'apiVersion': 'v1',\n 'kind': 'calicoApiConfig',\n 'metadata': None,\n 'spec': {\n 'datastoreType': 'etcdv2',\n 'etcdEndpoints': etcd_endpoints,\n 'etcdKeyFile': ETCD_KEY_PATH,\n 'etcdCertFile': ETCD_CERT_PATH,\n 'etcdCACertFile': ETCD_CA_PATH\n }\n }\n with open(ETCD2_DATA_PATH, 'w') as f:\n yaml.dump(etcd2_data, f)\n\n # Configure calico-upgrade, etcd3 (data destination)\n etcd3_data = {\n 'apiVersion': 'projectcalico.org/v3',\n 'kind': 'CalicoAPIConfig',\n 'metadata': None,\n 'spec': {\n 'datastoreType': 'etcdv3',\n 'etcdEndpoints': etcd_endpoints,\n 'etcdKeyFile': ETCD_KEY_PATH,\n 'etcdCertFile': ETCD_CERT_PATH,\n 'etcdCACertFile': ETCD_CA_PATH\n }\n }\n with open(ETCD3_DATA_PATH, 'w') as f:\n yaml.dump(etcd3_data, f)\n\n\ndef invoke(*args):\n cmd = [CALICO_UPGRADE_DIR + '/calico-upgrade'] + list(args)\n cmd += [\n '--apiconfigv1', ETCD2_DATA_PATH,\n '--apiconfigv3', ETCD3_DATA_PATH\n ]\n try:\n return check_output(cmd)\n except CalledProcessError as e:\n log(e.output)\n raise\n\n\ndef dry_run():\n output = invoke('dry-run', '--output-dir', CALICO_UPGRADE_DIR)\n if b'Successfully validated v1 to v3 conversion' not in output:\n raise DryRunFailed()\n\n\ndef start():\n invoke('start', '--no-prompts', '--output-dir', CALICO_UPGRADE_DIR)\n\n\ndef complete():\n invoke('complete', '--no-prompts')\n" }, { "alpha_fraction": 0.7926583290100098, "alphanum_fraction": 0.7954820394515991, "avg_line_length": 37.13846206665039, "blob_id": "f39e1d2ba349170f106cd1a91d2ef6fd0c044f28", 
"content_id": "e3f1609b4e83620b131ea661fe1ae6af558fe14a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2481, "license_type": "permissive", "max_line_length": 82, "num_lines": 65, "path": "/README.md", "repo_name": "nobuto-m/layer-canal", "src_encoding": "UTF-8", "text": "# Canal Charm\n\nCanal is a community-driven initiative that aims to allow users to easily\ndeploy Calico and flannel networking together as a unified networking\nsolution - combining Calico’s industry-leading network policy enforcement with\nthe rich superset of Calico and flannel overlay and non-overlay network\nconnectivity options.\n\nThis charm will deploy flannel and calico as background services, and configure\nCNI to use them, on any principal charm that implements the [kubernetes-cni][]\ninterface.\n\n[kubernetes-cni]: https://github.com/juju-solutions/interface-kubernetes-cni\n\n## Usage\n\nThe canal charm is a [subordinate][]. This charm will require a principal charm\nthat implements the `kubernetes-cni` interface in order to properly deploy.\n\n[subordinate]: https://docs.jujucharms.com/2.4/en/authors-subordinate-applications\n```\njuju deploy cs:~containers/canal\njuju deploy cs:~containers/etcd\njuju deploy cs:~containers/kubernetes-master\njuju deploy cs:~containers/kubernetes-worker\njuju add-relation canal etcd\njuju add-relation canal kubernetes-master\njuju add-relation canal kubernetes-worker\n```\n\n## Configuration\n\n**iface** The interface to configure the flannel SDN binding. If this value is\nempty string or undefined the code will attempt to find the default network\nadapter similar to the following command: \n```bash\nroute | grep default | head -n 1 | awk {'print $8'}\n```\n\n**cidr** The network range to configure the flannel SDN to declare when\nestablishing networking setup with etcd. Ensure this network range is not active\non the vlan you're deploying to, as it will cause collisions and odd behavior\nif care is not taken when selecting a good CIDR range to assign to flannel.\n\n**nagios_context** A string that will be prepended to instance name to set the\nhost name in nagios.If you're running multiple environments with the same\nservices in them this allows you to differentiate between them. Used by the\nnrpe subordinate charm.\n\n**nagios_servicegroups** The comma-separated list of servicegroups that the\ngenerated Nagios checks will belong to.\n\n## Known Limitations\n\nThis subordinate does not support being co-located with other deployments of\nthe canal subordinate (to gain 2 vlans on a single application). 
If you\nrequire this support please file a bug.\n\nThis subordinate also leverages juju-resources, so it is currently only\navailable on juju 2.0+ controllers.\n\n\n## Further information\n\n- [Canal Project Page](https://github.com/projectcalico/canal)\n" }, { "alpha_fraction": 0.5961995124816895, "alphanum_fraction": 0.5985748171806335, "avg_line_length": 30.575000762939453, "blob_id": "d9ac130ef60e5b92b0a82356c9ed40c5188d9b04", "content_id": "35f4341bfb3c0d4a9b4e672d4ea10aec7beec4a8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1263, "license_type": "permissive", "max_line_length": 74, "num_lines": 40, "path": "/lib/charms/layer/canal.py", "repo_name": "nobuto-m/layer-canal", "src_encoding": "UTF-8", "text": "from subprocess import check_output\nfrom time import sleep\n\n\ndef retry(times, delay_secs):\n \"\"\" Decorator for retrying a method call.\n Args:\n times: How many times should we retry before giving up\n delay_secs: Delay in secs\n Returns: A callable that would return the last call outcome\n \"\"\"\n\n def retry_decorator(func):\n \"\"\" Decorator to wrap the function provided.\n Args:\n func: Provided function should return either True od False\n Returns: A callable that would return the last call outcome\n \"\"\"\n def _wrapped(*args, **kwargs):\n res = func(*args, **kwargs)\n attempt = 0\n while not res and attempt < times:\n sleep(delay_secs)\n res = func(*args, **kwargs)\n if res:\n break\n attempt += 1\n return res\n return _wrapped\n\n return retry_decorator\n\n\ndef arch():\n '''Return the package architecture as a string.'''\n # Get the package architecture for this system.\n architecture = check_output(['dpkg', '--print-architecture']).rstrip()\n # Convert the binary result into a string.\n architecture = architecture.decode('utf-8')\n return architecture\n" } ]
5
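The record's `lib/charms/layer/canal.py` defines a `retry(times, delay_secs)` decorator for boolean-returning checks. A hypothetical usage sketch; the service name and the systemctl probe are illustrative assumptions, not taken from the charm:

```python
# Hypothetical usage of the retry decorator from lib/charms/layer/canal.py;
# the flannel service probe below is an illustrative assumption, not charm code.
from subprocess import call

from charms.layer.canal import retry  # assumes the charm's lib/ is importable

@retry(times=5, delay_secs=2)
def flannel_active():
    # `systemctl is-active` exits 0 when the unit is active; retry() keeps
    # re-calling the wrapped function until it returns a truthy value.
    return call(['systemctl', 'is-active', '--quiet', 'flannel']) == 0

if flannel_active():
    print('flannel is up')
```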
Q-KIM/WASP
https://github.com/Q-KIM/WASP
57ffbaf441065273bf42d8a165baa3c93543833d
3bc4f2ee90b579e0fe30da12580c5a68e39f0448
04e7d45e4d9fdd9ca42edd3139cc04b65949d587
refs/heads/master
2021-01-15T08:04:04.155877
2015-07-01T21:45:04
2015-07-01T21:45:04
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5220223069190979, "alphanum_fraction": 0.5297101140022278, "avg_line_length": 40.074562072753906, "blob_id": "4a279ef6dabf33e0aeb77c2ee9fd485970303c68", "content_id": "8cf9545baf83105fbad54e9057e060558064b099", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18731, "license_type": "permissive", "max_line_length": 216, "num_lines": 456, "path": "/mapping/find_intersecting_snps.py", "repo_name": "Q-KIM/WASP", "src_encoding": "UTF-8", "text": "import sys, pysam, gzip, pdb, argparse, array\n#from pympler import asizeof\n\n#### Class to hold the data for a single SNP\nclass SNP:\n def __init__(self,snp_line):\n snp_split=snp_line.strip().split()\n self.pos=int(snp_split[0])-1\n self.alleles=[snp_split[1],snp_split[2]]\n self.ptype=\"snp\"\n self.max_len=0\n for i in range(len(self.alleles)):\n if self.alleles[i]==\"-\":\n self.alleles[i]=\"\"\n self.ptype=\"indel\"\n elif len(self.alleles[i])>self.max_len:\n self.max_len=len(self.alleles[i])\n if self.max_len>1:\n self.ptype=\"indel\"\n\n haplo_chars=snp_split[5:]\n haplos=array.array('B',[0])*len(haplo_chars)\n \n \n\n def add_allele(self,new_alleles):\n for new_allele in new_alleles:\n if new_allele==\"-\":\n self.ptype=\"indel\"\n new_allele=\"\"\n if not (new_allele in self.alleles):\n self.alleles.append(new_allele)\n if len(new_allele)>self.max_len:\n self.max_len=len(new_allele)\n if self.max_len>1:\n self.ptype=\"indel\"\n\n def shift_indel(self):\n self.pos+=1\n self.max_len-=1\n i=0\n while i < len(self.alleles):\n if len(self.alleles)<=1:\n self.alleles.pop(i)\n else:\n self.alleles[i]=self.alleles[i][1:]\n i+=1\n self.alleles.append(\"\")\n\n#### Class to keep track of all the information read in from the bamfile/snpfile \nclass Bam_scanner:\n # Constructor: opens files, creates initial table\n def __init__(self,is_paired_end,max_window,file_name,keep_file_name,remap_name,remap_num_name,fastq_names,snp_dir):\n self.is_paired_end=is_paired_end\n \n ### Read in all input files and create output files\n self.snp_dir=snp_dir\n self.bamfile=pysam.Samfile(file_name,\"rb\")\n self.keep_bam=pysam.Samfile(keep_file_name,\"wb\",template=self.bamfile)\n self.remap_bam=pysam.Samfile(remap_name,\"wb\",template=self.bamfile)\n self.remap_num_file=gzip.open(remap_num_name,\"w\")\n self.fastqs=[gzip.open(fqn,\"w\") for fqn in fastq_names]\n try:\n self.cur_read=self.bamfile.next()\n except:\n sys.stderr.write(\"No lines available for input\")\n return()\n self.end_of_file=False\n\n self.remap_num=1\n self.ref_match=0\n self.alt_match=0\n self.no_match=0\n self.toss=0\n self.nosnp=0\n self.remap=0\n self.tot=0\n self.printstats = True\n \n self.num_reads=0\n \n self.pos=self.cur_read.pos\n self.chr_num=self.cur_read.tid\n self.chr_name=self.bamfile.getrname(self.cur_read.tid)\n self.max_window=max_window\n \n self.num_reads=0\n\n ### Initialize the read tracking tables\n self.read_table=[[] for x in range(self.max_window)]\n \n ### Initialize the SNP and indel tracking tables\n self.switch_chr()\n \n ### fill all tables\n self.fill_table()\n \n\n # fills the table of reads starting from the current position and extending for the next <max_window> base pairs\n def fill_table(self):\n if self.end_of_file:\n return()\n if self.num_reads==0:\n self.pos=self.cur_read.pos\n self.init_snp_table()\n #self.num_reads+=1000\n while self.cur_read.tid == self.chr_num and self.cur_read.pos<(self.pos+self.max_window):\n self.num_reads+=1\n 
self.read_table[self.cur_read.pos % self.max_window].append(self.cur_read)\n try:\n self.cur_read=self.bamfile.next()\n except:\n self.empty_table()\n self.end_of_file=True\n return()\n\n if self.cur_read.tid != self.chr_num:\n self.empty_table()\n self.chr_num=self.cur_read.tid\n try:\n self.chr_name=self.bamfile.getrname(self.chr_num)\n except:\n sys.stderr.write(\"Problem with tid: \"+str(self.chr_num)+\"\\n\")\n self.skip_chr()\n self.pos=self.cur_read.pos\n self.switch_chr()\n self.fill_table()\n\n # Switches to looking for SNPs on the next chromosome\n def switch_chr(self):\n chr_match=False\n while not chr_match and not self.end_of_file:\n try:\n self.snpfile = gzip.open(\"%s/%s.snps.txt.gz\"%(self.snp_dir,self.chr_name))\n sys.stderr.write(\"Starting on chromosome \"+self.chr_name+\"\\n\")\n chr_match=True\n except:\n sys.stderr.write(\"SNP file for chromosome \"+self.chr_name+\" is not found. Skipping these reads.\\n\")\n self.skip_chr()\n \n self.end_of_snp_file=False\n self.get_next_snp()\n\n # Initializes the SNP table\n def init_snp_table(self):\n # create an empty SNP table\n self.num_snps=0\n self.indel_dict={}\n self.snp_table=[0 for x in range(self.max_window)]\n self.indel_table=[[] for x in range(self.max_window)]\n # skips SNPs that are upstream of the current read\n while not self.end_of_snp_file and self.cur_snp.pos<self.pos:\n self.get_next_snp()\n\n # adds SNPs downstream of the current read and within the current window\n while not self.end_of_snp_file and self.cur_snp.pos<self.pos+self.max_window:\n if self.cur_snp.ptype==\"snp\":\n self.add_snp()\n else:\n self.add_indel()\n self.get_next_snp()\n \n #sys.stderr.write(str(self.num_snps)+\"\\n\")\n\n def add_snp(self):\n cur_pos=self.cur_snp.pos % self.max_window\n if self.snp_table[cur_pos]==0:\n self.num_snps+=1\n self.snp_table[cur_pos]=self.cur_snp\n elif isinstance(self.snp_table[cur_pos],SNP):\n self.snp_table[cur_pos].add_allele(self.cur_snp.alleles) \n \n def add_indel(self):\n position=self.cur_snp.pos\n if self.indel_dict.has_key(position):\n start=self.indel_dict[position].max_len\n self.indel_dict[position].add_allele(self.cur_snp.alleles)\n else:\n self.indel_dict[position]=self.cur_snp\n start=0\n end=self.indel_dict[position].max_len\n i=start\n while i<end and self.cur_snp.pos+i<self.pos+self.max_window:\n self.indel_table[(self.cur_snp.pos+i)%self.max_window].append(position)\n i+=1\n\n #to read in next SNP or signals end of file\n def get_next_snp(self):\n snp_line=self.snpfile.readline()\n if snp_line:\n self.cur_snp=SNP(snp_line)\n else:\n self.end_of_snp_file=True\n\n # Skips all of the reads coming from this chromosome and moves on to the next\n # Used if the SNP file can't be located\n def skip_chr(self):\n while self.cur_read.tid == self.chr_num:\n try:\n self.cur_read=self.bamfile.next()\n except:\n self.empty_table()\n self.end_of_file=True\n return()\n\n self.chr_num=self.cur_read.tid\n try:\n self.chr_name=self.bamfile.getrname(self.chr_num)\n except:\n sys.stderr.write(\"Problem with tid: \"+str(self.chr_num)+\"\\n\")\n self.skip_chr()\n\n # Processes all reads that map to the current position and removes them from the read table\n # Treats reads as single-end\n def empty_slot_single(self):\n cur_slot=self.pos % self.max_window\n while len(self.read_table[cur_slot])>0:\n self.tot+=1\n read=self.read_table[cur_slot].pop()\n self.num_reads-=1\n seqs=self.check_for_snps(read,0)\n num_seqs=len(seqs)\n if num_seqs==0 or num_seqs>10:\n continue\n if num_seqs==1:\n self.keep_bam.write(read)\n 
else:\n self.remap_num_file.write(\"%i\\n\"%(num_seqs-1))\n self.remap_num_file.flush()\n self.remap_bam.write(read)\n for seq in seqs[1:]:\n loc_line=\"%i:%s:%i:%i\" % (self.remap_num,self.chr_name,read.pos,num_seqs-1)\n self.fastqs[0].write(\"@%s\\n%s\\n+%s\\n%s\\n\"%(loc_line,seq,loc_line,read.qual))\n self.remap_num+=1\n #if self.printstats: \n # sys.stderr.write(str(self.tot)+\" \"+str(self.nosnp)+\" \"+str(self.remap)+\" \"+str(self.toss)+\"\\n\")\n # self.printstats = False\n #sys.stderr.write(str(self.ref_match)+\" \"+str(self.alt_match)+\" \"+str(self.no_match)+\"\\n\")\n self.pos+=1\n self.shift_SNP_table()\n \n\n # Processes all reads that map to the current position and removes them from the read table\n # Treats reads as paired-end\n def empty_slot_paired(self):\n cur_slot=self.pos % self.max_window\n while len(self.read_table[cur_slot])>0: #While there are reads in this slot\n read=self.read_table[self.pos % self.max_window].pop() #Pop the first read in the slot\n self.num_reads-=1\n pair_chr_num=read.rnext #Figure out the matching read position\n pair_pos=read.mpos \n if pair_chr_num != self.chr_num or pair_pos-self.pos > self.max_window:\n continue\n pair_slot=pair_pos % self.max_window # Find the slot the matching read in\n for indx in range(len(self.read_table[pair_slot])):\n #if self.read_table[pair_slot][indx].qname==read.qname:\n if self.read_table[pair_slot][indx].qname.split(\":\")[-1]==read.qname.split(\":\")[-1]: #for testing purposes\n pair_read=self.read_table[pair_slot].pop(indx)\n self.num_reads-=1\n seq1s=self.check_for_snps(read,0)\n seq2s=self.check_for_snps(pair_read,read.mpos-read.pos)\n num_seqs=len(seq1s)*len(seq2s)\n if num_seqs==0 or num_seqs>32:\n break\n if num_seqs==1:\n self.keep_bam.write(read)\n self.keep_bam.write(pair_read)\n else:\n self.remap_bam.write(read)\n self.remap_bam.write(pair_read)\n self.remap_num_file.write(\"%i\\n\"% (2*(num_seqs-1)))\n first=True\n for seq1 in seq1s:\n for seq2 in seq2s:\n if first:\n left_pos=min(read.pos,pair_read.pos)\n right_pos=max(read.pos,pair_read.pos)\n loc_line=\"%i:%s:%i:%i:%i\" % (self.remap_num,self.chr_name,left_pos,right_pos,num_seqs-1)\n self.fastqs[0].write(\"@%s\\n%s\\n+%s\\n%s\\n\"%(loc_line,seq1,loc_line,read.qual))\n self.fastqs[1].write(\"@%s\\n%s\\n+%s\\n%s\\n\"%(loc_line,self.reverse_complement(seq2),loc_line,pair_read.qual))\n first=False\n self.remap_num+=1\n break # stop searching for the pair since it was found\n \n #sys.stderr.write(str(self.ref_match)+\" \"+str(self.alt_match)+\" \"+str(self.no_match)+\" \"+str(self.toss)+\"\\n\")\n self.pos+=1\n self.shift_SNP_table()\n \n\n # Checks a single aligned read for overlapping SNPs and created alternative sequences for remapping\n def check_for_snps(self,read,start_dist):\n indx=read.pos % self.max_window\n p=0\n num_snps=0\n seg_len=start_dist\n seqs=[read.seq]\n if start_dist>0:\n has_junc=False\n for cigar in read.cigar:\n seg_len+=cigar[1]\n if seg_len>self.max_window:\n sys.stderr.write(\"Segment distance (from read pair and junction separation) is too large. A read has been thrown out. 
Consider increasing the max window size.\\n\")\n return([])\n if cigar[0] == 4: #if CIGAR indicates a soft-clipping\n p=p+cigar[1]\n elif cigar[0] == 0: #if CIGAR indicates a match alignment to the reference genome\n for i in range(cigar[1]): \n if len(self.indel_table[indx])==0:\n snp=self.snp_table[indx]\n if snp!=0:\n num_snps+=1\n if num_snps>10: #If there are more than 10 snps overlapping, throw out the read to prevent memory blow-up\n return([])\n init_seqs=list(seqs)\n for seq in init_seqs:\n matches=0\n #if seq[p] not in snp.alleles:\n # sys.stderr.write(str(start_dist)+\" \"+seq[p]+\" \"+str(snp.alleles)+\"\\n\")\n for geno in snp.alleles:\n if seq[p]==geno:\n matches+=1\n for alt_geno in snp.alleles:\n if not alt_geno == geno:\n new_seq=seq[:p]+alt_geno+seq[p+1:]\n seqs.append(new_seq)\n if matches==0:\n self.no_match+=1\n else:\n self.ref_match+=1\n else: #it's an indel, throw it out\n self.toss+=1\n return([])\n indx=(indx+1) % self.max_window\n p+=1\n elif cigar[0]==3: # if it is skipped in the reference genome (splice junction)\n indx=(indx+cigar[1]) % self.max_window\n has_junc=True\n else: #if there is a non-N/M in the read CIGAR, throw out the read\n self.toss+=1\n return([])\n \n if len(seqs)==1:\n self.nosnp+=1\n else:\n self.remap+=1\n return seqs\n # Shifts the SNP table over one position and makes sure that indels are not lost\n def shift_SNP_table(self): \n ### Current slot to fill is the position + max_window - 1\n cur_slot=(self.pos-1)%self.max_window\n\n ### Delete indels that are no longer used (if they ended at the previous position)\n for indel_pos in self.indel_table[cur_slot]:\n #sys.stderr.write(str(indel_pos+self.indel_dict[indel_pos].max_len-1)+\"\\t\"+str(self.pos-1)+\"\\t\"+str(self.indel_dict[indel_pos].max_len)+\"\\n\")\n if indel_pos+self.indel_dict[indel_pos].max_len-1==self.pos-1:\n del self.indel_dict[indel_pos]\n \n self.indel_table[cur_slot]=[]\n ### Carry over indels from the previous slot\n for indel_pos in self.indel_table[cur_slot-1]:\n if indel_pos+self.indel_dict[indel_pos].max_len-1>=self.pos+self.max_window-1:\n self.indel_table[cur_slot].append(indel_pos)\n\n if self.snp_table[cur_slot]!=0:\n self.num_snps-=1\n self.snp_table[cur_slot]=0\n\n #### See if there is a SNP overlapping the current spot\n while not self.end_of_snp_file and self.pos+self.max_window-1 > self.cur_snp.pos:\n sys.stderr.write(str(self.num_snps)+\" \"+str(self.pos)+\" \"+str(self.cur_snp.pos)+\" !!!\\n\")\n sys.stderr.write(\"SNP out of order has been skipped\\n\")\n self.get_next_snp()\n\n while not self.end_of_snp_file and self.cur_snp.pos==self.pos+self.max_window-1:\n if self.cur_snp.ptype==\"snp\":\n self.add_snp()\n else:\n self.add_indel()\n if not self.cur_snp.pos in self.indel_table[cur_slot]:\n self.indel_table[cur_slot].append(cur_snp.pos)\n self.get_next_snp()\n\n # Completely empties the read_table by repeatedly calling empty_slot function \n def empty_table(self):\n end_pos=self.pos+self.max_window\n while self.pos < end_pos:\n if self.is_paired_end:\n self.empty_slot_paired()\n else:\n self.empty_slot_single()\n\n def complement(self,letter):\n if letter=='A':\n return('T')\n elif letter=='T':\n return('A')\n elif letter=='C':\n return('G')\n elif letter=='G':\n return('C')\n else:\n return(letter)\n\n def reverse_complement(self,read):\n reverse=\"\"\n for letter in read:\n reverse=self.complement(letter)+reverse\n return reverse\n\ndef main():\n parser=argparse.ArgumentParser()\n parser.add_argument(\"-p\", action='store_true', dest='is_paired_end', 
default=False)\n parser.add_argument(\"-m\", action='store', dest='max_window', type=int, default=100000)\n parser.add_argument(\"infile\", action='store')\n parser.add_argument(\"snp_dir\", action='store')\n \n options=parser.parse_args()\n infile=options.infile\n snp_dir=options.snp_dir\n name_split=infile.split(\".\")\n \n if len(name_split)>1:\n pref=\".\".join(name_split[:-1])\n else:\n pref=name_split[0]\n\n pysam.sort(infile,pref+\".sort\")\n\n sort_file_name=pref+\".sort.bam\"\n keep_file_name=pref+\".keep.bam\"\n remap_name=pref+\".to.remap.bam\"\n remap_num_name=pref+\".to.remap.num.gz\"\n\n if options.is_paired_end:\n fastq_names=[pref+\".remap.fq1.gz\",pref+\".remap.fq2.gz\"]\n else:\n fastq_names=[pref+\".remap.fq.gz\"]\n\n bam_data=Bam_scanner(options.is_paired_end,options.max_window,sort_file_name,keep_file_name,remap_name,remap_num_name,fastq_names,snp_dir)\n bam_data.fill_table()\n #i=0\n while not bam_data.end_of_file:\n #i+=1\n #if i>50000:\n #sys.stderr.write(str(asizeof.asizeof(bam_data))+\"\\t\"+str(asizeof.asizeof(bam_data.snp_table))+\"\\t\"+str(asizeof.asizeof(bam_data.read_table))+\"\\t\"+str(bam_data.num_reads)+\"\\t\"+str(bam_data.num_snps)+\"\\n\")\n #sys.stderr.write(str(asizeof.asizeof(bam_data))+\"\\t\"+str(bam_data.num_reads)+\"\\t\"+str(bam_data.num_snps)+\"\\t\"+str(len(bam_data.indel_dict))+\"\\n\")\n #i=0\n if options.is_paired_end:\n bam_data.empty_slot_paired()\n else:\n bam_data.empty_slot_single()\n bam_data.fill_table()\n \n sys.stderr.write(\"Finished!\\n\")\n\nmain()\n\n" }, { "alpha_fraction": 0.6152950525283813, "alphanum_fraction": 0.6230590343475342, "avg_line_length": 29.294116973876953, "blob_id": "f028cb7ac240c8742169434900f8fd8578c8b5f5", "content_id": "4b27dd0f894c6faf781d921a52953ce07e8b777e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2576, "license_type": "permissive", "max_line_length": 97, "num_lines": 85, "path": "/mapping/filter_remapped_reads.py", "repo_name": "Q-KIM/WASP", "src_encoding": "UTF-8", "text": "import sys, pysam, gzip, pdb, argparse, pdb\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-p\", action='store_true', dest='is_paired_end', default=False)\nparser.add_argument(\"orig_bam\")\nparser.add_argument(\"remap_bam\")\nparser.add_argument(\"keep_bam\")\nparser.add_argument(\"orig_num_file\")\n\noptions= parser.parse_args()\n\norig_bam=pysam.Samfile(options.orig_bam,\"rb\")\nremap_bam=pysam.Samfile(options.remap_bam,\"rb\")\nkeep_bam=pysam.Samfile(options.keep_bam,\"wb\",template=orig_bam)\norig_num_file=gzip.open(options.orig_num_file)\n\ncorrect_maps=[]\nend_of_file=False\n\n\n# Get a list of reads that remapped correctly\nremap_read=remap_bam.next()\n\nwhile not end_of_file: \n chrm=remap_read.qname.strip().split(\":\")[1]\n if remap_read.is_reverse:\n pos=int(remap_read.qname.strip().split(\":\")[3])\n else:\n pos=int(remap_read.qname.strip().split(\":\")[2])\n read_num=int(remap_read.qname.strip().split(\":\")[0])\n if remap_read.tid != -1 and remap_read.pos==pos and remap_bam.getrname(remap_read.tid)==chrm:\n dels=0 #Throw out the remapped read if it remapped with a deletion...for now\n for cig in remap_read.cigar:\n if not cig[0] in (0,3,4):\n dels+=1\n if dels==0:\n correct_maps.append(read_num)\n try:\n remap_read=remap_bam.next()\n except:\n end_of_file=True\n\n# Sort this list\ncorrect_maps.sort()\n\n#pdb.set_trace()\nsys.stderr.write(str(len(correct_maps))+\" reads remapped to the correct position\\n\")\n\n# Pull out 
original aligned reads if all of the alternatives mapped correctly\n\norig_read=orig_bam.next()\norig_num=int(orig_num_file.readline().strip())\nline_num=1\n\nmap_indx=0\ncorrect=0\nend_of_file=False\n\n\nwhile not end_of_file and map_indx< len(correct_maps) and line_num <= correct_maps[-1]:\n if line_num < correct_maps[map_indx]:\n if orig_num==correct:\n keep_bam.write(orig_read)\n if options.is_paired_end:\n try:\n orig_read=orig_bam.next()\n except:\n sys.stderr.write(\"File ended unexpectedly (no pair found)\")\n exit()\n if orig_num==correct:\n keep_bam.write(orig_read)\n \n line_num+=1\n correct=0\n try:\n orig_read=orig_bam.next()\n orig_num=int(orig_num_file.readline().strip())\n except:\n end_of_file=True\n elif line_num == correct_maps[map_indx]:\n correct+=1\n map_indx+=1\n else:\n sys.stderr.write(\"There was a problem with the index sorting\\n\")\n exit()\n\n" } ]
2
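The record's find_intersecting_snps.py builds the reverse complement one character at a time. An equivalent, more idiomatic sketch using `str.translate`; like the record's version, characters outside A/C/G/T pass through unchanged:

```python
# Equivalent sketch of the record's reverse_complement, using str.maketrans;
# behavior matches the original for A/C/G/T and leaves other characters as-is.
_COMPLEMENT = str.maketrans('ACGT', 'TGCA')

def reverse_complement(read: str) -> str:
    return read.translate(_COMPLEMENT)[::-1]

assert reverse_complement('GATTACA') == 'TGTAATC'
```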
rr39943/pop_density-edu_level
https://github.com/rr39943/pop_density-edu_level
a4e55e126ed348d77cbd0cfd8f654d797e4cab3e
cbda8e9b290df64dbd0db586a92d8498b32a5427
532093164af1280772681e18327f31b1253bad59
refs/heads/master
2022-12-12T12:08:30.107717
2018-05-01T11:21:32
2018-05-01T11:21:32
130,843,774
0
0
MIT
2018-04-24T11:29:51
2018-05-01T11:22:40
2022-12-08T02:11:23
Python
[ { "alpha_fraction": 0.6303284168243408, "alphanum_fraction": 0.6443046927452087, "avg_line_length": 35.69230651855469, "blob_id": "8998fa36864fde9335e0b2f911b77f8e59f2893c", "content_id": "f3631fc4f90be5c0e03b2944dea224a4d1b2c0e7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1431, "license_type": "permissive", "max_line_length": 83, "num_lines": 39, "path": "/scripts/clean_canton_surf_pop.py", "repo_name": "rr39943/pop_density-edu_level", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport sys\nimport os\nsys.path.append('scripts/lib')\nfrom clean_utils import cleanUtils\n\ndef create_csv_file(file_name_source, file_name_dest):\n \"\"\"\n Parse the Excel sheet provided and extract the total of population of cantons,\n habitable surface and total surface.\n It is working only with data/raw_data/su-f-01.02.04.04.xlsx. It create a csv\n file with the cantons abbreviated and the total of population.\n \"\"\"\n df = pd.read_excel(file_name_source,\n skiprows=14,\n skip_footer=9,\n usecols=[0, 5, 7, 9],\n names=['Cantons', 'SurfHabAndInf', 'SurfTotal', 'PopTotal'])\n\n # Get digrams for the cantons\n df['Cantons'] = df['Cantons'].apply(cleanUtils.canton_name_to_abbreviation)\n\n # Convert ha to km2\n df['SurfHabAndInf'] = df['SurfHabAndInf'].apply(cleanUtils.convert_ha_to_km2)\n df['SurfTotal'] = df['SurfTotal'].apply(cleanUtils.convert_ha_to_km2)\n\n df['PopBySurfHabAndInf'] = df['PopTotal'] / df['SurfHabAndInf']\n df['PopBySurfTotal'] = df['PopTotal'] / df['SurfTotal']\n\n # Test if destination folder exists, if not creates it\n if not(os.path.isdir(os.path.dirname(file_name_dest))):\n os.makedirs(os.path.dirname(file_name_dest))\n\n df.to_csv(file_name_dest, index=False)\n\n\nif __name__ == '__main__':\n create_csv_file(sys.argv[1], sys.argv[2])\n" }, { "alpha_fraction": 0.6204933524131775, "alphanum_fraction": 0.6347248554229736, "avg_line_length": 33, "blob_id": "0795027a641c11a9d9618b26c6463d7a34c02f5b", "content_id": "d6e2d347500de77f8f7a6ff0e30479a7b7c5fd72", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1054, "license_type": "permissive", "max_line_length": 84, "num_lines": 31, "path": "/scripts/clean_canton_univ_edu.py", "repo_name": "rr39943/pop_density-edu_level", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport sys\nimport os\nsys.path.append('scripts/lib')\nfrom clean_utils import cleanUtils\n\ndef create_csv_file(file_name_source, file_name_dest):\n \"\"\"\n Parse the Excel sheet provided and extract the total of persons who have a\n diploma of a university. 
It is working only with data/raw_data/T_02_02_1_01.xls.\n It create a csv file with the cantons abbreviated and highest formation.\n \"\"\"\n df = pd.read_excel(file_name_source,\n skiprows=4,\n skip_footer=10,\n usecols=[2, 13],\n names=['Cantons', 'UnivEdu'])\n\n # Get digrams for the cantons\n df['Cantons'] = df['Cantons'].apply(cleanUtils.canton_name_to_abbreviation)\n\n # Test if destination folder exists, if not creates it\n if not(os.path.isdir(os.path.dirname(file_name_dest))):\n os.makedirs(os.path.dirname(file_name_dest))\n\n df.to_csv(file_name_dest, index=False)\n\n\nif __name__ == '__main__':\n create_csv_file(sys.argv[1], sys.argv[2])\n" }, { "alpha_fraction": 0.6024723649024963, "alphanum_fraction": 0.6232921481132507, "avg_line_length": 33.155555725097656, "blob_id": "f954ab3332056ede1b047234c315eda9284fd69d", "content_id": "c76abed3e384459a787067207b1e7862e795e1f8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1537, "license_type": "permissive", "max_line_length": 92, "num_lines": 45, "path": "/tests/test_clean_canton_univ_edu.py", "repo_name": "rr39943/pop_density-edu_level", "src_encoding": "UTF-8", "text": "import unittest\nimport os\nimport pandas as pd\nimport numpy as np\nfrom scripts import clean_canton_univ_edu\n\n# Empty the \"tests/test_data/\" folder\nfile_list = os.listdir('tests/test_data/')\nfor f in file_list:\n if f == 'cantons_univ_edu.csv':\n os.remove('tests/test_data/' + f)\n\n# Create the processed csv file in the \"tests/test_data/\" folder\nclean_canton_univ_edu.create_csv_file('data/raw_data/su-f-40.02.15.08.03-2016.xlsx',\n 'tests/test_data/cantons_univ_edu.csv')\n\nclass TestCleanCantonUnivEdu(unittest.TestCase):\n\n def test_create_csv(self):\n \"\"\"\n Test if a csv file is created.\n \"\"\"\n self.assertTrue(os.path.isfile('tests/test_data/cantons_univ_edu.csv'))\n\n def test_columns_csv(self):\n \"\"\"\n Test if the second column is \"UnivEdu\".\n \"\"\"\n list_cols = pd.read_csv('tests/test_data/cantons_univ_edu.csv').columns\n self.assertEqual(list_cols[1], 'UnivEdu')\n\n def test_parse_csv_univ_edu(self):\n \"\"\"\n Test if the processed file contains 48651 for canton SG (St. 
Gallen).\n \"\"\"\n df = pd.read_csv('tests/test_data/cantons_univ_edu.csv')\n self.assertEqual(np.around(df.loc[df['Cantons']=='SG', 'UnivEdu'].values[0]), 48651)\n\n\n def test_parse_csv_length(self):\n \"\"\"\n Test if there are 26 rows in the processed file corresponding to the 26 cantons.\n \"\"\"\n df = pd.read_csv('tests/test_data/cantons_univ_edu.csv')\n self.assertEqual(len(df.index), 26)\n" }, { "alpha_fraction": 0.46321526169776917, "alphanum_fraction": 0.692098081111908, "avg_line_length": 14.956521987915039, "blob_id": "8184210e87d5804c391bec1d6ccd261360285350", "content_id": "e5f58ca6c3c7f670daf36b565d1aff31d7a19abe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 367, "license_type": "permissive", "max_line_length": 24, "num_lines": 23, "path": "/requirements.txt", "repo_name": "rr39943/pop_density-edu_level", "src_encoding": "UTF-8", "text": "appdirs==1.4.3\ncertifi==2018.4.16\nchardet==3.0.4\nConfigArgParse==0.13.0\ncycler==0.10.0\ndatrie==0.7.1\nidna==2.6\nkiwisolver==1.0.1\nmatplotlib==2.2.2\nnumpy==1.14.2\npandas==0.22.0\npyparsing==2.2.0\npython-dateutil==2.7.2\npytz==2018.4\nPyYAML==3.12\nratelimiter==1.2.0.post0\nrequests==2.18.4\nscipy==1.0.1\nsix==1.11.0\nsnakemake==4.8.0\nurllib3==1.22\nwrapt==1.10.11\nxlrd==1.1.0\n" }, { "alpha_fraction": 0.6069711446762085, "alphanum_fraction": 0.6213942170143127, "avg_line_length": 34.55555725097656, "blob_id": "0651978add7a60b580093443c2cba77a6dcbbd5a", "content_id": "0cc845047ce4df746fe0ded66075e4aaf4813b55", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4160, "license_type": "permissive", "max_line_length": 140, "num_lines": 117, "path": "/scripts/calculate_results.py", "repo_name": "rr39943/pop_density-edu_level", "src_encoding": "UTF-8", "text": "import sys\nimport os\nimport pandas as pd\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\n\ndef pearson_corr(serie_1, serie_2):\n \"\"\"\n Caculate the pearson correlation of the two provided series and return a text\n to be integrated in the report.\n \"\"\"\n\n # Calculate the correlation\n p_corr = stats.pearsonr(serie_1.values, serie_2.values)\n\n # Prepare the report\n txt = \"\"\"___\\n\\nPearson correlation between *{}* and *{}*: **{}**\\n\n* Risk error: **{} %** \\n\\n\"\"\".format(serie_1.name,\n serie_2.name,\n np.around(p_corr[0], 3),\n np.around(p_corr[1]*100, 4))\n return txt\n\n\ndef create_report(file_name_source, file_name_dest):\n \"\"\"\n Parse cleaned csv files (\"data/processed_data/cantons_surf_pop.csv\" and\n \"data/processed_data/cantons_univ_edu.csv\") and produce a new processed\n csv file.\n \"\"\"\n\n # Variable to store report string\n txt = '# Analysis Results\\n\\n'\n\n # read the data\n df = pd.read_csv(file_name_source)\n\n txt += pearson_corr(df['RatPopUnivLevel'], df['PopBySurfTotal'])\n\n txt += pearson_corr(df['RatPopUnivLevel'], df['PopBySurfHabAndInf'])\n\n txt += \"\"\"____\\n\\n![chart](./chart.png)\"\"\"\n\n # Test if destination folder exists, if not creates it\n if not(os.path.isdir(os.path.dirname(file_name_dest))):\n os.makedirs(os.path.dirname(file_name_dest))\n\n # create report file\n with open(file_name_dest, 'w') as f:\n f.write(txt)\n # print(RatPopUnivLevel_PopBySurfTotal, RatPopUnivLevel_SurfHabAndInf)\n\ndef create_subplot(**kwargs):\n \"\"\"\n Create one subplot.\n \"\"\"\n line_label = 'Part of pop. 
with university diploma'\n kwargs['graph'].set_title(kwargs['chart_title'])\n bars = kwargs['graph'].bar(range(26), kwargs['df'].iloc[:,-1].values, color='blue', width=0.6, alpha=0.5)\n kwargs['graph'].set_xticks(range(26))\n kwargs['graph'].set_xticklabels(kwargs['df']['Cantons'].values)\n ax_bis = kwargs['graph'].twinx()\n line = ax_bis.plot(range(26), kwargs['df']['RatPopUnivLevel'].values, color='r', linewidth=3)\n kwargs['graph'].set_xlabel('Cantons')\n kwargs['graph'].set_ylabel(kwargs['bars_label'])\n ax_bis.set_ylabel(line_label)\n ax_bis.set_ylim(ymin=0)\n ax_bis.set_yticklabels(['{:2.0f}%'.format(x*100) for x in ax_bis.get_yticks()])\n plt.legend((bars[0], line[0]), (kwargs['bars_label'], line_label))\n\n\ndef create_chart(file_name_source, file_name_dest):\n \"\"\"\n Create a chart with the population of canton by km2 and the part of the population\n with a university diploma.\n \"\"\"\n\n # Sort the row by pop. density\n df1 = pd.read_csv(file_name_source).sort_values(by='PopBySurfTotal', ascending=False).copy()\n df2 = df1.copy().sort_values(by='PopBySurfHabAndInf', ascending=False)\n\n fig = plt.figure(figsize=(12, 12))\n\n # First subplot\n ax1 = fig.add_subplot(211)\n ax1 = create_subplot(graph=ax1,\n df=df1.loc[:,['Cantons', 'RatPopUnivLevel', 'PopBySurfTotal']],\n bars_label = 'Pop. by km2',\n chart_title='Comparison: density pop. vs part of pop. with university diploma')\n\n # Second subplot\n ax2 = fig.add_subplot(212)\n ax2 = create_subplot(graph=ax2,\n df=df2.loc[:,['Cantons', 'RatPopUnivLevel', 'PopBySurfHabAndInf']],\n bars_label='Pop. by km2 (habitable and infrastructure)',\n chart_title='Comparison: density pop. (habitable and infrastructure part) vs part of pop. with university diploma')\n\n plt.savefig(file_name_dest)\n\n\ndef main(file_name_source, action, file_name_dest):\n \"\"\"\n Calculate results:\n - action \"--build-report\": calculate Pearson correlation between\n - action \"--build-graph\": build png chart\n \"\"\"\n if action == '--build-report':\n create_report(file_name_source, file_name_dest)\n if action == '--build-chart':\n create_chart(file_name_source, file_name_dest)\n\nif __name__ == '__main__':\n main(sys.argv[1], sys.argv[2], sys.argv[3])\n" }, { "alpha_fraction": 0.6199407577514648, "alphanum_fraction": 0.6367226243019104, "avg_line_length": 31.677419662475586, "blob_id": "c7521c6c94e91dc8e6dd7478504b02fa4c67e771", "content_id": "d1e16ee5f1621947c4615bf389247ade088e03ca", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1015, "license_type": "permissive", "max_line_length": 100, "num_lines": 31, "path": "/tests/test_clean_utils.py", "repo_name": "rr39943/pop_density-edu_level", "src_encoding": "UTF-8", "text": "import unittest\nimport sys\nsys.path.append('scripts/lib')\nfrom clean_utils import cleanUtils\n\nclass TestCleanUtils(unittest.TestCase):\n\n def test_read_json(self):\n \"\"\"\n Switzerland has 26 cantons but the json has more keys (orthographcical variations). 
Check if\n the object returned has 47 keys.\n \"\"\"\n self.assertEqual(len(cleanUtils._read_json('abbreviated_cantons.json')), 47)\n\n def test_canton_name_to_abbreviation_1(self):\n \"\"\"\n Test if for \"Zurich\", the function return \"ZH\".\n \"\"\"\n self.assertEqual(cleanUtils.canton_name_to_abbreviation('Zurich'), 'ZH')\n\n def test_canton_name_to_abbreviation_2(self):\n \"\"\"\n Test if for \"Neuchâtel\" return \"NE\".\n \"\"\"\n self.assertEqual(cleanUtils.canton_name_to_abbreviation('Neuchâtel'), 'NE')\n\n def test_convert_ha_to_km2(self):\n \"\"\"\n Test if conversion of ha to km2 is working.\n \"\"\"\n self.assertEqual(cleanUtils.convert_ha_to_km2(1000), 10)\n" }, { "alpha_fraction": 0.6491442322731018, "alphanum_fraction": 0.6625916957855225, "avg_line_length": 31.719999313354492, "blob_id": "43aa8ff8547d65214e519274df3556dd44452895", "content_id": "71029d44c267b734e2b82ac280454203ba6138f9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 818, "license_type": "permissive", "max_line_length": 76, "num_lines": 25, "path": "/scripts/build_dataframe.py", "repo_name": "rr39943/pop_density-edu_level", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport os\nimport sys\nsys.path.append('scripts/lib')\n\ndef create_csv_file(file_name_source_1, file_name_source_2, file_name_dest):\n \"\"\"\n Parse cleaned csv files (\"data/processed_data/cantons_surf_pop.csv\" and\n \"data/processed_data/cantons_univ_edu.csv\") and produce a new processed\n csv file.\n \"\"\"\n df1 = pd.read_csv(file_name_source_1)\n df2 = pd.read_csv(file_name_source_2)\n df = df1.merge(df2, on='Cantons')\n\n df['RatPopUnivLevel'] = df['UnivEdu'] / df['PopTotal']\n\n # Test if destination folder exists, if not creates it\n if not(os.path.isdir(os.path.dirname(file_name_dest))):\n os.makedirs(os.path.dirname(file_name_dest))\n\n df.to_csv(file_name_dest, index=False)\n\nif __name__ == '__main__':\n create_csv_file(sys.argv[1], sys.argv[2], sys.argv[3])\n" }, { "alpha_fraction": 0.6545064449310303, "alphanum_fraction": 0.7246065735816956, "avg_line_length": 98.85713958740234, "blob_id": "c8c69ac577cb9c6a1ebfb8c298e32571952e90e4", "content_id": "9f736ed141e3da8ebeb790b386ff06a6a9e63dd2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1399, "license_type": "permissive", "max_line_length": 204, "num_lines": 14, "path": "/documentation/codebook.md", "repo_name": "rr39943/pop_density-edu_level", "src_encoding": "UTF-8", "text": "# Codebook\nThis table describes all the head columns used in the intermediate and processed data.\n\n\nTags | Type | Unit | Description | Data files | Sources\n--- | --- | --- | --- | --- | ---\nCantons | string | - | Official designation of canton with two uppercase chars | cantons_surf_pop.csv, cantons_surf_edu.csv, cantons_surf_pop_edu.csv | https://en.wikipedia.org/wiki/Cantons_of_Switzerland\nUnivEdu | float | nb persons | Estimated number of persons holding a university degree or equivalent (\"haute école\") | cantons_surf_edu.csv, cantons_surf_pop_edu.csv | T_02_02_1_01.xls\nSurfHabAndInf | float | square km | Habitable and infrastructures surface of cantons | cantons_surf_pop.csv, cantons_surf_pop_edu.csv | su-f-40.02.15.08.03-2016.xlsx\nSurfTotal | float | square km | Surface of the cantons | cantons_surf_pop.csv, cantons_surf_pop_edu.csv | su-f-40.02.15.08.03-2016.xlsx\nPopTotal | integer | nb persons | Pupulation of the canton | 
cantons_surf_pop.csv, cantons_surf_pop_edu.csv | su-f-40.02.15.08.03-2016.xlsx\nPopBySurfHabAndInf | float | - | Ratio PopTotal/SurfHabAndInf | cantons_surf_pop_edu.csv | su-f-40.02.15.08.03-2016.xlsx\nPopBySurfTotal | float | - | Ratio of PopTotal/SurfTotal | cantons_surf_pop_edu.csv | su-f-40.02.15.08.03-2016.xlsx\nRatPopUnivLevel | float | - | Ratio of UnivEdu/PopTotal | cantons_surf_pop_edu.csv | T_02_02_1_01.xls, su-f-40.02.15.08.03-2016.xlsx\n" }, { "alpha_fraction": 0.6174682378768921, "alphanum_fraction": 0.6329463720321655, "avg_line_length": 40.1136360168457, "blob_id": "6afd66bdcd4f24222246c7008cd1cf602c09301b", "content_id": "94d3d27a58688604a403870b448a2f2a682bb82c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1809, "license_type": "permissive", "max_line_length": 131, "num_lines": 44, "path": "/tests/test_clean_canton_surf_pop_edu.py", "repo_name": "rr39943/pop_density-edu_level", "src_encoding": "UTF-8", "text": "import unittest\nimport os\nimport pandas as pd\nimport numpy as np\nfrom scripts import clean_canton_surf_pop\nfrom scripts import clean_canton_univ_edu\nfrom scripts import build_dataframe\n\n# Empty the \"tests/test_data/\" folder\nfile_list = os.listdir('tests/test_data/')\nfor f in file_list:\n if f in ['cantons_surf_pop_edu.csv', 'tests/test_data/cantons_surf_pop_test.csv', 'tests/test_data/cantons_univ_edu_test.csv']:\n os.remove('tests/test_data/' + f)\n\n# Create the processed csv file in the \"tests/test_data/\" folder\nclean_canton_surf_pop.create_csv_file('data/raw_data/T_02_02_1_01.xls',\n 'tests/test_data/cantons_surf_pop_test.csv')\nclean_canton_univ_edu.create_csv_file('data/raw_data/su-f-40.02.15.08.03-2016.xlsx',\n 'tests/test_data/cantons_univ_edu_test.csv')\nbuild_dataframe.create_csv_file('tests/test_data/cantons_surf_pop_test.csv',\n 'tests/test_data/cantons_univ_edu_test.csv',\n 'tests/test_data/cantons_surf_pop_edu.csv')\n\nclass TestCleanCantonSurfPopEdu(unittest.TestCase):\n\n def test_create_csv(self):\n \"\"\"\n Test if a csv file is created.\n \"\"\"\n self.assertTrue(os.path.isfile('tests/test_data/cantons_surf_pop_edu.csv'))\n\n def test_columns_csv(self):\n \"\"\"\n Test if the second column is \"UnivEdu\".\n \"\"\"\n list_cols = pd.read_csv('tests/test_data/cantons_surf_pop_edu.csv').columns\n self.assertEqual(len(list_cols), 8)\n\n def test_parse_csv_length(self):\n \"\"\"\n Test if there are 26 rows in the processed file corresponding to the 26 cantons.\n \"\"\"\n df = pd.read_csv('tests/test_data/cantons_surf_pop_edu.csv')\n self.assertEqual(len(df.index), 26)\n" }, { "alpha_fraction": 0.5936277508735657, "alphanum_fraction": 0.616545557975769, "avg_line_length": 34.779998779296875, "blob_id": "d47573076e3f6cc892e523efc067f64e1bba9ed3", "content_id": "c5e9217769f148f13f8933bea12239059db05015", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1789, "license_type": "permissive", "max_line_length": 88, "num_lines": 50, "path": "/tests/test_clean_canton_surf_pop.py", "repo_name": "rr39943/pop_density-edu_level", "src_encoding": "UTF-8", "text": "import unittest\nimport os\nimport pandas as pd\nfrom scripts import clean_canton_surf_pop\n\n# Empty the \"tests/test_data/\" folder\nfile_list = os.listdir('tests/test_data/')\nfor f in file_list:\n if f == 'cantons_surf_pop.csv':\n os.remove('tests/test_data/' + f)\n\n# Create the processed csv file in the \"tests/test_data/\" 
folder\nclean_canton_surf_pop.create_csv_file('data/raw_data/T_02_02_1_01.xls',\n 'tests/test_data/cantons_surf_pop.csv')\n\nclass TestCleanCantonSurfPop(unittest.TestCase):\n\n def test_create_csv(self):\n \"\"\"\n Test if a csv file is created.\n \"\"\"\n self.assertTrue(os.path.isfile('tests/test_data/cantons_surf_pop.csv'))\n\n def test_columns_csv(self):\n \"\"\"\n Test if the 4th column is \"PopTotal\".\n \"\"\"\n list_cols = pd.read_csv('tests/test_data/cantons_surf_pop.csv').columns\n self.assertEqual(list_cols[3], 'PopTotal')\n\n def test_parse_csv_pop(self):\n \"\"\"\n Test if the processed file contains 54543 for canton AR (Appenzell Rh.-Ext.).\n \"\"\"\n df = pd.read_csv('tests/test_data/cantons_surf_pop.csv')\n self.assertEqual(df.loc[df['Cantons']=='AR', 'PopTotal'].values[0], 54543)\n\n def test_parse_csv_surf(self):\n \"\"\"\n Test if the processed file contains 16714400 ha for canton FR (Fribourg).\n \"\"\"\n df = pd.read_csv('tests/test_data/cantons_surf_pop.csv')\n self.assertEqual(df.loc[df['Cantons']=='FR', 'SurfTotal'].values[0], 1671.44)\n\n def test_parse_csv_length(self):\n \"\"\"\n Test if there are 26 rows in the processed file corresponding to the 26 cantons.\n \"\"\"\n df = pd.read_csv('tests/test_data/cantons_surf_pop.csv')\n self.assertEqual(len(df.index), 26)\n" }, { "alpha_fraction": 0.5985401272773743, "alphanum_fraction": 0.6532846689224243, "avg_line_length": 15.176470756530762, "blob_id": "cabe01b51eb9840d47bac4458ef889d3cb651592", "content_id": "b47e0c27bedd4fad7bdc55a99f6681bce000c782", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 274, "license_type": "permissive", "max_line_length": 81, "num_lines": 17, "path": "/data/processed_data/report.md", "repo_name": "rr39943/pop_density-edu_level", "src_encoding": "UTF-8", "text": "# Analysis Results\n\n___\n\nPearson correlation between *RatPopUnivLevel* and *PopBySurfTotal*: **0.713**\n\n* Risk error: **0.0044 %** \n\n___\n\nPearson correlation between *RatPopUnivLevel* and *PopBySurfHabAndInf*: **0.819**\n\n* Risk error: **0.0 %** \n\n____\n\n![chart](./chart.png)" }, { "alpha_fraction": 0.7015384435653687, "alphanum_fraction": 0.7481318712234497, "avg_line_length": 53.16666793823242, "blob_id": "0c44e25b65e5bc7beb8702348662878a6f30bdc3", "content_id": "0eecc0e5ac8950610cdaaf824f00d83000663095", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2288, "license_type": "permissive", "max_line_length": 254, "num_lines": 42, "path": "/documentation/files_description.md", "repo_name": "rr39943/pop_density-edu_level", "src_encoding": "UTF-8", "text": "# Description of data files\n\n## Raw Data\n**data/raw_data/su-f-40.02.15.08.03-2016.xlsx:**\n* Title: Population résidante permanente de 15 ans et plus, selon la formation achevée la plus élevée et le canton, en 2016\n* Author: Office fédéral de la statistique (OFS), section POP\n* Description: population of 15 years old and more by education level. We use the following columns:\n * \"Canton\"\n * \"Hautes écoles\": \"Nombres absolus\". 
The document provide a confidence interval that we ignored in this study.\n* Creation date: 2013-05-06\n* Last modified date: 2018-01-09\n* URL: https://www.bfs.admin.ch/bfs/fr/home/statistiques/catalogues-banques-donnees/tableaux.assetdetail.4242918.html\n* Format: xlsx\n\n**data/raw_data/T_02_02_1_01.xls:**\n* Title: Statistique de la superficie de la Suisse et population résidante, par canton\n* Author: Office cantonal de la statistique - OCSTAT\n* Description: area and population density, by township and statistical area of ​​the city, since 2005. We use the following columns:\n * Column A with the cantons\n * \"Surface en hectare\": \"Surface d'habitat et d'infrastructure\" and \"Total\"\n * \"Population résidante\"\n* Creation date: 1999-01-29\n* Last modified date: 2017-05-18\n* URL: https://www.ge.ch/statistique/tel/domaines/02/02_02/T_02_02_1_01.xls\n* Format: xls\n\n## Intermediate Data\n**data/intermediate_data/cantons_surf_pop.csv**\n* title: cantons_surf_pop.csv\n* Description: file build from the parsing of \"data/raw_data/T_02_02_1_01.xls\". Contains the columns \"Cantons\", \"SurfHabAndInf\", \"SurfTotal\", \"PopTotal\", \"PopBySurfHabAndInf\", \"PopBySurfTotal\". Describe the population density and the size of each canton.\n\n**data/intermediate_data/cantons_univ_edu.csv**\n* Title: cantons_univ_edu.csv\n* Description: file build from the parsing of \"data/raw_data/su-f-40.02.15.08.03-2016.xlsx\". Contains the columns \"Cantons\", \"UnivEdu\". Mentions the number of person with a university degree.\n* format: text/csv\n\n## Processed data\n\n**data/processed_data/cantons_surf_pop_edu.csv**\n* Title: cantons_surf_pop_edu.csv\n* Description: file with all the processed data. Contains the following columns: \"Cantons\", \"UnivEdu\", \"SurfHabAndInf\", \"SurfTotal\", \"PopTotal\", \"PopBySurfHabAndInf\", \"PopBySurfTotal\", \"RatPopUnivLevel\"\n* format: text/csv\n" }, { "alpha_fraction": 0.598349392414093, "alphanum_fraction": 0.6031636595726013, "avg_line_length": 26.433961868286133, "blob_id": "950f3bcbc2f9e8f1322b13b07128eba4921490b6", "content_id": "146701a217694dfa67f7b0e6452daf8b87f78fbd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1454, "license_type": "permissive", "max_line_length": 109, "num_lines": 53, "path": "/scripts/lib/clean_utils.py", "repo_name": "rr39943/pop_density-edu_level", "src_encoding": "UTF-8", "text": "import os\nimport json\nimport re\nimport unicodedata\n\nclass cleanUtils:\n \"\"\"\n Class used to clean datafames. 
Tables used to clean data are json files\n located at the same folder (for example \"abbreviated_cantons.json\").\n \"\"\"\n\n # Set class variables to use local files\n module_file = os.path.abspath(__file__)\n module_dir = os.path.dirname(module_file)\n\n\n @classmethod\n def _read_json(cls, file_name):\n \"\"\"\n Return object from a local json file.\n \"\"\"\n\n json_file = os.path.join(cls.module_dir, file_name)\n\n with open(json_file, 'r', encoding='utf8') as f:\n return json.load(f)\n\n\n @classmethod\n def canton_name_to_abbreviation(cls, canton_name):\n \"\"\"\n Return the abbreviated form of the canton name.\n \"\"\"\n if not(hasattr(cls, 'cantons')):\n cls.cantons = cls._read_json('abbreviated_cantons.json')\n\n # Remove parentheses and useless spaces\n canton_name = cls.normalize_txt(re.split(r'[(/]', canton_name)[0].strip())\n return cls.cantons[canton_name]\n\n @staticmethod\n def normalize_txt(txt):\n \"\"\"\n Remove accents et set in lower case.\n \"\"\"\n return unicodedata.normalize('NFD', txt).encode('ascii', 'ignore').decode('utf-8', 'ignoree').lower()\n\n @staticmethod\n def convert_ha_to_km2(nb):\n \"\"\"\n Convert hectares to square kilometers.\n \"\"\"\n return nb / 100\n" }, { "alpha_fraction": 0.5865896940231323, "alphanum_fraction": 0.6074600219726562, "avg_line_length": 40.703704833984375, "blob_id": "55b5e8b33c7c0a0a4e3a271848a22720d5667be0", "content_id": "1b4a7fd06fb7ba929cfce7b2fea7b0372db09383", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2252, "license_type": "permissive", "max_line_length": 164, "num_lines": 54, "path": "/tests/test_calculate_results.py", "repo_name": "rr39943/pop_density-edu_level", "src_encoding": "UTF-8", "text": "import unittest\nimport os\nimport pandas as pd\nimport numpy as np\nfrom scripts import clean_canton_surf_pop\nfrom scripts import clean_canton_univ_edu\nfrom scripts import build_dataframe\nfrom scripts import calculate_results\n\n# Empty the \"tests/test_data/\" folder\nfile_list = os.listdir('tests/test_data/')\nfor f in file_list:\n if f in ['chart.png', 'report.md', 'cantons_surf_pop_edu_test.csv', 'tests/test_data/cantons_surf_pop_test2.csv', 'tests/test_data/cantons_univ_edu_test2.csv']:\n os.remove('tests/test_data/' + f)\n\n# Create the processed csv file in the \"tests/test_data/\" folder\nclean_canton_surf_pop.create_csv_file('data/raw_data/T_02_02_1_01.xls',\n 'tests/test_data/cantons_surf_pop_test2.csv')\nclean_canton_univ_edu.create_csv_file('data/raw_data/su-f-40.02.15.08.03-2016.xlsx',\n 'tests/test_data/cantons_univ_edu_test2.csv')\nbuild_dataframe.create_csv_file('tests/test_data/cantons_surf_pop_test2.csv',\n 'tests/test_data/cantons_univ_edu_test2.csv',\n 'tests/test_data/cantons_surf_pop_edu_test.csv')\n\ncalculate_results.main('tests/test_data/cantons_surf_pop_edu_test.csv',\n '--build-chart',\n 'tests/test_data/chart.png')\n\ncalculate_results.main('tests/test_data/cantons_surf_pop_edu_test.csv',\n '--build-report',\n 'tests/test_data/report.md')\n\nclass TestCalculateResults(unittest.TestCase):\n\n def test_main(self):\n \"\"\"\n Test if main function create a report.\n \"\"\"\n self.assertTrue(os.path.isfile('tests/test_data/report.md'))\n\n def test_calculate_pearson_corr(self):\n \"\"\"\n Test if a string report is returned.\n \"\"\"\n serie_1 = pd.Series([1, 2, 3, 4], name='serie_1')\n serie_2 = pd.Series([5, 6, 7, 8], name='serie_2')\n txt = calculate_results.pearson_corr(serie_1, serie_2)\n self.assertTrue(txt.find('*: 
**1.0**') > 0 and txt.find('Risk error: **0.0 %**') > 0)\n\n def test_create_chart(self):\n \"\"\"\n Test if the chart exists.\n \"\"\"\n self.assertTrue(os.path.isfile('tests/test_data/chart.png'))\n" }, { "alpha_fraction": 0.7448789477348328, "alphanum_fraction": 0.7703289985656738, "avg_line_length": 39.275001525878906, "blob_id": "f9eded6b0da893ffa2a94dacd461e18f68c7d2c1", "content_id": "24b5921a4e09fe5c1904d6decd3f86c3bbe46c17", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1611, "license_type": "permissive", "max_line_length": 164, "num_lines": 40, "path": "/README.md", "repo_name": "rr39943/pop_density-edu_level", "src_encoding": "UTF-8", "text": "# Population density and education level correlation\n\n[![Build Status](https://travis-ci.org/rr39943/pop_density-edu_level.svg?branch=master)](https://travis-ci.org/rr39943/pop_density-edu_level)\n[![codebeat badge](https://codebeat.co/badges/045ca497-dbff-4a7d-a45e-61569840ca5b)](https://codebeat.co/projects/github-com-rr39943-pop_density-edu_level-master)\n\nThis repository is a test repository to analyse and test best practices in data management. It analyses data from Swiss cantons to determine if a correlation exists\nbetween population density and the share of population holding a university degree or equivalent.\n\n## Prerequisites\n* Standard version of python 3.6\n* Python libraries listed in [requirements.txt](./requirements.txt).\n\n## Installation\n\nExample of Linux installation with [virtualenvwrapper](https://virtualenvwrapper.readthedocs.io):\n```\nmkvirtualenv demoData\nworkon demoData\ngit clone https://github.com/rr39943/pop_density-edu_level.git\npip install -r requirements.txt\n```\n\n## Usage\n* To build processed data, run the Snakefile with command \"snakemake\"\n* To clean the \"intermidate_data\" and \"processed_data\", you may use command \"snakemake clean\"\n* To launch the tests, use command \"python -m unittest\"\n\n**Snakemake Rules:**\n\n![rules](/documentation/rulegraph.png)\n\n## Data description\n* List of files: [files_description.md](documentation/files_description.md)\n* Metadata: [codebook.md](./documentation/codebook.md)\n\n## Authors\n* **Raphaël Rey:** [raphael.rey@epfl.ch](mailto:raphael.rey@epfl.ch)\n\n## License\nThis project is licensed under the MIT License - see the [LICENSE.md](./LICENSE.txt) file for details.\n" }, { "alpha_fraction": 0.6228668689727783, "alphanum_fraction": 0.6343856453895569, "avg_line_length": 28.670886993408203, "blob_id": "e6e95e028e91559d1b79eb46623695a2c56d9b3f", "content_id": "f3878a5ed1faec7d4ba40c67bb0bcbb73e621c19", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2344, "license_type": "permissive", "max_line_length": 105, "num_lines": 79, "path": "/Snakefile", "repo_name": "rr39943/pop_density-edu_level", "src_encoding": "UTF-8", "text": "rule all:\n input:\n \"data/processed_data/report.md\",\n\n\nrule clean_excel_surf_pop:\n input:\n \"data/raw_data/T_02_02_1_01.xls\",\n \"scripts/lib/abbreviated_cantons.json\",\n \"scripts/lib/clean_utils.py\",\n \"scripts/clean_canton_surf_pop.py\"\n output:\n \"data/intermediate_data/cantons_surf_pop.csv\"\n message:\n \"From Excel file with data about cantons surface and population, create clean csv file.\"\n shell:\n \"python scripts/clean_canton_surf_pop.py {input[0]} {output}\"\n\n\nrule clean_excel_univ_edu:\n input:\n \"data/raw_data/su-f-40.02.15.08.03-2016.xlsx\",\n 
\"scripts/lib/abbreviated_cantons.json\",\n \"scripts/lib/clean_utils.py\",\n \"scripts/clean_canton_univ_edu.py\"\n output:\n \"data/intermediate_data/cantons_univ_edu.csv\"\n message:\n \"From Excel file with data about cantons with university level education, create clean csv file.\"\n shell:\n \"python scripts/clean_canton_univ_edu.py {input[0]} {output}\"\n\n\nrule combine_csv_files:\n input:\n \"data/intermediate_data/cantons_univ_edu.csv\",\n \"data/intermediate_data/cantons_surf_pop.csv\",\n \"scripts/build_dataframe.py\"\n output:\n \"data/processed_data/cantons_surf_pop_edu.csv\"\n message:\n \"Parse cleaned csv files, combine them and produce a new processed csv file.\"\n shell:\n \"python scripts/build_dataframe.py {input[0]} {input[1]} {output}\"\n\n\nrule create_chart:\n input:\n \"data/processed_data/cantons_surf_pop_edu.csv\",\n \"scripts/calculate_results.py\"\n output:\n \"data/processed_data/chart.png\"\n message:\n \"Produce comparison chart.\"\n shell:\n \"python scripts/calculate_results.py {input[0]} --build-chart {output}\"\n\n\nrule create_report:\n input:\n \"data/processed_data/cantons_surf_pop_edu.csv\",\n \"data/processed_data/chart.png\",\n \"scripts/calculate_results.py\"\n output:\n \"data/processed_data/report.md\"\n message:\n \"Produce final report.\"\n shell:\n \"python scripts/calculate_results.py {input[0]} --build-report {output}\"\n\n\nrule clean:\n shell:\n \"\"\"rm -f data/processed_data/*\n rm -f data/intermediate_data/*\"\"\"\n\nrule rulegraph:\n shell:\n \"snakemake --rulegraph | dot -Tpng > documentation/rulegraph.png\"\n" } ]
16
lackhoa/logic_gate
https://github.com/lackhoa/logic_gate
688925208a2a282b01665deb49124cea46e91621
aa3d6230b282804f0724f3d7e99709913a8c49a9
5ed341b5e3dcfce9191c8a8743b533c574b87fbe
refs/heads/master
2021-01-20T06:12:55.183428
2017-05-20T15:26:02
2017-05-20T15:26:02
89,856,535
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5763331055641174, "alphanum_fraction": 0.5814462900161743, "avg_line_length": 33.224998474121094, "blob_id": "5372302d3fa45453f427edbed90815ed8e6b15fd", "content_id": "b25703ccf5da6b60c0da09adcac99f0469ab8a44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4107, "license_type": "no_license", "max_line_length": 208, "num_lines": 120, "path": "/LogicExpStream.py", "repo_name": "lackhoa/logic_gate", "src_encoding": "UTF-8", "text": "from ExtensibleStream import ExtensibleStream\nfrom LogicExp import operation_count\nfrom Enum import Var, Op, Const\n\n\nclass LogicExpStream(ExtensibleStream):\n \"\"\"\n Represents a logic expression\n Example: [[AND], [A, AND], [B, C]]\n TESTED\n \"\"\"\n\n def extend(self):\n \"\"\"\n TESTED! (BUT THERE IS A BUG) (BUG FIXED!)\n \"\"\"\n if self.value == []:\n next_level_count = 1\n else:\n last_level = self.value[len(self.value) - 1]\n op_count_last_lvl = operation_count(last_level)\n if op_count_last_lvl is 0:\n raise ValueError('Hey, you cannot extend this. This expression is complete')\n next_level_count = 2 * op_count_last_lvl\n # the next level always contains branches count of 2*branch_count_of_the_last_level (except for the empty case)\n # so you can expand that much\n lvl_str = LevelStream([])\n next_lvl_streams = lvl_str.extend_count(next_level_count)\n\n result = []\n for level_stream in next_lvl_streams:\n result.append(self._make_child(level_stream.value))\n\n return result\n\n def extend_count(self, count: int):\n raise NotImplementedError('This function is not implemented due to complete expressions')\n\n\nclass LevelStream(ExtensibleStream):\n \"\"\"\n A stream of a level, which is just a list of expressions symbols\n TESTED!\n \"\"\"\n\n def extend(self):\n result = []\n exp_symbol_list = list(Const)\n exp_symbol_list.extend(list(Var))\n exp_symbol_list.extend(list(Op))\n\n for exp_symbol in exp_symbol_list:\n result.append(self._make_child(exp_symbol))\n return result\n\n def double_extend(self):\n \"\"\"\n Extend twice using pairs: more efficient\n TESTING!\n \"\"\"\n result = []\n pair_streams = PairStream([]).extend()\n\n for pair_stream in pair_streams:\n child = self._make_child(pair_stream.value[0][0])\n child = child._make_child(pair_stream.value[0][1])\n result.append(child)\n return result\n\n def extend_count(self, count):\n \"\"\"\n Now using pairs to make this thing cost less memoy\n TESTING!\n \"\"\"\n if count == 0:\n raise ValueError('Hey, you cannot extend 0 times')\n elif count == 1:\n return self.extend()\n else: # Now you we use pairs\n current_streams = [self]\n\n for _ in range(count // 2):\n next_streams = []\n for stream in current_streams:\n next_streams.extend(stream.double_extend())\n current_streams = next_streams\n\n if not count % 2 == 0: # Do another round if the pairs ain't enough\n next_streams = []\n for stream in current_streams:\n next_streams.extend(stream.extend())\n current_streams = next_streams\n\n return current_streams\n\n\nclass PairStream(ExtensibleStream):\n \"\"\"\n + pair: a pair is a tuple (?) of two logical symbols. Though I'm using tuple here, note that the order of the pair doesn't matter, and that two pair with the same symbols ordered differently are identical\n + pair is just a means to save memory during the construction of a level. 
Aside from that, it isn't used in any other function due to compatibility\n \"\"\"\n all_pairs = [] # Class variable to store the pairs since it only has to extend once\n\n def extend(self):\n \"\"\"\n Returns: a list of pairs\n TESTING\n \"\"\"\n if self.all_pairs == []:\n exp_symbol_list = list(Const)\n exp_symbol_list.extend(list(Var))\n exp_symbol_list.extend(list(Op))\n\n result = []\n for exp_symbol1 in exp_symbol_list:\n for exp_symbol2 in exp_symbol_list:\n if exp_symbol1.value <= exp_symbol2.value: # For symmetric reason\n self.all_pairs.append(self._make_child((exp_symbol1, exp_symbol2)))\n\n return self.all_pairs\n" }, { "alpha_fraction": 0.5814977884292603, "alphanum_fraction": 0.5983415246009827, "avg_line_length": 36.82352828979492, "blob_id": "2656b938afeb3f775ff1c7890f0dbe1b9b108879", "content_id": "f3a991b2e2d80c2b391721e21779167985a5ee08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3859, "license_type": "no_license", "max_line_length": 249, "num_lines": 102, "path": "/test.py", "repo_name": "lackhoa/logic_gate", "src_encoding": "UTF-8", "text": "import unittest\nfrom LogicExp import *\nfrom LogicExpStream import *\nfrom ExtensibleStream import *\nfrom LogicExp import _make_linker\nfrom BitStream import BitStream\n\n\nclass Test(unittest.TestCase):\n \"\"\"\n Testing the BitStream class\n \"\"\"\n\n def test_bit_extension(self):\n \"\"\"\n The real test function. I don't know how it works, but I think it will be cool\n :return: dunno\n \"\"\"\n bit_stream = BitStream([])\n test1 = bit_stream.extend_count(3)\n test2 = bit_stream.extend_count(4)\n self.assertEqual(len(test1), 8)\n self.assertEqual(len(test2), 16)\n\n def test_level_stream_extend(self):\n \"\"\"\n Tests the extend method of level stream class\n \"\"\"\n lvl_stream_test = LevelStream([])\n self.assertEquals(len(lvl_stream_test.extend_count(1)), 7)\n self.assertEquals(len(lvl_stream_test.extend_count(2)),28)\n self.assertEquals(len(lvl_stream_test.extend_count(3)), 28*7)\n\n def test_LogicExpStream_extend(self):\n log_exp_str = LogicExpStream([])\n extend_1 = log_exp_str.extend()\n self.assertEquals(len(extend_1), 7)\n\n extend_2 = []\n for str in extend_1:\n if not is_complete(str.value):\n extend_2.extend(str.extend())\n self.assertEquals(len(extend_2), 56)\n\n def test_is_complete(self):\n exp1 = [[Op.AND], [Var.A, Op.XOR], [Var.B, Var.C]]\n self.assertEquals(is_complete(exp1), True)\n\n exp2 = [[Op.AND], [Var.A, Op.XOR], [Const.ONE, Op.AND]]\n self.assertEquals(is_complete(exp2), False)\n\n def test_evaluate(self):\n exp1 = [[Op.AND], [Var.A, Op.XOR], [Var.B, Var.C]]\n args = [True, False, False]\n self.assertEquals(evaluate(exp1, args), False)\n\n exp2 = [[Op.AND], [Var.A, Op.XOR], [Const.ZERO, Op.XOR], [Var.A, Var.C]]\n self.assertEquals(evaluate(exp2, args), True)\n\n def test_make_child(self):\n parent = ExtensibleStream(['mammal', 'chicken'])\n child = parent._make_child('dog')\n self.assertEquals(child.value, ['mammal', 'chicken', 'dog'])\n\n def test_make_truth_table(self):\n \"\"\"\n Testing the make_truth_table function in the LogicExp file\n \"\"\"\n exp = [[Op.AND], [Var.A, Op.XOR], [Const.ZERO, Op.XOR], [Var.A, Var.C]]\n args_tuples = [(False,False,False), (False,False,True), (False,True,False), (False,True,True), (True,False,False), (True,False,True), (True,True,False), (True,True,True)]\n\n truth_table = make_truth_table(exp, args_tuples)\n\n compare_table = {(False, False, False): False, (False, False, True): False, (False, True, 
False): False, (False, True, True): False, (True, False, False): True, (True, False, True): False,(True, True, False): True, (True, True, True): False}\n\n self.assertEquals(truth_table, compare_table)\n\n def test_make_linker(self):\n exp = [[Op.XOR], [Op.AND, Op.XOR], [Var.A, Const.ONE, Op.AND, Var.C], [Var.A, Var.A]]\n linker = _make_linker(exp)\n compare_value = {(0, 0): (0, 1), (1, 0): (0, 1), (1, 1): (2, 3), (2, 2): (0, 1)}\n\n self.assertEquals(linker, compare_value)\n\n def test_pair_stream_extend(self):\n test_pair_stream = PairStream([])\n self.assertEquals(len(PairStream([]).extend()), 28)\n\n def test_stream_extend_count(self):\n self.assertEquals(len(LevelStream([]).extend_count(1)), 7)\n self.assertEquals(len(LevelStream([]).extend_count(2)), 28)\n self.assertEquals(len(LevelStream([]).extend_count(4)), 784)\n\n def test_double_extend(self):\n self.assertEquals(len(LevelStream([]).double_extend()), 28)\n\n def test_operation_count_all(self):\n exp = [[Op.AND], [Op.XOR, Var.C], [Op.AND, Var.A]]\n self.assertEquals(operation_count_all(exp), 3)\n\nif __name__ == \"__main__\":\n unittest.main()\n\n" }, { "alpha_fraction": 0.5947992205619812, "alphanum_fraction": 0.6129032373428345, "avg_line_length": 28.910890579223633, "blob_id": "70faf1cf29d552fbe58cfe4b1c8dfbb806b731c5", "content_id": "162130e8f86a55f7dbf76e35631ac0627d791cd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3038, "license_type": "no_license", "max_line_length": 118, "num_lines": 101, "path": "/main.py", "repo_name": "lackhoa/logic_gate", "src_encoding": "UTF-8", "text": "from LogicExpStream import LogicExpStream\nfrom LogicExp import is_complete, evaluate, make_truth_table, operation_count_all\nfrom BitStream import *\nimport sys\n\n\n# These are the full adder functions\ndef full_adder_result(args: list) -> bool:\n \"\"\"\n Returns the result of full_adder with arguments args\n \"\"\"\n if args == [0, 0, 0]:\n return False\n elif args == [0, 0, 1]:\n return True\n elif args == [0, 1, 0]:\n return True\n elif args == [0, 1, 1]:\n return False\n\n elif args == [1, 0, 0]:\n return True\n elif args == [1, 0, 1]:\n return False\n elif args == [1, 1, 0]:\n return False\n elif args == [1, 1, 1]:\n return True\n # That's it! There's no other cases: NO OTHER CASES\n\n\ndef full_adder_carrier(args: list) -> bool:\n \"\"\"\n Returns the carrier of full_adder with arguments args\n \"\"\"\n if args == [0, 0, 0]:\n return False\n elif args == [0, 0, 1]:\n return False\n elif args == [0, 1, 0]:\n return False\n elif args == [0, 1, 1]:\n return True\n\n elif args == [1, 0, 0]:\n return False\n elif args == [1, 0, 1]:\n return True\n elif args == [1, 1, 0]:\n return True\n elif args == [1, 1, 1]:\n return True\n # That's it! 
There's no other cases: NO OTHER CASES\n\n\n# compare the results of the two funcitons\n\n# Preparing args:\nall_args = []\nbit_stream = BitStream([])\nfor bit_stream_third in bit_stream.extend_count(3):\n all_args.append(tuple(bit_stream_third.value))\n\n# Preparing truth table for the logic gates:\nfull_adder_result_truth_table = {} # Yes, the truth table is a dictionary with keys as argument list\nfor args in all_args:\n full_adder_result_truth_table[args] = full_adder_result(list(args))\n\nfull_adder_carrier_truth_table = {} # Yes, the truth table is a dictionary with keys as argument list\nfor args in all_args:\n full_adder_carrier_truth_table[args] = full_adder_carrier(list(args)) # turned args to tuples to use them as keys\n\n# Make a empty LogicExpStream\ncurrent_exp_stream_pool = [LogicExpStream([])]\n# then expand it\n\n# constraints: necessary to run this shit without frying your RAM\noperation_constraint = 4 # Constraint of operation count per expression\nlevel_constraint = 4 # Constraint of the number of level\n\nlevel_count = 0 # This is here just for the tracking\nwhile level_count <= level_constraint:\n print('level: ', level_count)\n level_count += 1\n \n # Extending and testing at the same time\n next = []\n _stream_count = 0\n for stream in current_exp_stream_pool:\n print('stream no.: ', _stream_count)\n _stream_count += 1\n exp = stream.value\n if is_complete(exp):\n if make_truth_table(exp, all_args) == full_adder_carrier_truth_table:\n print(exp)\n sys.exit()\n else:\n if operation_count_all(exp) <= operation_constraint:\n next.extend(stream.extend())\n\n current_exp_stream_pool = next\n \n" }, { "alpha_fraction": 0.567251443862915, "alphanum_fraction": 0.5877193212509155, "avg_line_length": 15.190476417541504, "blob_id": "fe13b8997433c6782b86bc07d17e9db3ca86d56b", "content_id": "3e1d2f24f799ab4f406c9389fbc0d1ebde8aaf97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 342, "license_type": "no_license", "max_line_length": 74, "num_lines": 21, "path": "/Enum.py", "repo_name": "lackhoa/logic_gate", "src_encoding": "UTF-8", "text": "from enum import Enum\n\n\n# Since I need to sort theme later, the value of the enums are not default\n\nclass Const(Enum):\n \"\"\"Represents a boolean constant\"\"\"\n ZERO = 0\n ONE = 1\n\nclass Var(Enum):\n \"\"\"Represents a variable\"\"\"\n A = 2\n B = 3\n C = 4\n\n\nclass Op(Enum):\n \"\"\"Represents an operation\"\"\"\n AND = 5\n XOR = 6\n\n\n" }, { "alpha_fraction": 0.5744141936302185, "alphanum_fraction": 0.5763141512870789, "avg_line_length": 29.365385055541992, "blob_id": "a94d9a74cb0e60193d9639a17ca6a93b3a17e845", "content_id": "d16d8a8c9c282b25087ae85a89a6b3829c0e30d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1579, "license_type": "no_license", "max_line_length": 81, "num_lines": 52, "path": "/ExtensibleStream.py", "repo_name": "lackhoa/logic_gate", "src_encoding": "UTF-8", "text": "class ExtensibleStream:\n \"\"\"\n You can think of a stream as a list that can expand into\n a bigger list. The expansion of the list is defined in the\n extend() method.This is not bad use of OOP. 
If you try to strip out the layer\n of object, you will only get a list with no distinct behavior\n Each time the stream extend, the list (the value) will receive a new node\n A node can actually be anything\n \"\"\"\n\n def __init__(self, value: list):\n \"\"\"\n value is the list of nodes\n \"\"\"\n self.value = value\n\n def extend(self):\n \"\"\"\n Returns the streams that can be created from this stream\n \"\"\"\n pass\n\n # Extend as many times as you want\n def extend_count(self, count: int) -> list:\n \"\"\"\n\n :param count: The number of times to extend (not 0)\n :return: The streams that are created\n \"\"\"\n if count == 0:\n raise ValueError('Hey, you cannot extend 0 times')\n\n current_streams = [self]\n for _ in range(count):\n next_streams = []\n for stream in current_streams:\n next_streams.extend(stream.extend())\n current_streams = next_streams\n return current_streams\n\n # use this in 'extend(self)'\n def _make_child(self, new_node):\n \"\"\"\n Why should the new node value be a list?\n \"\"\"\n # Put the the value of the parent before the child\n tmp = []\n tmp.extend(self.value)\n tmp.append(new_node)\n child = type(self)(tmp)\n\n return child\n" }, { "alpha_fraction": 0.5885714292526245, "alphanum_fraction": 0.6342856884002686, "avg_line_length": 20.875, "blob_id": "d7c1302f967fd5e10667c08f0165d4572daf2145", "content_id": "7ca857a98325be20f9f87f9e1de1913ef3cda2ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 175, "license_type": "no_license", "max_line_length": 43, "num_lines": 8, "path": "/test_manual.py", "repo_name": "lackhoa/logic_gate", "src_encoding": "UTF-8", "text": "from LogicExpStream import *\n\nfrom LogicExp import *\n\nfirst_dict = {(0, 1): True, (1, 2): False}\nsecond_dict = {(1, 2): False, (0, 1): True}\n\nprint(first_dict == second_dict)\n" }, { "alpha_fraction": 0.6221374273300171, "alphanum_fraction": 0.6221374273300171, "avg_line_length": 20.83333396911621, "blob_id": "238f4807665c245cf93320ce20ebd8192120e734", "content_id": "12bcb21b813f832be86409f27f5377b93a12a269", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 262, "license_type": "no_license", "max_line_length": 59, "num_lines": 12, "path": "/BitStream.py", "repo_name": "lackhoa/logic_gate", "src_encoding": "UTF-8", "text": "from ExtensibleStream import ExtensibleStream\n\n\nclass BitStream(ExtensibleStream): # MAKE THIS INTO A FILE\n \"\"\"\n Tested!\n \"\"\"\n\n def extend(self):\n zero = self._make_child(True)\n one = self._make_child(False)\n return [zero, one]\n" }, { "alpha_fraction": 0.5701689720153809, "alphanum_fraction": 0.5777614712715149, "avg_line_length": 28.16428565979004, "blob_id": "3a0decbc9960ab2df1fa7c45bc421487748cc427", "content_id": "d1597af651be985484b56c9f0e1c445825b525aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4083, "license_type": "no_license", "max_line_length": 161, "num_lines": 140, "path": "/LogicExp.py", "repo_name": "lackhoa/logic_gate", "src_encoding": "UTF-8", "text": "from Enum import Op, Var, Const\n\n\n# This file deals with the real logical expressions, it has all necessary functions\n\ndef _evaluate(exp: list, x: int, y: int, args: list, linker: dict) -> bool:\n \"\"\"\n This function is to evaluate an expression.\n Input: x is the level count, y is the count horizontally, and args is the list of variable's value\n Added: linker (see explanation in 'make_linker' 
function)\n TESTING!\n \"\"\"\n current = exp[x][y]\n\n if current in list(Var):\n result = args[current.value - 2]\n elif current in list(Const):\n if current == Const.ONE:\n result = True\n else: # current = Const.ZERO\n result = False\n\n else: # If it is an operation\n # Finding y_guider by counting the number of operations that appear before exp[x][y]\n # y_guider = 0\n # if not y == 0: # Yeah we can't count if there's nothing before that\n # for yy in range(y):\n # if exp[x][yy] in list(Op):\n # y_guider += 1\n\n # getting left and right children\n # left = _evaluate(exp, x + 1, y_guider * 2, args)\n # right = _evaluate(exp, x + 1, y_guider * 2 + 1, args)\n\n # getting left and right children:\n left = _evaluate(exp, x + 1, linker[(x, y)][0], args, linker)\n right = _evaluate(exp, x + 1, linker[(x, y)][1], args, linker)\n\n if current == Op.AND:\n result = left and right\n else: # If it's XOR\n result = (left != right) # Now you know that != is XOR\n\n return result\n\n\ndef evaluate(exp: list, args: list) -> bool:\n \"\"\"\n This function is to evaluate an expression.\n Evaluates the root node of the expression tree\n TESTING!\n \"\"\"\n linker = _make_linker(exp)\n return _evaluate(exp, 0, 0, args, linker)\n\n\ndef make_truth_table(exp: list, args_tuples: list) -> dict:\n \"\"\"\n This does the same thing as 'evaluation', but with many combinations of arguments stored in tuples\n remarks: DON'T feed it incomplete expressions\n return a dictionary with args_tuples as keys and boolean value of the expression as value\n TESTING\n \"\"\"\n result = {}\n\n linker = _make_linker(exp)\n for args in args_tuples:\n result[args] = _evaluate(exp, 0, 0, args, linker)\n\n return result\n\n\ndef _make_linker(exp: list) -> dict:\n \"\"\"\n Linker is a mapping tool to evaluate an expression faster\n Why faster? 
It can receive a location of a node (operation) and spit out the y value of the location of its children\n remarks: DON'T feed it incomplete expression!\n return: a linker as a dictionary with the (x, y) value of the parent nodes in tuples as keys and the two y values of their children nodes in tuples as values\n TESTING!\n \"\"\"\n linker = {}\n x = 0\n for level in exp:\n y_slider = 0\n y = 0\n\n for exp_symbol in level:\n if exp_symbol in list(Op):\n linker[(x, y)] = (y_slider, y_slider + 1)\n y_slider += 2\n\n y += 1\n x += 1\n\n return linker\n\n\ndef operation_count(level: list) -> int:\n \"\"\"\n Counts the number of operations of the last level from a list of levels\n TESTED!\n \"\"\"\n count = 0\n for i in level:\n if type(i) is Op: # Op means 'operation'\n count += 1\n\n return count\n\ndef operation_count_all(exp) -> int:\n \"\"\"\n Counts the number of operations of an expression\n Returns 0 if the expression is empty\n \"\"\"\n count = 0\n if not exp == []:\n for level in exp: \n for i in level:\n if type(i) is Op: \n count += 1\n else:\n count = 0\n\n return count\n\ndef is_complete(exp: list) -> bool:\n \"\"\"\n Check if an expression under the form of list of levels a complete expression\n TESTED\n \"\"\"\n result = False\n\n if not exp == []:\n last_level = exp[len(exp) - 1]\n for exp in last_level:\n if type(exp) is Op:\n result = False\n break\n else: result = True\n return result\n" }, { "alpha_fraction": 0.8103896379470825, "alphanum_fraction": 0.8103896379470825, "avg_line_length": 127.66666412353516, "blob_id": "9d36c78a6da7a8c8ec492434d20dcef1ad35dd0f", "content_id": "c61cbc66d5ed20b29b7d9839eedb04ca61041289", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 385, "license_type": "no_license", "max_line_length": 244, "num_lines": 3, "path": "/README.md", "repo_name": "lackhoa/logic_gate", "src_encoding": "UTF-8", "text": "# logic_gate\nThis is a program for generating logic gates to solve math problems like addition, substraction... basically everything a computer can do. There are absolutely no limit to what logic gate type you want to use and the number of operands, either.\nThe program was built for another purpose of testing the application of extensible streams and ended up pretty heavility on it." } ]
9
AurelSann/StarWars
https://github.com/AurelSann/StarWars
d519622041fa7c4c8e39949d2512376314f4744f
a35521e2271dbd4f21f247637f84ccae5ac3fcb7
4dd384b90b38551ecc7cad750a5cdc26e0eac58f
refs/heads/master
2023-03-11T11:26:29.983503
2021-03-05T11:33:57
2021-03-05T11:33:57
341,173,166
2
0
null
2021-02-22T11:12:20
2021-03-04T17:17:38
2021-03-04T18:12:45
Jupyter Notebook
[ { "alpha_fraction": 0.5124062895774841, "alphanum_fraction": 0.5606926083564758, "avg_line_length": 30.560564041137695, "blob_id": "93bb16865dd87b436b46a163c11a780ebc4bb0d2", "content_id": "38993e57138c2c98301685c2082c4e569a433580", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11205, "license_type": "no_license", "max_line_length": 192, "num_lines": 355, "path": "/StarWars/decisiontree.py", "repo_name": "AurelSann/StarWars", "src_encoding": "UTF-8", "text": "import numpy as np\n\nclass DecisionTree():\n\n def __init__(self, prediction):\n self.prediction = prediction\n self.decision_answer = []\n\n\n def cat1(self):\n '''cat1 : what does it look like ?'''\n\n smooth = self.prediction[0]\n #df['Class1.1']\n\n feature_or_disk = self.prediction[1]\n #df['Class1.2']\n\n star = self.prediction[2]\n #df['Class1.3']\n\n if smooth > feature_or_disk and smooth > star:\n #go to cat 7\n self.decision_answer.append('smooth')\n self.cat7()\n\n elif feature_or_disk > smooth and feature_or_disk > star:\n #STOP --> étoile\n self.decision_answer.append('feature_or_disk')\n self.cat2()\n\n else:\n #star > all:\n #go to class 2\n self.decision_answer.append('star')\n\n return self.decision_answer\n\n def cat2(self):\n '''cat2 : featureordisk --> could this be a disk view edge-on ?'''\n\n edgeon_yes = self.prediction[3]\n #df['Class2.1']\n edgeon_no = self.prediction[4]\n #df['Class2.2']\n\n if edgeon_yes > edgeon_no:\n #go to cat 9\n self.decision_answer.append('edgeon_yes')\n self.cat9()\n\n else:\n #edgeon_no> edgeon_yes\n self.decision_answer.append('edgeon_no')\n #go to cat 3\n self.cat3()\n\n return self.decision_answer\n\n\n def cat3(self):\n '''cat 3 : featureordisk --> edgeon_no --> is there a sign of bar feature ?'''\n\n bar_yes = self.prediction[5]\n #df['Class3.1']\n bar_no = self.prediction[6]\n #df['Class3.2']\n\n #all : go to cat 4\n if bar_yes > bar_no:\n self.decision_answer.append('bar_yes')\n self.cat4()\n\n else:\n #bar_no > bar_yes\n self.decision_answer.append('bar_no')\n self.cat4()\n\n return self.decision_answer\n\n\n\n def cat4(self):\n '''cat4 : featureordisk --> no --> bar_yes/no --> spiral arm pattern ?'''\n\n arm_yes = self.prediction[7]\n #df['Class4.1']\n arm_no = self.prediction[8]\n #df['Class4.2']\n\n if arm_yes > arm_no:\n #go to cat10\n self.decision_answer.append('arm_yes')\n self.cat10()\n\n else:\n #arm_yes > arm_no\n self.decision_answer.append('arm_no')\n #go to cat5\n self.cat5()\n\n return self.decision_answer\n\n\n def cat5(self):\n '''cat5 : prominence of central buldge ?\n featureordisk --> edgeon_no --> bar_yes/no --> arm_no\n featureordisk --> edgeon_no --> --> bar_yes/no --> arm_yes --> all arms sizes --> all number of arms'''\n\n no_bulge = self.prediction[9]\n #df['Class5.1']\n noticeable_bulge = self.prediction[10]\n #df['Class5.2']\n obvious_bulge = self.prediction[11]\n #df['Class5.3']\n dominant_bulge = self.prediction[12]\n #df['Class5.4']\n\n #all: go to cat 6\n if no_bulge > noticeable_bulge and no_bulge > obvious_bulge and no_bulge > dominant_bulge:\n self.decision_answer.append('no_bulge')\n self.cat6()\n\n elif noticeable_bulge > no_bulge and noticeable_bulge > obvious_bulge and noticeable_bulge > dominant_bulge:\n self.decision_answer.append('noticeable_bulge')\n self.cat6()\n\n elif obvious_bulge > no_bulge and obvious_bulge > noticeable_bulge and obvious_bulge > dominant_bulge:\n self.decision_answer.append('obvious_bulge')\n self.cat6()\n\n else:\n #dominant_bulge > all\n 
self.decision_answer.append('dominant_bulge')\n self.cat6()\n\n\n return self.decision_answer\n\n\n def cat6(self):\n '''cat6 - is there anything odd ?\n #featureordisk --> edgeon_no --> bar_yes/no --> arm_no --> all\n #Smooth --> class 7 : all\n #featureordisk --> edgeon_yes --> class 9 :all'''\n\n odd_yes = self.prediction[13]\n #df['Class6.1']\n odd_no = self.prediction[14]\n #df['Class6.2']\n\n if odd_yes > odd_no:\n #go to cat 8\n self.decision_answer.append('odd_yes')\n self.cat8()\n\n else:\n self.decision_answer.append('odd_no')\n #stop\n\n return self.decision_answer\n\n '''DEUXIEME CHEMIN'''\n\n def cat7(self):\n #cat7 : Smooth - how rounded is it ?\n completely_round = self.prediction[15]\n #df['Class7.1']\n in_between = self.prediction[16]\n #df['Class7.2']\n cigar_shaped = self.prediction[17]\n #df['Class7.3']\n\n #all: go to cat6\n if completely_round > in_between and completely_round > cigar_shaped:\n self.decision_answer.append('completely_round')\n self.cat6()\n\n elif in_between > completely_round and in_between > cigar_shaped:\n self.decision_answer.append('in_between')\n self.cat6()\n\n else:\n #cigar_shaped > all\n self.decision_answer.append('cigar_shaped')\n self.cat6()\n\n return self.decision_answer\n\n\n def cat8(self):\n #cat8 : featureordisk --> edgeon_no --> bar_yes/no --> arm_no --> all --> odd_yes --> ring,\n #disturber or irregular ?\n\n ring = self.prediction[18]\n #df['Class8.1']\n lens_or_arc = self.prediction[19]\n #df['Class8.2']\n disturbed = self.prediction[20]\n #df['Class8.3']\n irregular = self.prediction[21]\n #df['Class8.4']\n other = self.prediction[22]\n #df['Class8.5']\n merger = self.prediction[23]\n #df['Class8.6']\n dust_lane = self.prediction[24]\n #df['Class8.7']\n\n #all: stop\n if ring > lens_or_arc and ring > disturbed and ring > irregular and ring > other and ring > merger and ring > dust_lane:\n self.decision_answer.append('ring')\n #stop\n\n elif lens_or_arc > ring and lens_or_arc > disturbed and lens_or_arc > irregular and lens_or_arc > other and lens_or_arc > merger and lens_or_arc > dust_lane:\n self.decision_answer.append('lens_or_arc')\n #stop\n\n elif disturbed > lens_or_arc and disturbed > ring and disturbed > irregular and disturbed > other and disturbed > merger and disturbed > dust_lane:\n self.decision_answer.append('disturbed')\n #stop\n\n elif irregular > lens_or_arc and irregular > disturbed and irregular > ring and irregular > other and irregular > merger and irregular > dust_lane:\n self.decision_answer.append('irregular')\n #stop\n\n elif other > lens_or_arc and other > disturbed and other > irregular and other > ring and other > merger and other > dust_lane:\n self.decision_answer.append('other')\n #stop\n\n elif merger > lens_or_arc and merger > disturbed and merger > irregular and merger > other and merger > ring and merger > dust_lane:\n self.decision_answer.append('merger')\n #stop\n\n else:\n #dust_lane > all\n self.decision_answer.append('dust_lane')\n #stop\n return self.decision_answer\n\n\n\n def cat9(self):\n #cat9 : featureordisk --> edgeon_yes --> if there a bulge, if yes what shape ?\n\n rounded = self.prediction[25]\n #df['Class9.1']\n boxy = self.prediction[26]\n #df['Class9.2']\n no_bulge2 = self.prediction[27]\n #df['Class9.3']\n\n # all: go to cat6\n if rounded > boxy and rounded > no_bulge2:\n self.decision_answer.append('rounded')\n self.cat6()\n\n elif boxy > rounded and boxy > no_bulge2:\n self.decision_answer.append('boxy')\n self.cat6()\n\n else:\n #no_bulge2 > all\n 
self.decision_answer.append('no_bulge2')\n self.cat6()\n\n return self.decision_answer\n\n\n\n def cat10(self):\n #cat10 : featureordisk --> edgeon_no --> bar_yes/no --> arm_yes --> how tight are the arms ?\n\n tight = self.prediction[28]\n #df['Class10.1']\n medium = self.prediction[29]\n #df['Class10.2']\n loose = self.prediction[30]\n #df['Class10.3']\n\n #all: go to cat 11\n if tight > medium and tight > loose:\n self.decision_answer.append('tight')\n self.cat11()\n\n elif medium > tight and medium > loose:\n self.decision_answer.append('medium')\n self.cat11()\n\n else:\n #loose > all\n self.decision_answer.append('loose')\n self.cat11()\n\n return self.decision_answer\n\n\n\n def cat11(self):\n #cat 11: featureordisk --> edgeon_no --> bar_yes/no --> arm_yes --> all arms sizes --> how many arms ?\n\n one_arm = self.prediction[31]\n #df['Class11.1']\n two_arms = self.prediction[32]\n #df['Class11.2']\n three_arms = self.prediction[33]\n #df['Class11.3']\n four_arms = self.prediction[34]\n #df['Class11.4']\n more_than_four_arms = self.prediction[35]\n #df['Class11.5']\n cant_tell_arms = self.prediction[36]\n #df['Class11.6']\n\n #all: go to cat5\n if one_arm > two_arms and one_arm > three_arms and one_arm > four_arms and one_arm > more_than_four_arms and one_arm > cant_tell_arms:\n self.decision_answer.append('one_arm')\n self.cat5()\n\n elif two_arms > one_arm and two_arms > three_arms and two_arms > four_arms and two_arms > more_than_four_arms and two_arms > cant_tell_arms:\n self.decision_answer.append('two_arms')\n self.cat5()\n\n elif three_arms > one_arm and three_arms > two_arms and three_arms > four_arms and three_arms > more_than_four_arms and three_arms > cant_tell_arms:\n self.decision_answer.append('three_arms')\n self.cat5()\n\n elif four_arms > one_arm and four_arms > three_arms and four_arms > two_arms and four_arms > more_than_four_arms and four_arms > cant_tell_arms:\n self.decision_answer.append('four_arms')\n self.cat5()\n\n elif more_than_four_arms > one_arm and more_than_four_arms > three_arms and more_than_four_arms > four_arms and more_than_four_arms > two_arms and more_than_four_arms > cant_tell_arms:\n self.decision_answer.append('more_than_four_arms')\n self.cat5()\n\n else:\n #cant_tell_arms > all\n self.decision_answer.append('cant_tell_arms')\n self.cat5()\n\n\n return self.decision_answer\n\nif __name__ == \"__main__\":\n prediction = np.array([0.0061849 , 0.2533387, 0.09984358, 0.00634132, 0.9516694,\n 0.31117438, 0.18507952, 0.022126181, 0.1901462, 0.15705012,\n 0.19438594, 0.03589064, 0.21429499, 0.078570501, 0.19592663,\n 0.2217514 , 0.05080319, 0.03898825, 0.01447876, 0.02751926,\n 0.02383724, 0.06303088, 0.04324729, 0.00319327, 0.06603167,\n 0.01039219, 0.02341971, 0.08231398, 0.007125346, 0.03151208,\n 0.1228107, 0.07996398, 0.01434553, 0.00636853, 0.00664883,\n 0. 
])\n trainer = DecisionTree(prediction)\n result = trainer.cat1()\n print(result)\n" }, { "alpha_fraction": 0.5381836891174316, "alphanum_fraction": 0.54592365026474, "avg_line_length": 33, "blob_id": "18f6e19e8c3eefdd5cdac8cb2cca4c450602e804", "content_id": "7d8ab8be04dbca945a0545c54173264b9ba0e038", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1938, "license_type": "no_license", "max_line_length": 89, "num_lines": 57, "path": "/StarWars/DataGenerator.py", "repo_name": "AurelSann/StarWars", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom PIL import Image\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom data import load_images\n\nclass DataGenerator(tf.keras.utils.Sequence):\n\n def __init__(self, df, batch_size=64, shuffle=True):\n self.batch_size = batch_size\n self.df = df\n self.indices = self.df.index.tolist()\n self.shuffle = shuffle\n self.augmentator = ImageDataGenerator(\n rotation_range=90,\n width_shift_range=[-50,50],\n height_shift_range=0.5,\n brightness_range=[0.2, 1.9],\n horizontal_flip=True,\n fill_mode='nearest')\n self.on_epoch_end()\n\n def __len__(self):\n '''returns number of minibatches per epoch'''\n return len(self.indices) // self.batch_size\n\n def on_epoch_end(self):\n '''shuffles the indices'''\n self.index = np.arange(len(self.indices))\n if self.shuffle == True:\n np.random.shuffle(self.index)\n\n def __get_X_image(self, df):\n '''returns images'''\n X = load_images(df)\n return X\n\n def __get_Y(self, df):\n '''returns y'''\n return np.array(df.drop(columns=[\"image\"]))\n\n def _get_data(self, batch):\n '''returns batch of images and y'''\n df_batch = self.df.query(\"index in @batch\")\n X = self.__get_X_image(df_batch)\n y = self.__get_Y(df_batch)\n return X, y\n\n def __getitem__(self, index):\n '''creates batches and returns final X and y'''\n index = self.index[index * self.batch_size:(index + 1) * self.batch_size]\n batch = [self.indices[k] for k in index]\n X, y = self._get_data(batch)\n\n # Apply the ImageDataGenerator stored as a class attr to augment the batch\n X_augmented = self.augmentator.flow(X, batch_size=self.batch_size, shuffle=False)\n return next(X_augmented), y\n" }, { "alpha_fraction": 0.7362204790115356, "alphanum_fraction": 0.7755905389785767, "avg_line_length": 8.129032135009766, "blob_id": "d769fd6c975c73c70f1dff6dc8a14481406cc5ad", "content_id": "7bcdc50e93763f3211e54c969b4b6381df4ca257", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 254, "license_type": "no_license", "max_line_length": 17, "num_lines": 31, "path": "/requirements.txt", "repo_name": "AurelSann/StarWars", "src_encoding": "UTF-8", "text": "# packaging\npip>=9\nsetuptools>=26\ntwine\nwheel>=0.29\n\n# data science\nnumpy\npandas\ntensorflow\npillow\n\n# tests/linter\nblack\ncoverage\nflake8\npytest\nyapf\n\n# API\nuvicorn\npython-multipart\nfastapi\napi\n\n\n# utilities\nsix>=1.14\njoblib\nmemoized-property\ntermcolor\n\n\n" }, { "alpha_fraction": 0.6241909265518188, "alphanum_fraction": 0.6318770051002502, "avg_line_length": 32.84931564331055, "blob_id": "d7a33d3ac0afc14774ecfed300a7af9af1a6ff64", "content_id": "0b9999fd4c49bf649cb90f76fe71f0e4372fc49f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2472, "license_type": "no_license", "max_line_length": 152, "num_lines": 73, "path": "/StarWars/data.py", "repo_name": "AurelSann/StarWars", "src_encoding": "UTF-8", "text": "'''Get DataFrame from Aurelien 
Bucket'''\n\nimport pandas as pd\nfrom google.cloud import storage\nimport numpy as np\nfrom PIL import Image\n\n\nBUCKET_DATA_PATH = \"gs://lw-verspieren-starwars/data/\"\n\n\ndef rename(GalaxyID):\n return str(GalaxyID)+'.jpg'\n\ndef get_data(y, nrows): # add nrows to sample\n '''returns a DataFrame with nrows from the GCS bucket'''\n\n if y == 'train':\n # Loading DataFrames\n df_y = pd.read_csv(f'{BUCKET_DATA_PATH}training_solutions_rev1.csv', nrows=nrows)\n df_images = pd.read_csv(f'{BUCKET_DATA_PATH}images_liste_train.csv', nrows=nrows)\n\n # Checking images correspond to df\n list_images = list(df_images['0'])\n df_Galaxy_ID = pd.DataFrame([list_images[i].replace('.jpg','') for i in range(len(list_images))]).rename(columns={0:'GalaxyID'}).astype('int64')\n df = df_y.merge(df_Galaxy_ID, on='GalaxyID', how='inner')\n\n # Preparing df\n df['image'] = df_images['0']\n df_data = df.drop(columns=['GalaxyID'])\n\n return df_data\n\n if y == 'test':\n # Loading DataFrame\n df_y = pd.read_csv(f'{BUCKET_DATA_PATH}central_pixel_benchmark.csv', nrows=nrows)\n\n # Preparing df\n df_y['image'] = df_y['GalaxyID'].apply(rename)\n df_data = df_y.drop(columns=['GalaxyID'])\n\n return df_data\n raise ValueError(\"Please provide 'train' or 'test'\")\n\ndef get_full_data():\n '''returns a DataFrame with all data'''\n # Loading train\n df_y = pd.read_csv(f'{BUCKET_DATA_PATH}training_solutions_rev1.csv')\n df_images = pd.read_csv(f'{BUCKET_DATA_PATH}images_liste_train.csv')\n\n # Checking images correspond to df\n list_images = list(df_images['0'])\n df_Galaxy_ID = pd.DataFrame([list_images[i].replace('.jpg','') for i in range(len(list_images))]).rename(columns={0:'GalaxyID'}).astype('int64')\n df_train = df_y.merge(df_Galaxy_ID, on='GalaxyID', how='inner')\n\n # Loading test\n df_test = pd.read_csv(f'{BUCKET_DATA_PATH}central_pixel_benchmark.csv')\n return {'train':df_train, 'test':df_test}\n\ndef load_image(image):\n '''returns one image'''\n folder = f'train_bucket/images_traindata_images_train_{image}'\n img = Image.open(folder)\n img_array = np.array(img)\n return np.resize(img_array, (224,224,3))\n\ndef load_images(df):\n '''returns array of images'''\n img_list = []\n for _, row in df.iterrows():\n img = load_image(row[\"image\"])\n img_list.append(img)\n return np.stack(img_list)\n\n" }, { "alpha_fraction": 0.6102550029754639, "alphanum_fraction": 0.6245446801185608, "avg_line_length": 26.229007720947266, "blob_id": "85654782e653584b449cb52b38c17976890982f4", "content_id": "9187baf7a7740c7c9d0bf2c5270f6fed552d62aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 3569, "license_type": "no_license", "max_line_length": 179, "num_lines": 131, "path": "/Makefile", "repo_name": "AurelSann/StarWars", "src_encoding": "UTF-8", "text": "# ----------------------------------\n# INSTALL & TEST\n# ----------------------------------\ninstall_requirements:\n\t@pip install -r requirements.txt\n\ncheck_code:\n\t@flake8 scripts/* StarWars/*.py\n\nblack:\n\t@black scripts/* StarWars/*.py\n\ntest:\n\t@coverage run -m pytest tests/*.py\n\t@coverage report -m --omit=\"${VIRTUAL_ENV}/lib/python*\"\n\nftest:\n\t@Write me\n\nclean:\n\t@rm -f */version.txt\n\t@rm -f .coverage\n\t@rm -fr */__pycache__ */*.pyc __pycache__\n\t@rm -fr build dist\n\t@rm -fr StarWars-*.dist-info\n\t@rm -fr StarWars.egg-info\n\ninstall:\n\t@pip install . 
-U\n\nall: clean install test black check_code\n\n\nuninstall:\n\t@python setup.py install --record files.txt\n\t@cat files.txt | xargs rm -rf\n\t@rm -f files.txt\n\ncount_lines:\n\t@find ./ -name '*.py' -exec wc -l {} \\; | sort -n| awk \\\n '{printf \"%4s %s\\n\", $$1, $$2}{s+=$$0}END{print s}'\n\t@echo ''\n\t@find ./scripts -name '*-*' -exec wc -l {} \\; | sort -n| awk \\\n\t\t '{printf \"%4s %s\\n\", $$1, $$2}{s+=$$0}END{print s}'\n\t@echo ''\n\t@find ./tests -name '*.py' -exec wc -l {} \\; | sort -n| awk \\\n '{printf \"%4s %s\\n\", $$1, $$2}{s+=$$0}END{print s}'\n\t@echo ''\n\n# ----------------------------------\n# UPLOAD PACKAGE TO PYPI\n# ----------------------------------\nPYPI_USERNAME=<AUTHOR>\nbuild:\n\t@python setup.py sdist bdist_wheel\n\npypi_test:\n\t@twine upload -r testpypi dist/* -u $(PYPI_USERNAME)\n\npypi:\n\t@twine upload dist/* -u $(PYPI_USERNAME)\n\n\n# ----------------------------------\n# CREATE BUCKET GCP\n# ----------------------------------\n\n# path of the file to upload to gcp (the path of the file should be absolute or should match the directory where the make command is run)\n\nLOCAL_PATH= '/Users/aurelienverspieren/code/AurelSann/StarWars/raw_data/images_liste_train.csv' # Replace with your local path to `data.csv` and make sure to put it between quotes\n\n\n# project id\nPROJECT_ID=wagon-bootcamp-305112 # Replace with your Project's ID\n\n# bucket name\nBUCKET_NAME=lw-komendyak-starwars # Use your Project's name as it should be unique\n\n# bucket directory in which to store the uploaded file (we choose to name this data as a convention)\nBUCKET_FOLDER=data\n\n# name for the uploaded file inside the bucket folder (here we choose to keep the name of the uploaded file)\n# BUCKET_FILE_NAME=another_file_name_if_I_so_desire.csv\nBUCKET_FILE_NAME=$(shell basename ${LOCAL_PATH})\n\nREGION=europe-west1\n\nset_project:\n\t-@gcloud config set project ${PROJECT_ID}\n\ncreate_bucket:\n\t-@gsutil mb -l ${REGION} -p ${PROJECT_ID} gs://${BUCKET_NAME}\n\nupload_data:\n\t-@gsutil cp ${LOCAL_PATH} gs://lw-verspieren-starwars/data/images_train #${BUCKET_FILE_NAME}\n\n# ----------------------------------\n# PRODUCTION and DOCKER\n# ----------------------------------\n\n# directive to run api locally\n\nrun_api:\n\tuvicorn StarWars.api.simple:app --reload\n\n# directive to run site locally\n\nrun_site:\n\tstreamlit run StarWars/api/site.py\n\n# docker directives\n\nGCP_PROJECT_ID=wagon-bootcamp-305110\nDOCKER_IMAGE_NAME=star-wars\nGCR_MULTI_REGION=eu.gcr.io\nGCR_REGION=europe-west1\n\ndocker_build:\n\tdocker build -t ${GCR_MULTI_REGION}/${GCP_PROJECT_ID}/${DOCKER_IMAGE_NAME} .\n\ndocker_it:\n\tdocker run -it -e PORT=8000 -p 8000:8000 ${GCR_MULTI_REGION}/${GCP_PROJECT_ID}/${DOCKER_IMAGE_NAME} sh\n\ndocker_run:\n\tdocker run -e PORT=8000 -p 8000:8000 ${GCR_MULTI_REGION}/${GCP_PROJECT_ID}/${DOCKER_IMAGE_NAME}\n\ndocker_push:\n\tdocker push ${GCR_MULTI_REGION}/${GCP_PROJECT_ID}/${DOCKER_IMAGE_NAME}\n\ndocker_deploy:\n\tgcloud run deploy --image ${GCR_MULTI_REGION}/${GCP_PROJECT_ID}/${DOCKER_IMAGE_NAME} --platform managed --region ${GCR_REGION}\n\n\n" }, { "alpha_fraction": 0.6335078477859497, "alphanum_fraction": 0.6684118509292603, "avg_line_length": 37.13333511352539, "blob_id": "a1ffe7aab20a93939d7382dd29b89050ea406", "content_id": "1bfe7ebe245256fd8bb55595752f8134e4012e5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 573, "license_type": "no_license", "max_line_length": 114, "num_lines": 15, "path": 
"/StarWars/ImagePrediction.py", "repo_name": "AurelSann/StarWars", "src_encoding": "UTF-8", "text": "\nfrom tensorflow import keras\nimport numpy as np\nfrom PIL import Image\n\nmodel = keras.models.load_model('final_model.h5')\n\n\nclass ImagePrediction():\n def __init__(self, image):\n self.image = image\n self.open = Image.open(self.image)\n self.array = np.array(self.open)\n self.resize = np.resize(self.array, (224, 224, 3))\n self.reshape = self.resize.reshape(-1, 224, 224, 3) #Reshape de l'image en vecteur de 4 dimensions\n self.prediction = model.predict(self.reshape)[0] #prediction a partir du modele --> vecteur de 37 classes\n" }, { "alpha_fraction": 0.7098138928413391, "alphanum_fraction": 0.7115059494972229, "avg_line_length": 27.14285659790039, "blob_id": "04a905723b73a7b2e1c646c49a02354310de9b68", "content_id": "3d847f27df2efc5f132a55d6a22f58f2c2b768d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1182, "license_type": "no_license", "max_line_length": 88, "num_lines": 42, "path": "/StarWars/api/simple.py", "repo_name": "AurelSann/StarWars", "src_encoding": "UTF-8", "text": "from fastapi import FastAPI, File\n# import numpy as np\nfrom StarWars.decisiontree import DecisionTree\nfrom StarWars.scientistvalue import HubbleValue\nfrom StarWars.ImagePrediction import ImagePrediction\nimport tensorflow\n\nmodel = tensorflow.keras.models.load_model('final_model.h5')\n\napp = FastAPI()\n\n@app.get(\"/\")\ndef root():\n return \"coucou vas voir dans /docs\"\n\n@app.post(\"/uploadfile\")\nasync def create_upload_file(file: bytes = File(...)):\n image_path = \"image.png\"\n with open(image_path, \"wb\") as f:\n f.write(file)\n # make prediction\n pred = image_prediction(image_path)\n # decision tree\n result = process_decision_tree(pred)\n # scientific denomination\n denom = process_scientific_denomination(result)\n return {\"hubble\": denom, \"features\": result, \"pred\": list([float(f) for f in pred])}\n\ndef image_prediction(image):\n prediction = ImagePrediction(image)\n pred = prediction.prediction\n return pred\n\ndef process_decision_tree(pred):\n tree = DecisionTree(pred)\n result = tree.cat1()\n return result\n\ndef process_scientific_denomination(result):\n trainer = HubbleValue(result)\n final = trainer.final()\n return final\n" }, { "alpha_fraction": 0.515312910079956, "alphanum_fraction": 0.6013315320014954, "avg_line_length": 31.938596725463867, "blob_id": "5783beb568b10aa54d4e09ed9a9d34128cd8fe43", "content_id": "466673e6adebc3f774cc97047af4133ce1d35445", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3755, "license_type": "no_license", "max_line_length": 122, "num_lines": 114, "path": "/StarWars/scientistvalue.py", "repo_name": "AurelSann/StarWars", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom StarWars.decisiontree import DecisionTree\n\nclass HubbleValue():\n\n def __init__(self, decision_answer):\n self.decision_answer = decision_answer\n self.hubble = ''\n\n def final(self):\n\n\n if 'bar_no' in self.decision_answer and 'arm_no' in self.decision_answer and 'odd_no' in self.decision_answer:\n self.hubble = 'S0'\n\n elif 'bar_yes' in self.decision_answer and 'arm_yes' in self.decision_answer and 'tight' in self.decision_answer:\n self.hubble = 'SBa/SBb'\n\n elif 'bar_yes' in self.decision_answer and 'arm_yes' in self.decision_answer and 'medium' in self.decision_answer:\n self.hubble = 'SBb/SBc'\n\n elif 'bar_yes' in self.decision_answer 
and 'arm_yes' in self.decision_answer and 'loose' in self.decision_answer:\n self.hubble = 'SBc/SBd'\n\n elif 'bar_yes' in self.decision_answer and 'arm_no' in self.decision_answer:\n self.hubble = 'SB0'\n\n\n elif 'bar_no' in self.decision_answer and 'arm_yes' in self.decision_answer and 'tight' in self.decision_answer:\n self.hubble = 'Sa/Sb'\n\n\n elif 'bar_no' in self.decision_answer and 'arm_yes' in self.decision_answer and 'medium' in self.decision_answer:\n self.hubble = 'Sb/Sc'\n\n elif 'bar_no' in self.decision_answer and 'arm_yes' in self.decision_answer and 'loose' in self.decision_answer:\n self.hubble = 'Sc/Sd'\n\n\n elif 'tight' in self.decision_answer:\n self.hubble = 'Sa/Sb'\n\n elif 'medium' in self.decision_answer:\n self.hubble = 'Sb/Sc'\n\n elif 'loose' in self.decision_answer:\n self.hubble = 'Sc/Sd'\n\n\n #Cat 9\n elif 'boxy' in self.decision_answer:\n self.hubble = 'SB'\n\n elif 'rounded' in self.decision_answer:\n self.hubble = 'S'\n\n #Cat 7\n elif 'completely_round' in self.decision_answer:\n self.hubble = 'E0-E2'\n\n elif 'in_between' in self.decision_answer:\n self.hubble = 'E3-E5'\n\n elif 'cigar_shaped' in self.decision_answer:\n self.hubble = 'E6-E7'\n\n #Cat 8\n elif 'ring' in self.decision_answer:\n self.hubble = 'ring'\n\n elif 'lens_or_arc' in self.decision_answer:\n self.hubble = 'lens or an arc'\n\n elif 'disturbed' in self.decision_answer:\n self.hubble = 'disturbed'\n\n elif 'irregular' in self.decision_answer:\n self.hubble = 'irregular'\n\n elif 'other' in self.decision_answer:\n self.hubble = 'irregular'\n\n elif 'merger' in self.decision_answer:\n self.hubble = 'merger'\n\n elif 'dust_lane' in self.decision_answer:\n self.hubble = 'dust lane'\n\n\n elif 'no_bulge2' in self.decision_answer:\n self.hubble = 'Sc/Sd'\n\n\n else:\n self.hubble = 'Send another picture please, this may not be a galaxy!'\n\n print(self.decision_answer)\n return self.hubble\n\nif __name__ == \"__main__\":\n prediction = np.array([0.0061849 , 0.2533387, 0.09984358, 0.00634132, 0.9516694,\n 0.31117438, 0.918507952, 0.92126181, 0.801901462, 0.15705012,\n 0.19438594, 0.03589064, 0.21429499, 0.078570501, 0.19592663,\n 0.2217514 , 0.05080319, 0.03898825, 0.01447876, 0.02751926,\n 0.02383724, 0.06303088, 0.04324729, 0.00319327, 0.06603167,\n 0.01039219, 0.02341971, 0.08231398, 0.7125346, 0.3151208,\n 0.1228107, 0.07996398, 0.01434553, 0.00636853, 0.664883,\n 0. ])\n tree = DecisionTree(prediction)\n result = tree.cat1()\n #decision_answer = ['smooth', 'completely_round', 'odd_yes', 'dust_lane']\n trainer = HubbleValue(result)\n result = trainer.final()\n print(result)\n" }, { "alpha_fraction": 0.7364341020584106, "alphanum_fraction": 0.7751937984496124, "avg_line_length": 24.799999237060547, "blob_id": "20c542285a5148a74cf30e8c849dda01662b99bb", "content_id": "f27f744052d47bc0d0c4b2eeb3f8fdab1d47dbe1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 258, "license_type": "no_license", "max_line_length": 63, "num_lines": 10, "path": "/Dockerfile", "repo_name": "AurelSann/StarWars", "src_encoding": "UTF-8", "text": "FROM python:3.8.6-buster\n\nCOPY StarWars /StarWars\nCOPY requirements.txt /requirements.txt\nCOPY final_model.h5 /final_model.h5\n\nRUN pip install --upgrade pip\nRUN pip install -r requirements.txt\n\nCMD uvicorn StarWars.api.simple:app --host 0.0.0.0 --port $PORT\n" } ]
9