nakamura196 committed on
Commit 0217f42 · 1 Parent(s): 3f6ca1a

feat: add nbdev

.github/workflows/deploy.yaml ADDED
@@ -0,0 +1,14 @@
+ name: Deploy to GitHub Pages
+
+ permissions:
+   contents: write
+   pages: write
+
+ on:
+   push:
+     branches: [ "main", "master" ]
+   workflow_dispatch:
+ jobs:
+   deploy:
+     runs-on: ubuntu-latest
+     steps: [uses: fastai/workflows/quarto-ghp@master]
.github/workflows/test.yaml ADDED
@@ -0,0 +1,7 @@
+ name: CI
+ on: [workflow_dispatch, pull_request, push]
+
+ jobs:
+   test:
+     runs-on: ubuntu-latest
+     steps: [uses: fastai/workflows/nbdev-ci@master]
.gitignore CHANGED
@@ -1,2 +1,152 @@
  .venv
- __pycache__

+ _docs/
+ _proc/
+
+ *.bak
+ .gitattributes
+ .last_checked
+ .gitconfig
+ *.bak
+ *.log
+ *~
+ ~*
+ _tmp*
+ tmp*
+ tags
+ *.pkg
+
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ env/
+ build/
+ conda/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ .hypothesis/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # pyenv
+ .python-version
+
+ # celery beat schedule file
+ celerybeat-schedule
+
+ # SageMath parsed files
+ *.sage.py
+
+ # dotenv
+ .env
+
+ # virtualenv
  .venv
+ venv/
+ ENV/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+
+ .vscode
+ *.swp
+
+ # osx generated files
+ .DS_Store
+ .DS_Store?
+ .Trashes
+ ehthumbs.db
+ Thumbs.db
+ .idea
+
+ # pytest
+ .pytest_cache
+
+ # tools/trust-doc-nbs
+ docs_src/.last_checked
+
+ # symlinks to fastai
+ docs_src/fastai
+ tools/fastai
+
+ # link checker
+ checklink/cookies.txt
+
+ # .gitconfig is now autogenerated
+ .gitconfig
+
+ # Quarto installer
+ .deb
+ .pkg
+
+ # Quarto
+ .quarto
LICENSE ADDED
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2022, fastai
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
MANIFEST.in ADDED
@@ -0,0 +1,5 @@
+ include settings.ini
+ include LICENSE
+ include CONTRIBUTING.md
+ include README.md
+ recursive-exclude * __pycache__
README copy.md ADDED
@@ -0,0 +1,12 @@
+ ---
+ title: Genji Predict
+ emoji: 👁
+ colorFrom: purple
+ colorTo: indigo
+ sdk: gradio
+ sdk_version: 4.44.1
+ app_file: app.py
+ pinned: false
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -4,97 +4,24 @@
  類似テキスト検索を行うWebインターフェースを提供します。
  """

- import json
- import xml.etree.ElementTree as ET

  import gradio as gr
- from Levenshtein import ratio

- DATA_PATH = "./data.json"
-
- with open(DATA_PATH, "r", encoding="utf-8") as f:
-     documents_data = json.load(f)
-
- def predict(query, selected_vols, top_n=5):
-     """テキストの類似度を計算し、上位の結果を返す
-
-     Args:
-         query (str): 検索クエリテキスト
-         selected_vols (list): 検索対象の巻のリスト
-         top_n (int, optional): 返す結果の数. デフォルトは5
-
-     Returns:
-         list: スコア順にソートされた上位n件の検索結果
-     """
-     results = []
-
-     for doc in documents_data:
-         # 選択された巻のみを検索対象とする
-         if not selected_vols or str(doc["vol"]) in selected_vols:
-             score = ratio(query, doc["text"])
-             results.append({
-                 "vol": doc["vol"],
-                 "page": doc["page"],
-                 "score": score,
-                 "text": doc["text"]
-             })

-     results.sort(key=lambda x: x["score"], reverse=True)
-     top_results = results[:top_n] # top_nで指定された件数だけを取得

-     return top_results
-
-
- def extract_text_from_lines(element):
-     """本文タイプの要素からテキストを抽出する"""
-     lines = element.findall(".//*[@type='本文']")
-     return ''.join(line.text for line in lines)

- def format_prediction_result(result):
-     """予測結果を 'vol-page' 形式にフォーマットする"""
-     first_result = result[0]
-     return f'{first_result["vol"]}-{first_result["page"]}'

- def search_similar_texts(query, selected_vols, top_n=5, xml_file=None):
-     """テキストの類似検索を実行する関数

-     Args:
-         query (str): 検索クエリテキスト
-         selected_vols (list): 検索対象の巻のリスト
-         top_n (int, optional): 返す結果の数. デフォルトは5
-         xml_file (gradio.File, optional): 比較対象のXMLファイル

-     Returns:
-         list: 検索結果のリスト。XMLファイル処理時は[predict_results]、
-             通常検索時は[top_results]を返す
-     """
      if xml_file is not None:
-         try:
-             # Gradioのファイルオブジェクトから名前を取得して直接ファイルを開く
-             xml_content = xml_file.name
-             tree = ET.parse(xml_content)
-             root = tree.getroot()
-
-             # ページ要素の取得
-             elements = root.findall(".//*[@type='page']")
-
-             # 予測実行
-             predict_results = {}
-             for i, element in enumerate(elements, 1): # enumerate(elements, 1)で1から開始
-                 text = extract_text_from_lines(element)
-                 top_results = predict(text, selected_vols, 1)
-                 predict_results[str(i)] = format_prediction_result(top_results)

-             return [predict_results]
-
-         except (ET.ParseError, FileNotFoundError, PermissionError) as e:
-             print(f"XMLファイルの処理中にエラーが発生しました: {str(e)}")
-             return [[], {}]
-

-     top_results = predict(query, selected_vols, top_n)
-
-     return [top_results] # , vol_percentages

  # Gradioインターフェースの作成
  demo = gr.Interface(

  類似テキスト検索を行うWebインターフェースを提供します。
  """


  import gradio as gr

+ from genji_predict.core import ApiClient

+ DATA_PATH = "./data.json"

+ client = ApiClient(DATA_PATH)


+ def search_similar_texts(query, selected_vols, top_n, xml_file):
+     xml_file_path = None

      if xml_file is not None:
+         xml_file_path = xml_file.name

+     return client.search_similar_texts(query, selected_vols, top_n=top_n, xml_file_path=xml_file_path)


  # Gradioインターフェースの作成
  demo = gr.Interface(
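Note (not part of the commit): a quick way to exercise the refactored entry point above. The `gr.Interface(...)` arguments are truncated in this diff, so nothing about the interface itself is assumed; the sketch only shows that `app.py` now delegates to `ApiClient`, using a hypothetical local `data.json`.

```python
# Sketch only: mirrors the thinned-out app.py wiring shown above.
# Assumes a local data.json with "vol", "page" and "text" records.
from genji_predict.core import ApiClient

client = ApiClient("./data.json")

def search_similar_texts(query, selected_vols, top_n, xml_file):
    # Gradio hands over a file object; core.py now expects a plain path.
    xml_file_path = xml_file.name if xml_file is not None else None
    return client.search_similar_texts(query, selected_vols, top_n=top_n, xml_file_path=xml_file_path)

print(search_similar_texts("いづれの御時にか", selected_vols=[], top_n=3, xml_file=None))
```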
genji_predict/__init__.py ADDED
@@ -0,0 +1 @@
+ __version__ = "0.0.1"
genji_predict/_modidx.py ADDED
@@ -0,0 +1,16 @@
+ # Autogenerated by nbdev
+
+ d = { 'settings': { 'branch': 'main',
+                     'doc_baseurl': '/genji_predict',
+                     'doc_host': 'https://nakamura196.github.io',
+                     'git_url': 'https://github.com/nakamura196/genji_predict',
+                     'lib_path': 'genji_predict'},
+       'syms': { 'genji_predict.core': { 'genji_predict.core.ApiClient': ('core.html#apiclient', 'genji_predict/core.py'),
+                                         'genji_predict.core.ApiClient.__init__': ('core.html#apiclient.__init__', 'genji_predict/core.py'),
+                                         'genji_predict.core.ApiClient.extract_text_from_lines': ( 'core.html#apiclient.extract_text_from_lines',
+                                                                                                   'genji_predict/core.py'),
+                                         'genji_predict.core.ApiClient.format_prediction_result': ( 'core.html#apiclient.format_prediction_result',
+                                                                                                    'genji_predict/core.py'),
+                                         'genji_predict.core.ApiClient.predict': ('core.html#apiclient.predict', 'genji_predict/core.py'),
+                                         'genji_predict.core.ApiClient.search_similar_texts': ( 'core.html#apiclient.search_similar_texts',
+                                                                                                'genji_predict/core.py')}}}
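Aside (not part of the commit): `_modidx.py` is the symbol index nbdev uses to link code to its documentation pages. A lookup against the dictionary above could look like this sketch:

```python
# Sketch: resolve the docs URL for ApiClient.predict from the autogenerated index.
from genji_predict._modidx import d

settings = d['settings']
page, _source_file = d['syms']['genji_predict.core']['genji_predict.core.ApiClient.predict']
print(f"{settings['doc_host']}{settings['doc_baseurl']}/{page}")
# https://nakamura196.github.io/genji_predict/core.html#apiclient.predict
```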
genji_predict/core.py ADDED
@@ -0,0 +1,108 @@
+ """Fill in a module description here"""
+
+ # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/00_core.ipynb.
+
+ # %% auto 0
+ __all__ = ['ApiClient']
+
+ # %% ../nbs/00_core.ipynb 3
+ import gradio as gr
+ from Levenshtein import ratio
+ import json
+ import xml.etree.ElementTree as ET
+ from tqdm import tqdm
+
+ # %% ../nbs/00_core.ipynb 4
+ class ApiClient:
+
+
+     def __init__(self, data_path: str):
+         DATA_PATH = data_path
+
+         with open(DATA_PATH, "r", encoding="utf-8") as f:
+             documents_data = json.load(f)
+
+         self.documents_data = documents_data
+
+     def extract_text_from_lines(self, element):
+         """本文タイプの要素からテキストを抽出する"""
+         lines = element.findall(".//*[@type='本文']")
+         return ''.join(line.text for line in lines)
+
+     def format_prediction_result(self, result):
+         """予測結果を 'vol-page' 形式にフォーマットする"""
+         first_result = result[0]
+         return f'{first_result["vol"]}-{first_result["page"]}'
+
+
+     def search_similar_texts(self, query, selected_vols, top_n=5, xml_file_path=None):
+         """テキストの類似検索を実行する関数
+
+         Args:
+             query (str): 検索クエリテキスト
+             selected_vols (list): 検索対象の巻のリスト
+             top_n (int, optional): 返す結果の数. デフォルトは5
+             xml_file (gradio.File, optional): 比較対象のXMLファイル
+
+         Returns:
+             list: 検索結果のリスト。XMLファイル処理時は[predict_results]、
+                 通常検索時は[top_results]を返す
+         """
+         if xml_file_path is not None:
+
+             try:
+                 with open(xml_file_path, "r", encoding="utf-8") as f:
+                     xml_str = f.read()
+
+                 root = ET.fromstring(xml_str)
+
+                 # ページ要素の取得
+                 elements = root.findall(".//*[@type='page']")
+
+                 # 予測実行
+                 predict_results = {}
+                 for i, element in tqdm(enumerate(elements, 1)):
+                     text = self.extract_text_from_lines(element)
+                     top_results = self.predict(text, selected_vols, 1)
+                     predict_results[str(i)] = self.format_prediction_result(top_results)
+
+                 return [predict_results]
+
+             except (ET.ParseError, FileNotFoundError, PermissionError) as e:
+                 print(f"XMLファイルの処理中にエラーが発生しました: {str(e)}")
+                 return [[], {}]
+
+
+         top_results = self.predict(query, selected_vols, top_n)
+
+         return [top_results] # , vol_percentages
+
+
+     def predict(self, query, selected_vols, top_n=5):
+         """テキストの類似度を計算し、上位の結果を返す
+
+         Args:
+             query (str): 検索クエリテキスト
+             selected_vols (list): 検索対象の巻のリスト
+             top_n (int, optional): 返す結果の数. デフォルトは5
+
+         Returns:
+             list: スコア順にソートされた上位n件の検索結果
+         """
+         results = []
+
+         for doc in self.documents_data:
+             # 選択された巻のみを検索対象とする
+             if not selected_vols or str(doc["vol"]) in selected_vols:
+                 score = ratio(query, doc["text"])
+                 results.append({
+                     "vol": doc["vol"],
+                     "page": doc["page"],
+                     "score": score,
+                     "text": doc["text"]
+                 })
+
+         results.sort(key=lambda x: x["score"], reverse=True)
+         top_results = results[:top_n] # top_nで指定された件数だけを取得
+
+         return top_results
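A minimal usage sketch for the new `ApiClient` (not part of the commit). It assumes only what the class reads above: a JSON file whose records carry `vol`, `page`, and `text` fields.

```python
# Sketch: build a tiny data.json, then score a query against it with ApiClient.predict.
import json, os, tempfile
from genji_predict.core import ApiClient

docs = [
    {"vol": 1, "page": 1, "text": "いづれの御時にか女御更衣あまたさぶらひたまひける中に"},
    {"vol": 1, "page": 2, "text": "いとやむごとなき際にはあらぬがすぐれて時めきたまふありけり"},
]
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False, encoding="utf-8") as f:
    json.dump(docs, f, ensure_ascii=False)
    data_path = f.name

client = ApiClient(data_path)
top = client.predict("いづれの御時にか", selected_vols=[], top_n=1)  # empty list = search all vols
print(top[0]["vol"], top[0]["page"], round(top[0]["score"], 3))
os.remove(data_path)
```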
nbs/00_core.ipynb ADDED
@@ -0,0 +1,167 @@
+ {
+  "cells": [
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "# core\n",
+     "\n",
+     "> Fill in a module description here"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "#| default_exp core"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "#| hide\n",
+     "from nbdev.showdoc import *"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "#| export\n",
+     "import gradio as gr\n",
+     "from Levenshtein import ratio\n",
+     "import json\n",
+     "import xml.etree.ElementTree as ET\n",
+     "from tqdm import tqdm"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "#| export\n",
+     "class ApiClient:\n",
+     "\n",
+     "\n",
+     "    def __init__(self, data_path: str):\n",
+     "        DATA_PATH = data_path\n",
+     "\n",
+     "        with open(DATA_PATH, \"r\", encoding=\"utf-8\") as f:\n",
+     "            documents_data = json.load(f)\n",
+     "\n",
+     "        self.documents_data = documents_data\n",
+     "\n",
+     "    def extract_text_from_lines(self, element):\n",
+     "        \"\"\"本文タイプの要素からテキストを抽出する\"\"\"\n",
+     "        lines = element.findall(\".//*[@type='本文']\")\n",
+     "        return ''.join(line.text for line in lines)\n",
+     "\n",
+     "    def format_prediction_result(self, result):\n",
+     "        \"\"\"予測結果を 'vol-page' 形式にフォーマットする\"\"\"\n",
+     "        first_result = result[0]\n",
+     "        return f'{first_result[\"vol\"]}-{first_result[\"page\"]}'\n",
+     "\n",
+     "\n",
+     "    def search_similar_texts(self, query, selected_vols, top_n=5, xml_file_path=None):\n",
+     "        \"\"\"テキストの類似検索を実行する関数\n",
+     "\n",
+     "        Args:\n",
+     "            query (str): 検索クエリテキスト\n",
+     "            selected_vols (list): 検索対象の巻のリスト\n",
+     "            top_n (int, optional): 返す結果の数. デフォルトは5\n",
+     "            xml_file (gradio.File, optional): 比較対象のXMLファイル\n",
+     "\n",
+     "        Returns:\n",
+     "            list: 検索結果のリスト。XMLファイル処理時は[predict_results]、\n",
+     "                通常検索時は[top_results]を返す\n",
+     "        \"\"\"\n",
+     "        if xml_file_path is not None:\n",
+     "            \n",
+     "            try:\n",
+     "                with open(xml_file_path, \"r\", encoding=\"utf-8\") as f:\n",
+     "                    xml_str = f.read()\n",
+     "                \n",
+     "                root = ET.fromstring(xml_str)\n",
+     "                \n",
+     "                # ページ要素の取得\n",
+     "                elements = root.findall(\".//*[@type='page']\")\n",
+     "\n",
+     "                # 予測実行\n",
+     "                predict_results = {}\n",
+     "                for i, element in tqdm(enumerate(elements, 1)):\n",
+     "                    text = self.extract_text_from_lines(element)\n",
+     "                    top_results = self.predict(text, selected_vols, 1)\n",
+     "                    predict_results[str(i)] = self.format_prediction_result(top_results)\n",
+     "\n",
+     "                return [predict_results]\n",
+     "            \n",
+     "            except (ET.ParseError, FileNotFoundError, PermissionError) as e:\n",
+     "                print(f\"XMLファイルの処理中にエラーが発生しました: {str(e)}\")\n",
+     "                return [[], {}]\n",
+     "        \n",
+     "\n",
+     "        top_results = self.predict(query, selected_vols, top_n)\n",
+     "        \n",
+     "        return [top_results] # , vol_percentages\n",
+     "        \n",
+     "\n",
+     "    def predict(self, query, selected_vols, top_n=5):\n",
+     "        \"\"\"テキストの類似度を計算し、上位の結果を返す\n",
+     "\n",
+     "        Args:\n",
+     "            query (str): 検索クエリテキスト\n",
+     "            selected_vols (list): 検索対象の巻のリスト\n",
+     "            top_n (int, optional): 返す結果の数. デフォルトは5\n",
+     "\n",
+     "        Returns:\n",
+     "            list: スコア順にソートされた上位n件の検索結果\n",
+     "        \"\"\"\n",
+     "        results = []\n",
+     "        \n",
+     "        for doc in self.documents_data:\n",
+     "            # 選択された巻のみを検索対象とする\n",
+     "            if not selected_vols or str(doc[\"vol\"]) in selected_vols:\n",
+     "                score = ratio(query, doc[\"text\"])\n",
+     "                results.append({\n",
+     "                    \"vol\": doc[\"vol\"],\n",
+     "                    \"page\": doc[\"page\"],\n",
+     "                    \"score\": score,\n",
+     "                    \"text\": doc[\"text\"]\n",
+     "                })\n",
+     "\n",
+     "        results.sort(key=lambda x: x[\"score\"], reverse=True)\n",
+     "        top_results = results[:top_n] # top_nで指定された件数だけを取得\n",
+     "\n",
+     "        return top_results"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "#| hide\n",
+     "import nbdev; nbdev.nbdev_export()"
+    ]
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "python3",
+    "language": "python",
+    "name": "python3"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 4
+ }
nbs/_quarto.yml ADDED
@@ -0,0 +1,22 @@
+ project:
+   type: website
+
+ format:
+   html:
+     theme: cosmo
+     css: styles.css
+     toc: true
+     keep-md: true
+   commonmark: default
+
+ website:
+   twitter-card: true
+   open-graph: true
+   repo-actions: [issue]
+   navbar:
+     background: primary
+     search: true
+   sidebar:
+     style: floating
+
+ metadata-files: [nbdev.yml, sidebar.yml]
nbs/index.ipynb ADDED
@@ -0,0 +1,180 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "#| hide\n",
+     "from genji_predict.core import *"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "# genji_predict\n",
+     "\n",
+     "> "
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "This file will become your README and also the index of your documentation."
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "## Developer Guide"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "If you are new to using `nbdev` here are some useful pointers to get you started."
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "### Install genji_predict in Development mode"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "```sh\n",
+     "# make sure genji_predict package is installed in development mode\n",
+     "$ pip install -e .\n",
+     "\n",
+     "# make changes under nbs/ directory\n",
+     "# ...\n",
+     "\n",
+     "# compile to have changes apply to genji_predict\n",
+     "$ nbdev_prepare\n",
+     "```"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "## Usage"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "### Installation"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "Install latest from the GitHub [repository][repo]:\n",
+     "\n",
+     "```sh\n",
+     "$ pip install git+https://github.com/nakamura196/genji_predict.git\n",
+     "```\n",
+     "\n",
+     "or from [conda][conda]\n",
+     "\n",
+     "```sh\n",
+     "$ conda install -c nakamura196 genji_predict\n",
+     "```\n",
+     "\n",
+     "or from [pypi][pypi]\n",
+     "\n",
+     "\n",
+     "```sh\n",
+     "$ pip install genji_predict\n",
+     "```\n",
+     "\n",
+     "\n",
+     "[repo]: https://github.com/nakamura196/genji_predict\n",
+     "[docs]: https://nakamura196.github.io/genji_predict/\n",
+     "[pypi]: https://pypi.org/project/genji_predict/\n",
+     "[conda]: https://anaconda.org/nakamura196/genji_predict"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "### Documentation"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "Documentation can be found hosted on this GitHub [repository][repo]'s [pages][docs]. Additionally you can find package manager specific guidelines on [conda][conda] and [pypi][pypi] respectively.\n",
+     "\n",
+     "[repo]: https://github.com/nakamura196/genji_predict\n",
+     "[docs]: https://nakamura196.github.io/genji_predict/\n",
+     "[pypi]: https://pypi.org/project/genji_predict/\n",
+     "[conda]: https://anaconda.org/nakamura196/genji_predict"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "## How to use"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "Fill me in please! Don't forget code examples:"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "text/plain": [
+        "2"
+       ]
+      },
+      "execution_count": null,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "1+1"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": []
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "python3",
+    "language": "python",
+    "name": "python3"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 4
+ }
nbs/nbdev.yml ADDED
@@ -0,0 +1,9 @@
+ project:
+   output-dir: _docs
+
+ website:
+   title: "genji_predict"
+   site-url: "https://nakamura196.github.io/genji_predict"
+   description: ""
+   repo-branch: main
+   repo-url: "https://github.com/nakamura196/genji_predict"
nbs/styles.css ADDED
@@ -0,0 +1,37 @@
+ .cell {
+   margin-bottom: 1rem;
+ }
+
+ .cell > .sourceCode {
+   margin-bottom: 0;
+ }
+
+ .cell-output > pre {
+   margin-bottom: 0;
+ }
+
+ .cell-output > pre, .cell-output > .sourceCode > pre, .cell-output-stdout > pre {
+   margin-left: 0.8rem;
+   margin-top: 0;
+   background: none;
+   border-left: 2px solid lightsalmon;
+   border-top-left-radius: 0;
+   border-top-right-radius: 0;
+ }
+
+ .cell-output > .sourceCode {
+   border: none;
+ }
+
+ .cell-output > .sourceCode {
+   background: none;
+   margin-top: 0;
+ }
+
+ div.description {
+   padding-left: 2px;
+   padding-top: 5px;
+   font-style: italic;
+   font-size: 135%;
+   opacity: 70%;
+ }
pyproject.toml ADDED
@@ -0,0 +1,3 @@
+ [build-system]
+ requires = ["setuptools>=64.0"]
+ build-backend = "setuptools.build_meta"
requirements.txt CHANGED
@@ -1,2 +1,4 @@
  levenshtein
- gradio==4.44.1

  levenshtein
+ gradio==4.44.1
+ nbdev
+ tqdm
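For reference (not part of the commit): the `levenshtein` dependency supplies the scoring function used in `core.py`. `Levenshtein.ratio` returns a normalized similarity between 0 and 1, which is what `ApiClient.predict` sorts on.

```python
# Sketch: the similarity measure behind ApiClient.predict.
from Levenshtein import ratio

print(ratio("いづれの御時にか", "いづれの御時にか"))  # identical strings -> 1.0
print(ratio("いづれの御時にか", "御時"))              # partial overlap -> somewhere between 0 and 1
```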
settings.ini ADDED
@@ -0,0 +1,45 @@
+ [DEFAULT]
+ # All sections below are required unless otherwise specified.
+ # See https://github.com/AnswerDotAI/nbdev/blob/main/settings.ini for examples.
+
+ ### Python library ###
+ repo = genji_predict
+ lib_name = %(repo)s
+ version = 0.0.1
+ min_python = 3.7
+ license = apache2
+ black_formatting = False
+
+ ### nbdev ###
+ doc_path = _docs
+ lib_path = genji_predict
+ nbs_path = nbs
+ recursive = True
+ tst_flags = notest
+ put_version_in_init = True
+
+ ### Docs ###
+ branch = main
+ custom_sidebar = False
+ doc_host = https://%(user)s.github.io
+ doc_baseurl = /%(repo)s
+ git_url = https://github.com/%(user)s/%(repo)s
+ title = %(lib_name)s
+
+ ### PyPI ###
+ audience = Developers
+ author = Satoru Nakamura
+ author_email = na.kamura.1263@gmail.com
+ copyright = 2025 onwards, %(author)s
+ description =
+ keywords = nbdev jupyter notebook python
+ language = English
+ status = 3
+ user = nakamura196
+
+ ### Optional ###
+ # requirements = fastcore pandas
+ # dev_requirements =
+ # console_scripts =
+ # conda_user =
+ # package_data =
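One note on the `%(...)s` placeholders above (an illustration, not part of the commit): `settings.ini` relies on `ConfigParser` interpolation, which is how the `setup.py` below reads it, so values such as `lib_name`, `doc_host`, and `git_url` are expanded from `repo` and `user`.

```python
# Sketch: how the %(repo)s / %(user)s placeholders in settings.ini resolve.
from configparser import ConfigParser

config = ConfigParser(delimiters=['='])
config.read('settings.ini', encoding='utf-8')
cfg = config['DEFAULT']
print(cfg['lib_name'])  # genji_predict
print(cfg['git_url'])   # https://github.com/nakamura196/genji_predict
```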
setup.py ADDED
@@ -0,0 +1,64 @@
+ from pkg_resources import parse_version
+ from configparser import ConfigParser
+ import setuptools, shlex
+ assert parse_version(setuptools.__version__)>=parse_version('36.2')
+
+ # note: all settings are in settings.ini; edit there, not here
+ config = ConfigParser(delimiters=['='])
+ config.read('settings.ini', encoding='utf-8')
+ cfg = config['DEFAULT']
+
+ cfg_keys = 'version description keywords author author_email'.split()
+ expected = cfg_keys + "lib_name user branch license status min_python audience language".split()
+ for o in expected: assert o in cfg, "missing expected setting: {}".format(o)
+ setup_cfg = {o:cfg[o] for o in cfg_keys}
+
+ licenses = {
+     'apache2': ('Apache Software License 2.0','OSI Approved :: Apache Software License'),
+     'mit': ('MIT License', 'OSI Approved :: MIT License'),
+     'gpl2': ('GNU General Public License v2', 'OSI Approved :: GNU General Public License v2 (GPLv2)'),
+     'gpl3': ('GNU General Public License v3', 'OSI Approved :: GNU General Public License v3 (GPLv3)'),
+     'bsd3': ('BSD License', 'OSI Approved :: BSD License'),
+ }
+ statuses = [ '1 - Planning', '2 - Pre-Alpha', '3 - Alpha',
+     '4 - Beta', '5 - Production/Stable', '6 - Mature', '7 - Inactive' ]
+ py_versions = '3.6 3.7 3.8 3.9 3.10 3.11 3.12'.split()
+
+ requirements = shlex.split(cfg.get('requirements', ''))
+ if cfg.get('pip_requirements'): requirements += shlex.split(cfg.get('pip_requirements', ''))
+ min_python = cfg['min_python']
+ lic = licenses.get(cfg['license'].lower(), (cfg['license'], None))
+ dev_requirements = (cfg.get('dev_requirements') or '').split()
+
+ package_data = dict()
+ pkg_data = cfg.get('package_data', None)
+ if pkg_data:
+     package_data[cfg['lib_name']] = pkg_data.split() # split as multiple files might be listed
+ # Add package data to setup_cfg for setuptools.setup(..., **setup_cfg)
+ setup_cfg['package_data'] = package_data
+
+ setuptools.setup(
+     name = cfg['lib_name'],
+     license = lic[0],
+     classifiers = [
+         'Development Status :: ' + statuses[int(cfg['status'])],
+         'Intended Audience :: ' + cfg['audience'].title(),
+         'Natural Language :: ' + cfg['language'].title(),
+     ] + ['Programming Language :: Python :: '+o for o in py_versions[py_versions.index(min_python):]] + (['License :: ' + lic[1] ] if lic[1] else []),
+     url = cfg['git_url'],
+     packages = setuptools.find_packages(),
+     include_package_data = True,
+     install_requires = requirements,
+     extras_require={ 'dev': dev_requirements },
+     dependency_links = cfg.get('dep_links','').split(),
+     python_requires = '>=' + cfg['min_python'],
+     long_description = open('README.md', encoding='utf-8').read(),
+     long_description_content_type = 'text/markdown',
+     zip_safe = False,
+     entry_points = {
+         'console_scripts': cfg.get('console_scripts','').split(),
+         'nbdev': [f'{cfg.get("lib_path")}={cfg.get("lib_path")}._modidx:d']
+     },
+     **setup_cfg)
src/02_demo.ipynb ADDED
The diff for this file is too large to render. See raw diff