Bhuvaneshvar commited on
Commit
6370773
1 Parent(s): f753da3

Upload 2116 files

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +11 -0
  2. cmrithackathon-master/.venv/bin/Activate.ps1 +247 -0
  3. cmrithackathon-master/.venv/bin/activate +69 -0
  4. cmrithackathon-master/.venv/bin/activate.csh +26 -0
  5. cmrithackathon-master/.venv/bin/activate.fish +69 -0
  6. cmrithackathon-master/.venv/bin/f2py +8 -0
  7. cmrithackathon-master/.venv/bin/normalizer +8 -0
  8. cmrithackathon-master/.venv/bin/numpy-config +8 -0
  9. cmrithackathon-master/.venv/bin/pip +8 -0
  10. cmrithackathon-master/.venv/bin/pip3 +8 -0
  11. cmrithackathon-master/.venv/bin/pip3.11 +8 -0
  12. cmrithackathon-master/.venv/bin/python +1 -0
  13. cmrithackathon-master/.venv/bin/python3 +1 -0
  14. cmrithackathon-master/.venv/bin/python3.11 +1 -0
  15. cmrithackathon-master/.venv/bin/tqdm +8 -0
  16. cmrithackathon-master/.venv/lib/python3.11/site-packages/__pycache__/six.cpython-311.pyc +0 -0
  17. cmrithackathon-master/.venv/lib/python3.11/site-packages/_distutils_hack/__init__.py +222 -0
  18. cmrithackathon-master/.venv/lib/python3.11/site-packages/_distutils_hack/__pycache__/__init__.cpython-311.pyc +0 -0
  19. cmrithackathon-master/.venv/lib/python3.11/site-packages/_distutils_hack/__pycache__/override.cpython-311.pyc +0 -0
  20. cmrithackathon-master/.venv/lib/python3.11/site-packages/_distutils_hack/override.py +1 -0
  21. cmrithackathon-master/.venv/lib/python3.11/site-packages/beautifulsoup4-4.12.3.dist-info/INSTALLER +1 -0
  22. cmrithackathon-master/.venv/lib/python3.11/site-packages/beautifulsoup4-4.12.3.dist-info/METADATA +122 -0
  23. cmrithackathon-master/.venv/lib/python3.11/site-packages/beautifulsoup4-4.12.3.dist-info/RECORD +79 -0
  24. cmrithackathon-master/.venv/lib/python3.11/site-packages/beautifulsoup4-4.12.3.dist-info/REQUESTED +0 -0
  25. cmrithackathon-master/.venv/lib/python3.11/site-packages/beautifulsoup4-4.12.3.dist-info/WHEEL +4 -0
  26. cmrithackathon-master/.venv/lib/python3.11/site-packages/beautifulsoup4-4.12.3.dist-info/licenses/AUTHORS +49 -0
  27. cmrithackathon-master/.venv/lib/python3.11/site-packages/beautifulsoup4-4.12.3.dist-info/licenses/LICENSE +31 -0
  28. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/__init__.py +840 -0
  29. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/__pycache__/__init__.cpython-311.pyc +0 -0
  30. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/__pycache__/css.cpython-311.pyc +0 -0
  31. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/__pycache__/dammit.cpython-311.pyc +0 -0
  32. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/__pycache__/diagnose.cpython-311.pyc +0 -0
  33. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/__pycache__/element.cpython-311.pyc +0 -0
  34. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/__pycache__/formatter.cpython-311.pyc +0 -0
  35. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/builder/__init__.py +636 -0
  36. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/builder/__pycache__/__init__.cpython-311.pyc +0 -0
  37. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/builder/__pycache__/_html5lib.cpython-311.pyc +0 -0
  38. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/builder/__pycache__/_htmlparser.cpython-311.pyc +0 -0
  39. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/builder/__pycache__/_lxml.cpython-311.pyc +0 -0
  40. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/builder/_html5lib.py +481 -0
  41. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/builder/_htmlparser.py +387 -0
  42. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/builder/_lxml.py +388 -0
  43. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/css.py +280 -0
  44. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/dammit.py +1095 -0
  45. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/diagnose.py +233 -0
  46. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/element.py +2435 -0
  47. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/formatter.py +185 -0
  48. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/tests/__init__.py +1177 -0
  49. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/tests/__pycache__/__init__.cpython-311.pyc +0 -0
  50. cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/tests/__pycache__/test_builder.cpython-311.pyc +0 -0
.gitattributes CHANGED
@@ -33,3 +33,14 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ cmrithackathon-master/.venv/lib/python3.11/site-packages/numpy.libs/libgfortran-040039e1-0352e75f.so.5.0.0 filter=lfs diff=lfs merge=lfs -text
37
+ cmrithackathon-master/.venv/lib/python3.11/site-packages/numpy.libs/libscipy_openblas64_-ff651d7f.so filter=lfs diff=lfs merge=lfs -text
38
+ cmrithackathon-master/.venv/lib/python3.11/site-packages/numpy/_core/_multiarray_umath.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
39
+ cmrithackathon-master/.venv/lib/python3.11/site-packages/numpy/_core/_simd.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
40
+ cmrithackathon-master/.venv/lib/python3.11/site-packages/numpy/random/_generator.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
41
+ cmrithackathon-master/.venv/lib/python3.11/site-packages/pandas/_libs/algos.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
42
+ cmrithackathon-master/.venv/lib/python3.11/site-packages/pandas/_libs/groupby.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
43
+ cmrithackathon-master/.venv/lib/python3.11/site-packages/pandas/_libs/hashtable.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
44
+ cmrithackathon-master/.venv/lib/python3.11/site-packages/pandas/_libs/interval.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
45
+ cmrithackathon-master/.venv/lib/python3.11/site-packages/pandas/_libs/join.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
46
+ cmrithackathon-master/.venv/lib/python3.11/site-packages/pandas/_libs/tslibs/offsets.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
cmrithackathon-master/.venv/bin/Activate.ps1 ADDED
@@ -0,0 +1,247 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <#
2
+ .Synopsis
3
+ Activate a Python virtual environment for the current PowerShell session.
4
+
5
+ .Description
6
+ Pushes the python executable for a virtual environment to the front of the
7
+ $Env:PATH environment variable and sets the prompt to signify that you are
8
+ in a Python virtual environment. Makes use of the command line switches as
9
+ well as the `pyvenv.cfg` file values present in the virtual environment.
10
+
11
+ .Parameter VenvDir
12
+ Path to the directory that contains the virtual environment to activate. The
13
+ default value for this is the parent of the directory that the Activate.ps1
14
+ script is located within.
15
+
16
+ .Parameter Prompt
17
+ The prompt prefix to display when this virtual environment is activated. By
18
+ default, this prompt is the name of the virtual environment folder (VenvDir)
19
+ surrounded by parentheses and followed by a single space (ie. '(.venv) ').
20
+
21
+ .Example
22
+ Activate.ps1
23
+ Activates the Python virtual environment that contains the Activate.ps1 script.
24
+
25
+ .Example
26
+ Activate.ps1 -Verbose
27
+ Activates the Python virtual environment that contains the Activate.ps1 script,
28
+ and shows extra information about the activation as it executes.
29
+
30
+ .Example
31
+ Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
32
+ Activates the Python virtual environment located in the specified location.
33
+
34
+ .Example
35
+ Activate.ps1 -Prompt "MyPython"
36
+ Activates the Python virtual environment that contains the Activate.ps1 script,
37
+ and prefixes the current prompt with the specified string (surrounded in
38
+ parentheses) while the virtual environment is active.
39
+
40
+ .Notes
41
+ On Windows, it may be required to enable this Activate.ps1 script by setting the
42
+ execution policy for the user. You can do this by issuing the following PowerShell
43
+ command:
44
+
45
+ PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
46
+
47
+ For more information on Execution Policies:
48
+ https://go.microsoft.com/fwlink/?LinkID=135170
49
+
50
+ #>
51
+ Param(
52
+ [Parameter(Mandatory = $false)]
53
+ [String]
54
+ $VenvDir,
55
+ [Parameter(Mandatory = $false)]
56
+ [String]
57
+ $Prompt
58
+ )
59
+
60
+ <# Function declarations --------------------------------------------------- #>
61
+
62
+ <#
63
+ .Synopsis
64
+ Remove all shell session elements added by the Activate script, including the
65
+ addition of the virtual environment's Python executable from the beginning of
66
+ the PATH variable.
67
+
68
+ .Parameter NonDestructive
69
+ If present, do not remove this function from the global namespace for the
70
+ session.
71
+
72
+ #>
73
+ function global:deactivate ([switch]$NonDestructive) {
74
+ # Revert to original values
75
+
76
+ # The prior prompt:
77
+ if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
78
+ Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
79
+ Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
80
+ }
81
+
82
+ # The prior PYTHONHOME:
83
+ if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
84
+ Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
85
+ Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
86
+ }
87
+
88
+ # The prior PATH:
89
+ if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
90
+ Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
91
+ Remove-Item -Path Env:_OLD_VIRTUAL_PATH
92
+ }
93
+
94
+ # Just remove the VIRTUAL_ENV altogether:
95
+ if (Test-Path -Path Env:VIRTUAL_ENV) {
96
+ Remove-Item -Path env:VIRTUAL_ENV
97
+ }
98
+
99
+ # Just remove VIRTUAL_ENV_PROMPT altogether.
100
+ if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
101
+ Remove-Item -Path env:VIRTUAL_ENV_PROMPT
102
+ }
103
+
104
+ # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
105
+ if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
106
+ Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
107
+ }
108
+
109
+ # Leave deactivate function in the global namespace if requested:
110
+ if (-not $NonDestructive) {
111
+ Remove-Item -Path function:deactivate
112
+ }
113
+ }
114
+
115
+ <#
116
+ .Description
117
+ Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
118
+ given folder, and returns them in a map.
119
+
120
+ For each line in the pyvenv.cfg file, if that line can be parsed into exactly
121
+ two strings separated by `=` (with any amount of whitespace surrounding the =)
122
+ then it is considered a `key = value` line. The left hand string is the key,
123
+ the right hand is the value.
124
+
125
+ If the value starts with a `'` or a `"` then the first and last character is
126
+ stripped from the value before being captured.
127
+
128
+ .Parameter ConfigDir
129
+ Path to the directory that contains the `pyvenv.cfg` file.
130
+ #>
131
+ function Get-PyVenvConfig(
132
+ [String]
133
+ $ConfigDir
134
+ ) {
135
+ Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
136
+
137
+ # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
138
+ $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
139
+
140
+ # An empty map will be returned if no config file is found.
141
+ $pyvenvConfig = @{ }
142
+
143
+ if ($pyvenvConfigPath) {
144
+
145
+ Write-Verbose "File exists, parse `key = value` lines"
146
+ $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
147
+
148
+ $pyvenvConfigContent | ForEach-Object {
149
+ $keyval = $PSItem -split "\s*=\s*", 2
150
+ if ($keyval[0] -and $keyval[1]) {
151
+ $val = $keyval[1]
152
+
153
+ # Remove extraneous quotations around a string value.
154
+ if ("'""".Contains($val.Substring(0, 1))) {
155
+ $val = $val.Substring(1, $val.Length - 2)
156
+ }
157
+
158
+ $pyvenvConfig[$keyval[0]] = $val
159
+ Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
160
+ }
161
+ }
162
+ }
163
+ return $pyvenvConfig
164
+ }
165
+
166
+
167
+ <# Begin Activate script --------------------------------------------------- #>
168
+
169
+ # Determine the containing directory of this script
170
+ $VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
171
+ $VenvExecDir = Get-Item -Path $VenvExecPath
172
+
173
+ Write-Verbose "Activation script is located in path: '$VenvExecPath'"
174
+ Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
175
+ Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"
176
+
177
+ # Set values required in priority: CmdLine, ConfigFile, Default
178
+ # First, get the location of the virtual environment, it might not be
179
+ # VenvExecDir if specified on the command line.
180
+ if ($VenvDir) {
181
+ Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
182
+ }
183
+ else {
184
+ Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
185
+ $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
186
+ Write-Verbose "VenvDir=$VenvDir"
187
+ }
188
+
189
+ # Next, read the `pyvenv.cfg` file to determine any required value such
190
+ # as `prompt`.
191
+ $pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
192
+
193
+ # Next, set the prompt from the command line, or the config file, or
194
+ # just use the name of the virtual environment folder.
195
+ if ($Prompt) {
196
+ Write-Verbose "Prompt specified as argument, using '$Prompt'"
197
+ }
198
+ else {
199
+ Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
200
+ if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
201
+ Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
202
+ $Prompt = $pyvenvCfg['prompt'];
203
+ }
204
+ else {
205
+ Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
206
+ Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
207
+ $Prompt = Split-Path -Path $venvDir -Leaf
208
+ }
209
+ }
210
+
211
+ Write-Verbose "Prompt = '$Prompt'"
212
+ Write-Verbose "VenvDir='$VenvDir'"
213
+
214
+ # Deactivate any currently active virtual environment, but leave the
215
+ # deactivate function in place.
216
+ deactivate -nondestructive
217
+
218
+ # Now set the environment variable VIRTUAL_ENV, used by many tools to determine
219
+ # that there is an activated venv.
220
+ $env:VIRTUAL_ENV = $VenvDir
221
+
222
+ if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
223
+
224
+ Write-Verbose "Setting prompt to '$Prompt'"
225
+
226
+ # Set the prompt to include the env name
227
+ # Make sure _OLD_VIRTUAL_PROMPT is global
228
+ function global:_OLD_VIRTUAL_PROMPT { "" }
229
+ Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
230
+ New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
231
+
232
+ function global:prompt {
233
+ Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
234
+ _OLD_VIRTUAL_PROMPT
235
+ }
236
+ $env:VIRTUAL_ENV_PROMPT = $Prompt
237
+ }
238
+
239
+ # Clear PYTHONHOME
240
+ if (Test-Path -Path Env:PYTHONHOME) {
241
+ Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
242
+ Remove-Item -Path Env:PYTHONHOME
243
+ }
244
+
245
+ # Add the venv to the PATH
246
+ Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
247
+ $Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"
cmrithackathon-master/.venv/bin/activate ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file must be used with "source bin/activate" *from bash*
2
+ # you cannot run it directly
3
+
4
+ deactivate () {
5
+ # reset old environment variables
6
+ if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
7
+ PATH="${_OLD_VIRTUAL_PATH:-}"
8
+ export PATH
9
+ unset _OLD_VIRTUAL_PATH
10
+ fi
11
+ if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
12
+ PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
13
+ export PYTHONHOME
14
+ unset _OLD_VIRTUAL_PYTHONHOME
15
+ fi
16
+
17
+ # This should detect bash and zsh, which have a hash command that must
18
+ # be called to get it to forget past commands. Without forgetting
19
+ # past commands the $PATH changes we made may not be respected
20
+ if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
21
+ hash -r 2> /dev/null
22
+ fi
23
+
24
+ if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
25
+ PS1="${_OLD_VIRTUAL_PS1:-}"
26
+ export PS1
27
+ unset _OLD_VIRTUAL_PS1
28
+ fi
29
+
30
+ unset VIRTUAL_ENV
31
+ unset VIRTUAL_ENV_PROMPT
32
+ if [ ! "${1:-}" = "nondestructive" ] ; then
33
+ # Self destruct!
34
+ unset -f deactivate
35
+ fi
36
+ }
37
+
38
+ # unset irrelevant variables
39
+ deactivate nondestructive
40
+
41
+ VIRTUAL_ENV="/home/nareshkarthigeyan/Naresh/cs/code/cmrithackathon/.venv"
42
+ export VIRTUAL_ENV
43
+
44
+ _OLD_VIRTUAL_PATH="$PATH"
45
+ PATH="$VIRTUAL_ENV/bin:$PATH"
46
+ export PATH
47
+
48
+ # unset PYTHONHOME if set
49
+ # this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
50
+ # could use `if (set -u; : $PYTHONHOME) ;` in bash
51
+ if [ -n "${PYTHONHOME:-}" ] ; then
52
+ _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
53
+ unset PYTHONHOME
54
+ fi
55
+
56
+ if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
57
+ _OLD_VIRTUAL_PS1="${PS1:-}"
58
+ PS1="(.venv) ${PS1:-}"
59
+ export PS1
60
+ VIRTUAL_ENV_PROMPT="(.venv) "
61
+ export VIRTUAL_ENV_PROMPT
62
+ fi
63
+
64
+ # This should detect bash and zsh, which have a hash command that must
65
+ # be called to get it to forget past commands. Without forgetting
66
+ # past commands the $PATH changes we made may not be respected
67
+ if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
68
+ hash -r 2> /dev/null
69
+ fi
cmrithackathon-master/.venv/bin/activate.csh ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file must be used with "source bin/activate.csh" *from csh*.
2
+ # You cannot run it directly.
3
+ # Created by Davide Di Blasi <davidedb@gmail.com>.
4
+ # Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>
5
+
6
+ alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate'
7
+
8
+ # Unset irrelevant variables.
9
+ deactivate nondestructive
10
+
11
+ setenv VIRTUAL_ENV "/home/nareshkarthigeyan/Naresh/cs/code/cmrithackathon/.venv"
12
+
13
+ set _OLD_VIRTUAL_PATH="$PATH"
14
+ setenv PATH "$VIRTUAL_ENV/bin:$PATH"
15
+
16
+
17
+ set _OLD_VIRTUAL_PROMPT="$prompt"
18
+
19
+ if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
20
+ set prompt = "(.venv) $prompt"
21
+ setenv VIRTUAL_ENV_PROMPT "(.venv) "
22
+ endif
23
+
24
+ alias pydoc python -m pydoc
25
+
26
+ rehash
cmrithackathon-master/.venv/bin/activate.fish ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file must be used with "source <venv>/bin/activate.fish" *from fish*
2
+ # (https://fishshell.com/); you cannot run it directly.
3
+
4
+ function deactivate -d "Exit virtual environment and return to normal shell environment"
5
+ # reset old environment variables
6
+ if test -n "$_OLD_VIRTUAL_PATH"
7
+ set -gx PATH $_OLD_VIRTUAL_PATH
8
+ set -e _OLD_VIRTUAL_PATH
9
+ end
10
+ if test -n "$_OLD_VIRTUAL_PYTHONHOME"
11
+ set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
12
+ set -e _OLD_VIRTUAL_PYTHONHOME
13
+ end
14
+
15
+ if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
16
+ set -e _OLD_FISH_PROMPT_OVERRIDE
17
+ # prevents error when using nested fish instances (Issue #93858)
18
+ if functions -q _old_fish_prompt
19
+ functions -e fish_prompt
20
+ functions -c _old_fish_prompt fish_prompt
21
+ functions -e _old_fish_prompt
22
+ end
23
+ end
24
+
25
+ set -e VIRTUAL_ENV
26
+ set -e VIRTUAL_ENV_PROMPT
27
+ if test "$argv[1]" != "nondestructive"
28
+ # Self-destruct!
29
+ functions -e deactivate
30
+ end
31
+ end
32
+
33
+ # Unset irrelevant variables.
34
+ deactivate nondestructive
35
+
36
+ set -gx VIRTUAL_ENV "/home/nareshkarthigeyan/Naresh/cs/code/cmrithackathon/.venv"
37
+
38
+ set -gx _OLD_VIRTUAL_PATH $PATH
39
+ set -gx PATH "$VIRTUAL_ENV/bin" $PATH
40
+
41
+ # Unset PYTHONHOME if set.
42
+ if set -q PYTHONHOME
43
+ set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
44
+ set -e PYTHONHOME
45
+ end
46
+
47
+ if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
48
+ # fish uses a function instead of an env var to generate the prompt.
49
+
50
+ # Save the current fish_prompt function as the function _old_fish_prompt.
51
+ functions -c fish_prompt _old_fish_prompt
52
+
53
+ # With the original prompt function renamed, we can override with our own.
54
+ function fish_prompt
55
+ # Save the return status of the last command.
56
+ set -l old_status $status
57
+
58
+ # Output the venv prompt; color taken from the blue of the Python logo.
59
+ printf "%s%s%s" (set_color 4B8BBE) "(.venv) " (set_color normal)
60
+
61
+ # Restore the return status of the previous command.
62
+ echo "exit $old_status" | .
63
+ # Output the original/"old" prompt.
64
+ _old_fish_prompt
65
+ end
66
+
67
+ set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
68
+ set -gx VIRTUAL_ENV_PROMPT "(.venv) "
69
+ end
cmrithackathon-master/.venv/bin/f2py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ #!/home/nareshkarthigeyan/Naresh/cs/code/cmrithackathon/.venv/bin/python
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from numpy.f2py.f2py2e import main
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(main())
cmrithackathon-master/.venv/bin/normalizer ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ #!/home/nareshkarthigeyan/Naresh/cs/code/cmrithackathon/.venv/bin/python
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from charset_normalizer.cli import cli_detect
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(cli_detect())
cmrithackathon-master/.venv/bin/numpy-config ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ #!/home/nareshkarthigeyan/Naresh/cs/code/cmrithackathon/.venv/bin/python
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from numpy._configtool import main
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(main())
cmrithackathon-master/.venv/bin/pip ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ #!/home/nareshkarthigeyan/Naresh/cs/code/cmrithackathon/.venv/bin/python
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from pip._internal.cli.main import main
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(main())
cmrithackathon-master/.venv/bin/pip3 ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ #!/home/nareshkarthigeyan/Naresh/cs/code/cmrithackathon/.venv/bin/python
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from pip._internal.cli.main import main
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(main())
cmrithackathon-master/.venv/bin/pip3.11 ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ #!/home/nareshkarthigeyan/Naresh/cs/code/cmrithackathon/.venv/bin/python
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from pip._internal.cli.main import main
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(main())
cmrithackathon-master/.venv/bin/python ADDED
@@ -0,0 +1 @@
 
 
1
+ python3
cmrithackathon-master/.venv/bin/python3 ADDED
@@ -0,0 +1 @@
 
 
1
+ /bin/python3
cmrithackathon-master/.venv/bin/python3.11 ADDED
@@ -0,0 +1 @@
 
 
1
+ python3
cmrithackathon-master/.venv/bin/tqdm ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ #!/home/nareshkarthigeyan/Naresh/cs/code/cmrithackathon/.venv/bin/python
2
+ # -*- coding: utf-8 -*-
3
+ import re
4
+ import sys
5
+ from tqdm.cli import main
6
+ if __name__ == '__main__':
7
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
8
+ sys.exit(main())
cmrithackathon-master/.venv/lib/python3.11/site-packages/__pycache__/six.cpython-311.pyc ADDED
Binary file (46.4 kB). View file
 
cmrithackathon-master/.venv/lib/python3.11/site-packages/_distutils_hack/__init__.py ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # don't import any costly modules
2
+ import sys
3
+ import os
4
+
5
+
6
+ is_pypy = '__pypy__' in sys.builtin_module_names
7
+
8
+
9
+ def warn_distutils_present():
10
+ if 'distutils' not in sys.modules:
11
+ return
12
+ if is_pypy and sys.version_info < (3, 7):
13
+ # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
14
+ # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
15
+ return
16
+ import warnings
17
+
18
+ warnings.warn(
19
+ "Distutils was imported before Setuptools, but importing Setuptools "
20
+ "also replaces the `distutils` module in `sys.modules`. This may lead "
21
+ "to undesirable behaviors or errors. To avoid these issues, avoid "
22
+ "using distutils directly, ensure that setuptools is installed in the "
23
+ "traditional way (e.g. not an editable install), and/or make sure "
24
+ "that setuptools is always imported before distutils."
25
+ )
26
+
27
+
28
+ def clear_distutils():
29
+ if 'distutils' not in sys.modules:
30
+ return
31
+ import warnings
32
+
33
+ warnings.warn("Setuptools is replacing distutils.")
34
+ mods = [
35
+ name
36
+ for name in sys.modules
37
+ if name == "distutils" or name.startswith("distutils.")
38
+ ]
39
+ for name in mods:
40
+ del sys.modules[name]
41
+
42
+
43
+ def enabled():
44
+ """
45
+ Allow selection of distutils by environment variable.
46
+ """
47
+ which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
48
+ return which == 'local'
49
+
50
+
51
+ def ensure_local_distutils():
52
+ import importlib
53
+
54
+ clear_distutils()
55
+
56
+ # With the DistutilsMetaFinder in place,
57
+ # perform an import to cause distutils to be
58
+ # loaded from setuptools._distutils. Ref #2906.
59
+ with shim():
60
+ importlib.import_module('distutils')
61
+
62
+ # check that submodules load as expected
63
+ core = importlib.import_module('distutils.core')
64
+ assert '_distutils' in core.__file__, core.__file__
65
+ assert 'setuptools._distutils.log' not in sys.modules
66
+
67
+
68
+ def do_override():
69
+ """
70
+ Ensure that the local copy of distutils is preferred over stdlib.
71
+
72
+ See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
73
+ for more motivation.
74
+ """
75
+ if enabled():
76
+ warn_distutils_present()
77
+ ensure_local_distutils()
78
+
79
+
80
+ class _TrivialRe:
81
+ def __init__(self, *patterns):
82
+ self._patterns = patterns
83
+
84
+ def match(self, string):
85
+ return all(pat in string for pat in self._patterns)
86
+
87
+
88
+ class DistutilsMetaFinder:
89
+ def find_spec(self, fullname, path, target=None):
90
+ # optimization: only consider top level modules and those
91
+ # found in the CPython test suite.
92
+ if path is not None and not fullname.startswith('test.'):
93
+ return
94
+
95
+ method_name = 'spec_for_{fullname}'.format(**locals())
96
+ method = getattr(self, method_name, lambda: None)
97
+ return method()
98
+
99
+ def spec_for_distutils(self):
100
+ if self.is_cpython():
101
+ return
102
+
103
+ import importlib
104
+ import importlib.abc
105
+ import importlib.util
106
+
107
+ try:
108
+ mod = importlib.import_module('setuptools._distutils')
109
+ except Exception:
110
+ # There are a couple of cases where setuptools._distutils
111
+ # may not be present:
112
+ # - An older Setuptools without a local distutils is
113
+ # taking precedence. Ref #2957.
114
+ # - Path manipulation during sitecustomize removes
115
+ # setuptools from the path but only after the hook
116
+ # has been loaded. Ref #2980.
117
+ # In either case, fall back to stdlib behavior.
118
+ return
119
+
120
+ class DistutilsLoader(importlib.abc.Loader):
121
+ def create_module(self, spec):
122
+ mod.__name__ = 'distutils'
123
+ return mod
124
+
125
+ def exec_module(self, module):
126
+ pass
127
+
128
+ return importlib.util.spec_from_loader(
129
+ 'distutils', DistutilsLoader(), origin=mod.__file__
130
+ )
131
+
132
+ @staticmethod
133
+ def is_cpython():
134
+ """
135
+ Suppress supplying distutils for CPython (build and tests).
136
+ Ref #2965 and #3007.
137
+ """
138
+ return os.path.isfile('pybuilddir.txt')
139
+
140
+ def spec_for_pip(self):
141
+ """
142
+ Ensure stdlib distutils when running under pip.
143
+ See pypa/pip#8761 for rationale.
144
+ """
145
+ if self.pip_imported_during_build():
146
+ return
147
+ clear_distutils()
148
+ self.spec_for_distutils = lambda: None
149
+
150
+ @classmethod
151
+ def pip_imported_during_build(cls):
152
+ """
153
+ Detect if pip is being imported in a build script. Ref #2355.
154
+ """
155
+ import traceback
156
+
157
+ return any(
158
+ cls.frame_file_is_setup(frame) for frame, line in traceback.walk_stack(None)
159
+ )
160
+
161
+ @staticmethod
162
+ def frame_file_is_setup(frame):
163
+ """
164
+ Return True if the indicated frame suggests a setup.py file.
165
+ """
166
+ # some frames may not have __file__ (#2940)
167
+ return frame.f_globals.get('__file__', '').endswith('setup.py')
168
+
169
+ def spec_for_sensitive_tests(self):
170
+ """
171
+ Ensure stdlib distutils when running select tests under CPython.
172
+
173
+ python/cpython#91169
174
+ """
175
+ clear_distutils()
176
+ self.spec_for_distutils = lambda: None
177
+
178
+ sensitive_tests = (
179
+ [
180
+ 'test.test_distutils',
181
+ 'test.test_peg_generator',
182
+ 'test.test_importlib',
183
+ ]
184
+ if sys.version_info < (3, 10)
185
+ else [
186
+ 'test.test_distutils',
187
+ ]
188
+ )
189
+
190
+
191
+ for name in DistutilsMetaFinder.sensitive_tests:
192
+ setattr(
193
+ DistutilsMetaFinder,
194
+ f'spec_for_{name}',
195
+ DistutilsMetaFinder.spec_for_sensitive_tests,
196
+ )
197
+
198
+
199
+ DISTUTILS_FINDER = DistutilsMetaFinder()
200
+
201
+
202
+ def add_shim():
203
+ DISTUTILS_FINDER in sys.meta_path or insert_shim()
204
+
205
+
206
+ class shim:
207
+ def __enter__(self):
208
+ insert_shim()
209
+
210
+ def __exit__(self, exc, value, tb):
211
+ remove_shim()
212
+
213
+
214
+ def insert_shim():
215
+ sys.meta_path.insert(0, DISTUTILS_FINDER)
216
+
217
+
218
+ def remove_shim():
219
+ try:
220
+ sys.meta_path.remove(DISTUTILS_FINDER)
221
+ except ValueError:
222
+ pass
cmrithackathon-master/.venv/lib/python3.11/site-packages/_distutils_hack/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (11.2 kB). View file
 
cmrithackathon-master/.venv/lib/python3.11/site-packages/_distutils_hack/__pycache__/override.cpython-311.pyc ADDED
Binary file (347 Bytes). View file
 
cmrithackathon-master/.venv/lib/python3.11/site-packages/_distutils_hack/override.py ADDED
@@ -0,0 +1 @@
 
 
1
+ __import__('_distutils_hack').do_override()
cmrithackathon-master/.venv/lib/python3.11/site-packages/beautifulsoup4-4.12.3.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
cmrithackathon-master/.venv/lib/python3.11/site-packages/beautifulsoup4-4.12.3.dist-info/METADATA ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: beautifulsoup4
3
+ Version: 4.12.3
4
+ Summary: Screen-scraping library
5
+ Project-URL: Download, https://www.crummy.com/software/BeautifulSoup/bs4/download/
6
+ Project-URL: Homepage, https://www.crummy.com/software/BeautifulSoup/bs4/
7
+ Author-email: Leonard Richardson <leonardr@segfault.org>
8
+ License: MIT License
9
+ License-File: AUTHORS
10
+ License-File: LICENSE
11
+ Keywords: HTML,XML,parse,soup
12
+ Classifier: Development Status :: 5 - Production/Stable
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Programming Language :: Python
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
18
+ Classifier: Topic :: Text Processing :: Markup :: HTML
19
+ Classifier: Topic :: Text Processing :: Markup :: SGML
20
+ Classifier: Topic :: Text Processing :: Markup :: XML
21
+ Requires-Python: >=3.6.0
22
+ Requires-Dist: soupsieve>1.2
23
+ Provides-Extra: cchardet
24
+ Requires-Dist: cchardet; extra == 'cchardet'
25
+ Provides-Extra: chardet
26
+ Requires-Dist: chardet; extra == 'chardet'
27
+ Provides-Extra: charset-normalizer
28
+ Requires-Dist: charset-normalizer; extra == 'charset-normalizer'
29
+ Provides-Extra: html5lib
30
+ Requires-Dist: html5lib; extra == 'html5lib'
31
+ Provides-Extra: lxml
32
+ Requires-Dist: lxml; extra == 'lxml'
33
+ Description-Content-Type: text/markdown
34
+
35
+ Beautiful Soup is a library that makes it easy to scrape information
36
+ from web pages. It sits atop an HTML or XML parser, providing Pythonic
37
+ idioms for iterating, searching, and modifying the parse tree.
38
+
39
+ # Quick start
40
+
41
+ ```
42
+ >>> from bs4 import BeautifulSoup
43
+ >>> soup = BeautifulSoup("<p>Some<b>bad<i>HTML")
44
+ >>> print(soup.prettify())
45
+ <html>
46
+ <body>
47
+ <p>
48
+ Some
49
+ <b>
50
+ bad
51
+ <i>
52
+ HTML
53
+ </i>
54
+ </b>
55
+ </p>
56
+ </body>
57
+ </html>
58
+ >>> soup.find(text="bad")
59
+ 'bad'
60
+ >>> soup.i
61
+ <i>HTML</i>
62
+ #
63
+ >>> soup = BeautifulSoup("<tag1>Some<tag2/>bad<tag3>XML", "xml")
64
+ #
65
+ >>> print(soup.prettify())
66
+ <?xml version="1.0" encoding="utf-8"?>
67
+ <tag1>
68
+ Some
69
+ <tag2/>
70
+ bad
71
+ <tag3>
72
+ XML
73
+ </tag3>
74
+ </tag1>
75
+ ```
76
+
77
+ To go beyond the basics, [comprehensive documentation is available](https://www.crummy.com/software/BeautifulSoup/bs4/doc/).
78
+
79
+ # Links
80
+
81
+ * [Homepage](https://www.crummy.com/software/BeautifulSoup/bs4/)
82
+ * [Documentation](https://www.crummy.com/software/BeautifulSoup/bs4/doc/)
83
+ * [Discussion group](https://groups.google.com/group/beautifulsoup/)
84
+ * [Development](https://code.launchpad.net/beautifulsoup/)
85
+ * [Bug tracker](https://bugs.launchpad.net/beautifulsoup/)
86
+ * [Complete changelog](https://bazaar.launchpad.net/~leonardr/beautifulsoup/bs4/view/head:/CHANGELOG)
87
+
88
+ # Note on Python 2 sunsetting
89
+
90
+ Beautiful Soup's support for Python 2 was discontinued on December 31,
91
+ 2020: one year after the sunset date for Python 2 itself. From this
92
+ point onward, new Beautiful Soup development will exclusively target
93
+ Python 3. The final release of Beautiful Soup 4 to support Python 2
94
+ was 4.9.3.
95
+
96
+ # Supporting the project
97
+
98
+ If you use Beautiful Soup as part of your professional work, please consider a
99
+ [Tidelift subscription](https://tidelift.com/subscription/pkg/pypi-beautifulsoup4?utm_source=pypi-beautifulsoup4&utm_medium=referral&utm_campaign=readme).
100
+ This will support many of the free software projects your organization
101
+ depends on, not just Beautiful Soup.
102
+
103
+ If you use Beautiful Soup for personal projects, the best way to say
104
+ thank you is to read
105
+ [Tool Safety](https://www.crummy.com/software/BeautifulSoup/zine/), a zine I
106
+ wrote about what Beautiful Soup has taught me about software
107
+ development.
108
+
109
+ # Building the documentation
110
+
111
+ The bs4/doc/ directory contains full documentation in Sphinx
112
+ format. Run `make html` in that directory to create HTML
113
+ documentation.
114
+
115
+ # Running the unit tests
116
+
117
+ Beautiful Soup supports unit test discovery using Pytest:
118
+
119
+ ```
120
+ $ pytest
121
+ ```
122
+
cmrithackathon-master/.venv/lib/python3.11/site-packages/beautifulsoup4-4.12.3.dist-info/RECORD ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ beautifulsoup4-4.12.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ beautifulsoup4-4.12.3.dist-info/METADATA,sha256=UkOS1koIjlakIy9Q1u2yCNwDEFOUZSrLcsbV-mTInz4,3790
3
+ beautifulsoup4-4.12.3.dist-info/RECORD,,
4
+ beautifulsoup4-4.12.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
5
+ beautifulsoup4-4.12.3.dist-info/WHEEL,sha256=mRYSEL3Ih6g5a_CVMIcwiF__0Ae4_gLYh01YFNwiq1k,87
6
+ beautifulsoup4-4.12.3.dist-info/licenses/AUTHORS,sha256=uSIdbrBb1sobdXl7VrlUvuvim2dN9kF3MH4Edn0WKGE,2176
7
+ beautifulsoup4-4.12.3.dist-info/licenses/LICENSE,sha256=VbTY1LHlvIbRDvrJG3TIe8t3UmsPW57a-LnNKtxzl7I,1441
8
+ bs4/__init__.py,sha256=kq32cCtQiNjjU9XwjD0b1jdXN5WEC87nJqSSW3PhVkM,33822
9
+ bs4/__pycache__/__init__.cpython-311.pyc,,
10
+ bs4/__pycache__/css.cpython-311.pyc,,
11
+ bs4/__pycache__/dammit.cpython-311.pyc,,
12
+ bs4/__pycache__/diagnose.cpython-311.pyc,,
13
+ bs4/__pycache__/element.cpython-311.pyc,,
14
+ bs4/__pycache__/formatter.cpython-311.pyc,,
15
+ bs4/builder/__init__.py,sha256=nwb35ftjcwzOs2WkjVm1zvfi7FxSyJP-nN1YheIVT14,24566
16
+ bs4/builder/__pycache__/__init__.cpython-311.pyc,,
17
+ bs4/builder/__pycache__/_html5lib.cpython-311.pyc,,
18
+ bs4/builder/__pycache__/_htmlparser.cpython-311.pyc,,
19
+ bs4/builder/__pycache__/_lxml.cpython-311.pyc,,
20
+ bs4/builder/_html5lib.py,sha256=0w-hmPM5wWR2iDuRCR6MvY6ZPXbg_hgddym-YWqj03s,19114
21
+ bs4/builder/_htmlparser.py,sha256=_VD5Z08j6A9YYMR4y7ZTfdMzwiCBsSUQAPuHiYB-WZI,14923
22
+ bs4/builder/_lxml.py,sha256=yKdMx1kdX7H2CopwSWEYm4Sgrfkd-WDj8HbskcaLauU,14948
23
+ bs4/css.py,sha256=gqGaHRrKeCRF3gDqxzeU0uclOCeSsTpuW9gUaSnJeWc,10077
24
+ bs4/dammit.py,sha256=G0cQfsEqfwJ-FIQMkXgCJwSHMn7t9vPepCrud6fZEKk,41158
25
+ bs4/diagnose.py,sha256=uAwdDugL_67tB-BIwDIFLFbiuzGxP2wQzJJ4_bGYUrA,7195
26
+ bs4/element.py,sha256=Dsol2iehkSjk10GzYgwFyjUEgpqmYZpyaAmbL0rWM2w,92845
27
+ bs4/formatter.py,sha256=Bu4utAQYT9XDJaPPpTRM-dyxJDVLdxf_as-IU5gSY8A,7188
28
+ bs4/tests/__init__.py,sha256=NydTegds_r7MoOEuQLS6TFmTA9TwK3KxJhwEkqjCGTQ,48392
29
+ bs4/tests/__pycache__/__init__.cpython-311.pyc,,
30
+ bs4/tests/__pycache__/test_builder.cpython-311.pyc,,
31
+ bs4/tests/__pycache__/test_builder_registry.cpython-311.pyc,,
32
+ bs4/tests/__pycache__/test_css.cpython-311.pyc,,
33
+ bs4/tests/__pycache__/test_dammit.cpython-311.pyc,,
34
+ bs4/tests/__pycache__/test_docs.cpython-311.pyc,,
35
+ bs4/tests/__pycache__/test_element.cpython-311.pyc,,
36
+ bs4/tests/__pycache__/test_formatter.cpython-311.pyc,,
37
+ bs4/tests/__pycache__/test_fuzz.cpython-311.pyc,,
38
+ bs4/tests/__pycache__/test_html5lib.cpython-311.pyc,,
39
+ bs4/tests/__pycache__/test_htmlparser.cpython-311.pyc,,
40
+ bs4/tests/__pycache__/test_lxml.cpython-311.pyc,,
41
+ bs4/tests/__pycache__/test_navigablestring.cpython-311.pyc,,
42
+ bs4/tests/__pycache__/test_pageelement.cpython-311.pyc,,
43
+ bs4/tests/__pycache__/test_soup.cpython-311.pyc,,
44
+ bs4/tests/__pycache__/test_tag.cpython-311.pyc,,
45
+ bs4/tests/__pycache__/test_tree.cpython-311.pyc,,
46
+ bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256.testcase,sha256=yUdXkbpNK7LVOQ0LBHMoqZ1rWaBfSXWytoO_xdSm7Ho,15
47
+ bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4818336571064320.testcase,sha256=Uv_dx4a43TSfoNkjU-jHW2nSXkqHFg4XdAw7SWVObUk,23
48
+ bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4999465949331456.testcase,sha256=OEyVA0Ej4FxswOElrUNt0In4s4YhrmtaxE_NHGZvGtg,30
49
+ bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5000587759190016.testcase,sha256=G4vpNBOz-RwMpi6ewEgNEa13zX0sXhmL7VHOyIcdKVQ,15347
50
+ bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5167584867909632.testcase,sha256=3d8z65o4p7Rur-RmCHoOjzqaYQ8EAtjmiBYTHNyAdl4,19469
51
+ bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5270998950477824.testcase,sha256=NfGIlit1k40Ip3mlnBkYOkIDJX6gHtjlErwl7gsBjAQ,12
52
+ bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5375146639360000.testcase,sha256=xy4i1U0nhFHcnyc5pRKS6JRMvuoCNUur-Scor6UxIGw,4317
53
+ bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624.testcase,sha256=Q-UTYpQBUsWoMgIUspUlzveSI-41s4ABC3jajRb-K0o,11502
54
+ bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5703933063462912.testcase,sha256=2bq3S8KxZgk8EajLReHD8m4_0Lj_nrkyJAxB_z_U0D0,5
55
+ bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5843991618256896.testcase,sha256=MZDu31LPLfgu6jP9IZkrlwNes3f_sL8WFP5BChkUKdY,35
56
+ bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5984173902397440.testcase,sha256=w58r-s6besG5JwPXpnz37W2YTj9-_qxFbk6hiEnKeIQ,51495
57
+ bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6124268085182464.testcase,sha256=q8rkdMECEXKcqVhOf5zWHkSBTQeOPt0JiLg2TZiPCuk,10380
58
+ bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6241471367348224.testcase,sha256=QfzoOxKwNuqG-4xIrea6MOQLXhfAAOQJ0r9u-J6kSNs,19
59
+ bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6306874195312640.testcase,sha256=MJ2pHFuuCQUiQz1Kor2sof7LWeRERQ6QK43YNqQHg9o,47
60
+ bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6450958476902400.testcase,sha256=EItOpSdeD4ewK-qgJ9vtxennwn_huguzXgctrUT7fqE,3546
61
+ bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6600557255327744.testcase,sha256=a2aJTG4FceGSJXsjtxoS8S4jk_8rZsS3aznLkeO2_dY,124
62
+ bs4/tests/fuzz/crash-0d306a50c8ed8bcd0785b67000fcd5dea1d33f08.testcase,sha256=jRFRtCKlP3-3EDLc_iVRTcE6JNymv0rYcVM6qRaPrxI,2607
63
+ bs4/tests/fuzz/crash-ffbdfa8a2b26f13537b68d3794b0478a4090ee4a.testcase,sha256=7NsdCiXWAhNkmoW1pvF7rbZExyLAQIWtDtSHXIsH6YU,103
64
+ bs4/tests/test_builder.py,sha256=nc2JE5EMrEf-p24qhf2R8qAV5PpFiOuNpYCmtmCjlTI,1115
65
+ bs4/tests/test_builder_registry.py,sha256=7WLj2prjSHGphebnrjQuI6JYr03Uy_c9_CkaFSQ9HRo,5114
66
+ bs4/tests/test_css.py,sha256=jCcgIWem3lyPa5AjhAk9S6fWI07hk1rg0v8coD7bEtI,17279
67
+ bs4/tests/test_dammit.py,sha256=MbSmRN6VEP0Rm56-w6Ja0TW8eC-8ZxOJ-wXWVf_hRi8,15451
68
+ bs4/tests/test_docs.py,sha256=xoAxnUfoQ7aRqGImwW_9BJDU8WNMZHIuvWqVepvWXt8,1127
69
+ bs4/tests/test_element.py,sha256=92oRSRoGk8gIXAbAGHErKzocx2MK32TqcQdUJ-dGQMo,2377
70
+ bs4/tests/test_formatter.py,sha256=eTzj91Lmhv90z-WiHjK3sBJZm0hRk0crFY1TZaXstCY,4148
71
+ bs4/tests/test_fuzz.py,sha256=_K2utiYVkZ22mvh03g8CBioFU1QDJaff1vTaDyXhxNk,6972
72
+ bs4/tests/test_html5lib.py,sha256=2-ipm-_MaPt37WTxEd5DodUTNhS4EbLFKPRaO6XSCW4,8322
73
+ bs4/tests/test_htmlparser.py,sha256=wnngcIlzjEwH21JFfu_mgt6JdpLt0ncJfLcGT7HeGw0,6256
74
+ bs4/tests/test_lxml.py,sha256=nQCmLt7bWk0id7xMumZw--PzEe1xF9PTQn3lvHyNC6I,7635
75
+ bs4/tests/test_navigablestring.py,sha256=RGSgziNf7cZnYdEPsoqL1B2I68TUJp1JmEQVxbh_ryA,5081
76
+ bs4/tests/test_pageelement.py,sha256=VdGjUxx3RhjqmNsJ92ao6VZC_YD7T8mdLkDZjosOYeE,14274
77
+ bs4/tests/test_soup.py,sha256=JmnAPLE1_GXm0wmwEUN7icdvBz9HDch-qoU2mT_TDrs,19877
78
+ bs4/tests/test_tag.py,sha256=FBPDUisDCbFmvl5HmTtN49CGo3YoUXh5Wiuw5FMLS5E,9616
79
+ bs4/tests/test_tree.py,sha256=n9nTQOzJb3-ZnZ6AkmMdZQ5TYcTUPnqHoVgal0mYXfg,48129
cmrithackathon-master/.venv/lib/python3.11/site-packages/beautifulsoup4-4.12.3.dist-info/REQUESTED ADDED
File without changes
cmrithackathon-master/.venv/lib/python3.11/site-packages/beautifulsoup4-4.12.3.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.21.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
cmrithackathon-master/.venv/lib/python3.11/site-packages/beautifulsoup4-4.12.3.dist-info/licenses/AUTHORS ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Behold, mortal, the origins of Beautiful Soup...
2
+ ================================================
3
+
4
+ Leonard Richardson is the primary maintainer.
5
+
6
+ Aaron DeVore and Isaac Muse have made significant contributions to the
7
+ code base.
8
+
9
+ Mark Pilgrim provided the encoding detection code that forms the base
10
+ of UnicodeDammit.
11
+
12
+ Thomas Kluyver and Ezio Melotti finished the work of getting Beautiful
13
+ Soup 4 working under Python 3.
14
+
15
+ Simon Willison wrote soupselect, which was used to make Beautiful Soup
16
+ support CSS selectors. Isaac Muse wrote SoupSieve, which made it
17
+ possible to _remove_ the CSS selector code from Beautiful Soup.
18
+
19
+ Sam Ruby helped with a lot of edge cases.
20
+
21
+ Jonathan Ellis was awarded the prestigious Beau Potage D'Or for his
22
+ work in solving the nestable tags conundrum.
23
+
24
+ An incomplete list of people have contributed patches to Beautiful
25
+ Soup:
26
+
27
+ Istvan Albert, Andrew Lin, Anthony Baxter, Oliver Beattie, Andrew
28
+ Boyko, Tony Chang, Francisco Canas, "Delong", Zephyr Fang, Fuzzy,
29
+ Roman Gaufman, Yoni Gilad, Richie Hindle, Toshihiro Kamiya, Peteris
30
+ Krumins, Kent Johnson, Marek Kapolka, Andreas Kostyrka, Roel Kramer,
31
+ Ben Last, Robert Leftwich, Stefaan Lippens, "liquider", Staffan
32
+ Malmgren, Ksenia Marasanova, JP Moins, Adam Monsen, John Nagle, "Jon",
33
+ Ed Oskiewicz, Martijn Peters, Greg Phillips, Giles Radford, Stefano
34
+ Revera, Arthur Rudolph, Marko Samastur, James Salter, Jouni Sepp�nen,
35
+ Alexander Schmolck, Tim Shirley, Geoffrey Sneddon, Ville Skytt�,
36
+ "Vikas", Jens Svalgaard, Andy Theyers, Eric Weiser, Glyn Webster, John
37
+ Wiseman, Paul Wright, Danny Yoo
38
+
39
+ An incomplete list of people who made suggestions or found bugs or
40
+ found ways to break Beautiful Soup:
41
+
42
+ Hanno B�ck, Matteo Bertini, Chris Curvey, Simon Cusack, Bruce Eckel,
43
+ Matt Ernst, Michael Foord, Tom Harris, Bill de hOra, Donald Howes,
44
+ Matt Patterson, Scott Roberts, Steve Strassmann, Mike Williams,
45
+ warchild at redho dot com, Sami Kuisma, Carlos Rocha, Bob Hutchison,
46
+ Joren Mc, Michal Migurski, John Kleven, Tim Heaney, Tripp Lilley, Ed
47
+ Summers, Dennis Sutch, Chris Smith, Aaron Swartz, Stuart
48
+ Turner, Greg Edwards, Kevin J Kalupson, Nikos Kouremenos, Artur de
49
+ Sousa Rocha, Yichun Wei, Per Vognsen
cmrithackathon-master/.venv/lib/python3.11/site-packages/beautifulsoup4-4.12.3.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Beautiful Soup is made available under the MIT license:
2
+
3
+ Copyright (c) Leonard Richardson
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining
6
+ a copy of this software and associated documentation files (the
7
+ "Software"), to deal in the Software without restriction, including
8
+ without limitation the rights to use, copy, modify, merge, publish,
9
+ distribute, sublicense, and/or sell copies of the Software, and to
10
+ permit persons to whom the Software is furnished to do so, subject to
11
+ the following conditions:
12
+
13
+ The above copyright notice and this permission notice shall be
14
+ included in all copies or substantial portions of the Software.
15
+
16
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
20
+ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
21
+ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
22
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23
+ SOFTWARE.
24
+
25
+ Beautiful Soup incorporates code from the html5lib library, which is
26
+ also made available under the MIT license. Copyright (c) James Graham
27
+ and other contributors
28
+
29
+ Beautiful Soup has an optional dependency on the soupsieve library,
30
+ which is also made available under the MIT license. Copyright (c)
31
+ Isaac Muse
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/__init__.py ADDED
@@ -0,0 +1,840 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Beautiful Soup Elixir and Tonic - "The Screen-Scraper's Friend".
2
+
3
+ http://www.crummy.com/software/BeautifulSoup/
4
+
5
+ Beautiful Soup uses a pluggable XML or HTML parser to parse a
6
+ (possibly invalid) document into a tree representation. Beautiful Soup
7
+ provides methods and Pythonic idioms that make it easy to navigate,
8
+ search, and modify the parse tree.
9
+
10
+ Beautiful Soup works with Python 3.6 and up. It works better if lxml
11
+ and/or html5lib is installed.
12
+
13
+ For more than you ever wanted to know about Beautiful Soup, see the
14
+ documentation: http://www.crummy.com/software/BeautifulSoup/bs4/doc/
15
+ """
16
+
17
+ __author__ = "Leonard Richardson (leonardr@segfault.org)"
18
+ __version__ = "4.12.3"
19
+ __copyright__ = "Copyright (c) 2004-2024 Leonard Richardson"
20
+ # Use of this source code is governed by the MIT license.
21
+ __license__ = "MIT"
22
+
23
+ __all__ = ['BeautifulSoup']
24
+
25
+ from collections import Counter
26
+ import os
27
+ import re
28
+ import sys
29
+ import traceback
30
+ import warnings
31
+
32
+ # The very first thing we do is give a useful error if someone is
33
+ # running this code under Python 2.
34
+ if sys.version_info.major < 3:
35
+ raise ImportError('You are trying to use a Python 3-specific version of Beautiful Soup under Python 2. This will not work. The final version of Beautiful Soup to support Python 2 was 4.9.3.')
36
+
37
+ from .builder import (
38
+ builder_registry,
39
+ ParserRejectedMarkup,
40
+ XMLParsedAsHTMLWarning,
41
+ HTMLParserTreeBuilder
42
+ )
43
+ from .dammit import UnicodeDammit
44
+ from .element import (
45
+ CData,
46
+ Comment,
47
+ CSS,
48
+ DEFAULT_OUTPUT_ENCODING,
49
+ Declaration,
50
+ Doctype,
51
+ NavigableString,
52
+ PageElement,
53
+ ProcessingInstruction,
54
+ PYTHON_SPECIFIC_ENCODINGS,
55
+ ResultSet,
56
+ Script,
57
+ Stylesheet,
58
+ SoupStrainer,
59
+ Tag,
60
+ TemplateString,
61
+ )
62
+
63
+ # Define some custom warnings.
64
+ class GuessedAtParserWarning(UserWarning):
65
+ """The warning issued when BeautifulSoup has to guess what parser to
66
+ use -- probably because no parser was specified in the constructor.
67
+ """
68
+
69
+ class MarkupResemblesLocatorWarning(UserWarning):
70
+ """The warning issued when BeautifulSoup is given 'markup' that
71
+ actually looks like a resource locator -- a URL or a path to a file
72
+ on disk.
73
+ """
74
+
75
+
76
+ class BeautifulSoup(Tag):
77
+ """A data structure representing a parsed HTML or XML document.
78
+
79
+ Most of the methods you'll call on a BeautifulSoup object are inherited from
80
+ PageElement or Tag.
81
+
82
+ Internally, this class defines the basic interface called by the
83
+ tree builders when converting an HTML/XML document into a data
84
+ structure. The interface abstracts away the differences between
85
+ parsers. To write a new tree builder, you'll need to understand
86
+ these methods as a whole.
87
+
88
+ These methods will be called by the BeautifulSoup constructor:
89
+ * reset()
90
+ * feed(markup)
91
+
92
+ The tree builder may call these methods from its feed() implementation:
93
+ * handle_starttag(name, attrs) # See note about return value
94
+ * handle_endtag(name)
95
+ * handle_data(data) # Appends to the current data node
96
+ * endData(containerClass) # Ends the current data node
97
+
98
+ No matter how complicated the underlying parser is, you should be
99
+ able to build a tree using 'start tag' events, 'end tag' events,
100
+ 'data' events, and "done with data" events.
101
+
102
+ If you encounter an empty-element tag (aka a self-closing tag,
103
+ like HTML's <br> tag), call handle_starttag and then
104
+ handle_endtag.
105
+ """
106
+
107
+ # Since BeautifulSoup subclasses Tag, it's possible to treat it as
108
+ # a Tag with a .name. This name makes it clear the BeautifulSoup
109
+ # object isn't a real markup tag.
110
+ ROOT_TAG_NAME = '[document]'
111
+
112
+ # If the end-user gives no indication which tree builder they
113
+ # want, look for one with these features.
114
+ DEFAULT_BUILDER_FEATURES = ['html', 'fast']
115
+
116
+ # A string containing all ASCII whitespace characters, used in
117
+ # endData() to detect data chunks that seem 'empty'.
118
+ ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'
119
+
120
+ NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, pass the additional argument 'features=\"%(parser)s\"' to the BeautifulSoup constructor.\n"
121
+
122
+ def __init__(self, markup="", features=None, builder=None,
123
+ parse_only=None, from_encoding=None, exclude_encodings=None,
124
+ element_classes=None, **kwargs):
125
+ """Constructor.
126
+
127
+ :param markup: A string or a file-like object representing
128
+ markup to be parsed.
129
+
130
+ :param features: Desirable features of the parser to be
131
+ used. This may be the name of a specific parser ("lxml",
132
+ "lxml-xml", "html.parser", or "html5lib") or it may be the
133
+ type of markup to be used ("html", "html5", "xml"). It's
134
+ recommended that you name a specific parser, so that
135
+ Beautiful Soup gives you the same results across platforms
136
+ and virtual environments.
137
+
138
+ :param builder: A TreeBuilder subclass to instantiate (or
139
+ instance to use) instead of looking one up based on
140
+ `features`. You only need to use this if you've implemented a
141
+ custom TreeBuilder.
142
+
143
+ :param parse_only: A SoupStrainer. Only parts of the document
144
+ matching the SoupStrainer will be considered. This is useful
145
+ when parsing part of a document that would otherwise be too
146
+ large to fit into memory.
147
+
148
+ :param from_encoding: A string indicating the encoding of the
149
+ document to be parsed. Pass this in if Beautiful Soup is
150
+ guessing wrongly about the document's encoding.
151
+
152
+ :param exclude_encodings: A list of strings indicating
153
+ encodings known to be wrong. Pass this in if you don't know
154
+ the document's encoding but you know Beautiful Soup's guess is
155
+ wrong.
156
+
157
+ :param element_classes: A dictionary mapping BeautifulSoup
158
+ classes like Tag and NavigableString, to other classes you'd
159
+ like to be instantiated instead as the parse tree is
160
+ built. This is useful for subclassing Tag or NavigableString
161
+ to modify default behavior.
162
+
163
+ :param kwargs: For backwards compatibility purposes, the
164
+ constructor accepts certain keyword arguments used in
165
+ Beautiful Soup 3. None of these arguments do anything in
166
+ Beautiful Soup 4; they will result in a warning and then be
167
+ ignored.
168
+
169
+ Apart from this, any keyword arguments passed into the
170
+ BeautifulSoup constructor are propagated to the TreeBuilder
171
+ constructor. This makes it possible to configure a
172
+ TreeBuilder by passing in arguments, not just by saying which
173
+ one to use.
174
+ """
175
+ if 'convertEntities' in kwargs:
176
+ del kwargs['convertEntities']
177
+ warnings.warn(
178
+ "BS4 does not respect the convertEntities argument to the "
179
+ "BeautifulSoup constructor. Entities are always converted "
180
+ "to Unicode characters.")
181
+
182
+ if 'markupMassage' in kwargs:
183
+ del kwargs['markupMassage']
184
+ warnings.warn(
185
+ "BS4 does not respect the markupMassage argument to the "
186
+ "BeautifulSoup constructor. The tree builder is responsible "
187
+ "for any necessary markup massage.")
188
+
189
+ if 'smartQuotesTo' in kwargs:
190
+ del kwargs['smartQuotesTo']
191
+ warnings.warn(
192
+ "BS4 does not respect the smartQuotesTo argument to the "
193
+ "BeautifulSoup constructor. Smart quotes are always converted "
194
+ "to Unicode characters.")
195
+
196
+ if 'selfClosingTags' in kwargs:
197
+ del kwargs['selfClosingTags']
198
+ warnings.warn(
199
+ "BS4 does not respect the selfClosingTags argument to the "
200
+ "BeautifulSoup constructor. The tree builder is responsible "
201
+ "for understanding self-closing tags.")
202
+
203
+ if 'isHTML' in kwargs:
204
+ del kwargs['isHTML']
205
+ warnings.warn(
206
+ "BS4 does not respect the isHTML argument to the "
207
+ "BeautifulSoup constructor. Suggest you use "
208
+ "features='lxml' for HTML and features='lxml-xml' for "
209
+ "XML.")
210
+
211
+ def deprecated_argument(old_name, new_name):
212
+ if old_name in kwargs:
213
+ warnings.warn(
214
+ 'The "%s" argument to the BeautifulSoup constructor '
215
+ 'has been renamed to "%s."' % (old_name, new_name),
216
+ DeprecationWarning, stacklevel=3
217
+ )
218
+ return kwargs.pop(old_name)
219
+ return None
220
+
221
+ parse_only = parse_only or deprecated_argument(
222
+ "parseOnlyThese", "parse_only")
223
+
224
+ from_encoding = from_encoding or deprecated_argument(
225
+ "fromEncoding", "from_encoding")
226
+
227
+ if from_encoding and isinstance(markup, str):
228
+ warnings.warn("You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored.")
229
+ from_encoding = None
230
+
231
+ self.element_classes = element_classes or dict()
232
+
233
+ # We need this information to track whether or not the builder
234
+ # was specified well enough that we can omit the 'you need to
235
+ # specify a parser' warning.
236
+ original_builder = builder
237
+ original_features = features
238
+
239
+ if isinstance(builder, type):
240
+ # A builder class was passed in; it needs to be instantiated.
241
+ builder_class = builder
242
+ builder = None
243
+ elif builder is None:
244
+ if isinstance(features, str):
245
+ features = [features]
246
+ if features is None or len(features) == 0:
247
+ features = self.DEFAULT_BUILDER_FEATURES
248
+ builder_class = builder_registry.lookup(*features)
249
+ if builder_class is None:
250
+ raise FeatureNotFound(
251
+ "Couldn't find a tree builder with the features you "
252
+ "requested: %s. Do you need to install a parser library?"
253
+ % ",".join(features))
254
+
255
+ # At this point either we have a TreeBuilder instance in
256
+ # builder, or we have a builder_class that we can instantiate
257
+ # with the remaining **kwargs.
258
+ if builder is None:
259
+ builder = builder_class(**kwargs)
260
+ if not original_builder and not (
261
+ original_features == builder.NAME or
262
+ original_features in builder.ALTERNATE_NAMES
263
+ ) and markup:
264
+ # The user did not tell us which TreeBuilder to use,
265
+ # and we had to guess. Issue a warning.
266
+ if builder.is_xml:
267
+ markup_type = "XML"
268
+ else:
269
+ markup_type = "HTML"
270
+
271
+ # This code adapted from warnings.py so that we get the same line
272
+ # of code as our warnings.warn() call gets, even if the answer is wrong
273
+ # (as it may be in a multithreading situation).
274
+ caller = None
275
+ try:
276
+ caller = sys._getframe(1)
277
+ except ValueError:
278
+ pass
279
+ if caller:
280
+ globals = caller.f_globals
281
+ line_number = caller.f_lineno
282
+ else:
283
+ globals = sys.__dict__
284
+ line_number= 1
285
+ filename = globals.get('__file__')
286
+ if filename:
287
+ fnl = filename.lower()
288
+ if fnl.endswith((".pyc", ".pyo")):
289
+ filename = filename[:-1]
290
+ if filename:
291
+ # If there is no filename at all, the user is most likely in a REPL,
292
+ # and the warning is not necessary.
293
+ values = dict(
294
+ filename=filename,
295
+ line_number=line_number,
296
+ parser=builder.NAME,
297
+ markup_type=markup_type
298
+ )
299
+ warnings.warn(
300
+ self.NO_PARSER_SPECIFIED_WARNING % values,
301
+ GuessedAtParserWarning, stacklevel=2
302
+ )
303
+ else:
304
+ if kwargs:
305
+ warnings.warn("Keyword arguments to the BeautifulSoup constructor will be ignored. These would normally be passed into the TreeBuilder constructor, but a TreeBuilder instance was passed in as `builder`.")
306
+
307
+ self.builder = builder
308
+ self.is_xml = builder.is_xml
309
+ self.known_xml = self.is_xml
310
+ self._namespaces = dict()
311
+ self.parse_only = parse_only
312
+
313
+ if hasattr(markup, 'read'): # It's a file-type object.
314
+ markup = markup.read()
315
+ elif len(markup) <= 256 and (
316
+ (isinstance(markup, bytes) and not b'<' in markup)
317
+ or (isinstance(markup, str) and not '<' in markup)
318
+ ):
319
+ # Issue warnings for a couple beginner problems
320
+ # involving passing non-markup to Beautiful Soup.
321
+ # Beautiful Soup will still parse the input as markup,
322
+ # since that is sometimes the intended behavior.
323
+ if not self._markup_is_url(markup):
324
+ self._markup_resembles_filename(markup)
325
+
326
+ rejections = []
327
+ success = False
328
+ for (self.markup, self.original_encoding, self.declared_html_encoding,
329
+ self.contains_replacement_characters) in (
330
+ self.builder.prepare_markup(
331
+ markup, from_encoding, exclude_encodings=exclude_encodings)):
332
+ self.reset()
333
+ self.builder.initialize_soup(self)
334
+ try:
335
+ self._feed()
336
+ success = True
337
+ break
338
+ except ParserRejectedMarkup as e:
339
+ rejections.append(e)
340
+ pass
341
+
342
+ if not success:
343
+ other_exceptions = [str(e) for e in rejections]
344
+ raise ParserRejectedMarkup(
345
+ "The markup you provided was rejected by the parser. Trying a different parser or a different encoding may help.\n\nOriginal exception(s) from parser:\n " + "\n ".join(other_exceptions)
346
+ )
347
+
348
+ # Clear out the markup and remove the builder's circular
349
+ # reference to this object.
350
+ self.markup = None
351
+ self.builder.soup = None
352
+
353
def _clone(self):
    """Return a fresh BeautifulSoup of the same concrete type, sharing
    this soup's TreeBuilder but holding no parsed markup.

    This is the first step of the deepcopy process.
    """
    duplicate = type(self)("", None, self.builder)

    # The clone never re-parses the document, so carry over the
    # encoding that was detected during the original parse.
    duplicate.original_encoding = self.original_encoding
    return duplicate
365
+
366
def __getstate__(self):
    """Support pickling: return a picklable copy of this soup's state.

    Tree builders frequently can't be pickled; when the builder reports
    it is not picklable, only its class is stored and the parsed tree
    is flattened back into markup text for re-parsing on unpickle.
    """
    # Frequently a tree builder can't be pickled.
    d = dict(self.__dict__)
    if 'builder' in d and d['builder'] is not None and not self.builder.picklable:
        # Store the builder's class so __setstate__ can re-instantiate it.
        d['builder'] = type(self.builder)
        # Store the contents as a Unicode string.
        d['contents'] = []
        d['markup'] = self.decode()

    # If _most_recent_element is present, it's a Tag object left
    # over from initial parse. It might not be picklable and we
    # don't need it.
    if '_most_recent_element' in d:
        del d['_most_recent_element']
    return d
381
+
382
def __setstate__(self, state):
    """Support unpickling: restore the state captured by __getstate__
    and re-parse the markup that was stored there.
    """
    # If necessary, restore the TreeBuilder by looking it up.
    self.__dict__ = state
    if isinstance(self.builder, type):
        # __getstate__ stored only the builder's class; instantiate it.
        self.builder = self.builder()
    elif not self.builder:
        # We don't know which builder was used to build this
        # parse tree, so use a default we know is always available.
        self.builder = HTMLParserTreeBuilder()
    self.builder.soup = self
    # Rebuild the parse tree from the stored markup string.
    self.reset()
    self._feed()
    return state
395
+
396
+
397
@classmethod
def _decode_markup(cls, markup):
    """Return *markup* as a str so it's safe to send into warnings.warn,
    decoding bytes as UTF-8 with undecodable sequences replaced.

    TODO: warnings.warn had this problem back in 2010 but it might not
    anymore.
    """
    if isinstance(markup, bytes):
        return markup.decode('utf-8', 'replace')
    return markup
409
+
410
@classmethod
def _markup_is_url(cls, markup):
    """Error-handling method: warn if the incoming markup looks like the
    URL of a document rather than the document itself.

    :param markup: A string or bytestring.
    :return: Whether or not the markup resembles a URL
        closely enough to justify a warning.
    """
    if isinstance(markup, bytes):
        space, schemes = b' ', (b"http:", b"https:")
    elif isinstance(markup, str):
        space, schemes = ' ', ("http:", "https:")
    else:
        # Not text at all; nothing to warn about.
        return False

    # Only a scheme-prefixed string with no spaces is treated as a URL;
    # anything containing a space is plausibly real markup.
    if markup.startswith(schemes) and space not in markup:
        warnings.warn(
            'The input looks more like a URL than markup. You may want to use'
            ' an HTTP client like requests to get the document behind'
            ' the URL, and feed that document to Beautiful Soup.',
            MarkupResemblesLocatorWarning,
            stacklevel=3
        )
        return True
    return False
439
+
440
@classmethod
def _markup_resembles_filename(cls, markup):
    """Error-handling method: warn if the incoming markup looks like the
    name of a file on disk rather than markup.

    :param markup: A bytestring or string.
    :return: Whether or not the markup resembles a filename
        closely enough to justify a warning.
    """
    path_characters = '/\\'
    extensions = ['.html', '.htm', '.xml', '.xhtml', '.txt']
    if isinstance(markup, bytes):
        path_characters = path_characters.encode("utf8")
        extensions = [x.encode('utf8') for x in extensions]

    # A path separator anywhere, or a known document extension at the
    # end, makes the input look like a filesystem path.
    filelike = any(char in markup for char in path_characters)
    if not filelike:
        lowered = markup.lower()
        filelike = any(lowered.endswith(ext) for ext in extensions)

    if not filelike:
        return False

    warnings.warn(
        'The input looks more like a filename than markup. You may'
        ' want to open this file and pass the filehandle into'
        ' Beautiful Soup.',
        MarkupResemblesLocatorWarning, stacklevel=3
    )
    return True
470
+
471
def _feed(self):
    """Internal method that parses previously set markup, creating a large
    number of Tag and NavigableString objects.
    """
    # Convert the document to Unicode.
    self.builder.reset()

    self.builder.feed(self.markup)
    # Close out any unfinished strings and close all the open tags.
    self.endData()
    # Pop any tags the parser left open, back up to the root.
    while self.currentTag.name != self.ROOT_TAG_NAME:
        self.popTag()
483
+
484
def reset(self):
    """Reset this object to a state as though it had never parsed any
    markup.
    """
    # The soup object itself is the root Tag of the tree.
    Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
    # The root tag is hidden so it never appears in output.
    self.hidden = 1
    self.builder.reset()
    self.current_data = []  # pending text chunks, joined by endData()
    self.currentTag = None  # the innermost currently-open tag
    self.tagStack = []  # stack of currently-open tags
    self.open_tag_counter = Counter()  # open-tag counts by name, used by _popToTag
    self.preserve_whitespace_tag_stack = []  # open <pre>-like tags
    self.string_container_stack = []  # open tags with special string classes
    self._most_recent_element = None
    self.pushTag(self)
499
+
500
def new_tag(self, name, namespace=None, nsprefix=None, attrs=None,
            sourceline=None, sourcepos=None, **kwattrs):
    """Create a new Tag associated with this BeautifulSoup object.

    :param name: The name of the new Tag.
    :param namespace: The URI of the new Tag's XML namespace, if any.
    :param nsprefix: The prefix for the new Tag's XML namespace, if any.
    :param attrs: A dictionary of this Tag's attribute values; can
        be used instead of `kwattrs` for attributes like 'class'
        that are reserved words in Python.
    :param sourceline: The line number where this tag was
        (purportedly) found in its source document.
    :param sourcepos: The character position within `sourceline` where this
        tag was (purportedly) found.
    :param kwattrs: Keyword arguments for the new Tag's attribute values.
    :return: A new Tag.
    """
    # The default for `attrs` used to be a mutable `{}` — the classic
    # shared-mutable-default pitfall. None is equivalent and safe.
    if attrs:
        # Values from `attrs` take precedence over `kwattrs`.
        kwattrs.update(attrs)
    return self.element_classes.get(Tag, Tag)(
        None, self.builder, name, namespace, nsprefix, kwattrs,
        sourceline=sourceline, sourcepos=sourcepos
    )
522
+
523
def string_container(self, base_class=None):
    """Determine the class that should hold a string found during parsing.

    :param base_class: The requested class; defaults to NavigableString.
    :return: The class to instantiate, after applying any override
        registered in element_classes and any per-tag override from the
        builder's string_containers (e.g. a special class for text
        inside the innermost open container tag).
    """
    container = base_class or NavigableString

    # There may be a general override of NavigableString.
    container = self.element_classes.get(
        container, container
    )

    # On top of that, we may be inside a tag that needs a special
    # container class.
    if self.string_container_stack and container is NavigableString:
        container = self.builder.string_containers.get(
            self.string_container_stack[-1].name, container
        )
    return container
538
+
539
def new_string(self, s, subclass=None):
    """Create a new NavigableString (or the given subclass) associated
    with this BeautifulSoup object, honoring any string-container
    overrides in effect at the current parse position.
    """
    return self.string_container(subclass)(s)
545
+
546
def insert_before(self, *args):
    """Part of the PageElement API, but a BeautifulSoup object sits at
    the root of the parse tree with nothing before or after it, so the
    operation is unsupported.

    :raises NotImplementedError: always.
    """
    raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
551
+
552
def insert_after(self, *args):
    """Part of the PageElement API, but a BeautifulSoup object sits at
    the root of the parse tree with nothing before or after it, so the
    operation is unsupported.

    :raises NotImplementedError: always.
    """
    raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
557
+
558
def popTag(self):
    """Internal method called by _popToTag when a tag is closed.

    :return: The new current (innermost open) tag after popping.
    """
    tag = self.tagStack.pop()
    # Keep the per-name open-tag count in sync for _popToTag.
    if tag.name in self.open_tag_counter:
        self.open_tag_counter[tag.name] -= 1
    # Leaving a whitespace-preserving tag (e.g. <pre>)?
    if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
        self.preserve_whitespace_tag_stack.pop()
    # Leaving a tag whose strings use a special container class?
    if self.string_container_stack and tag == self.string_container_stack[-1]:
        self.string_container_stack.pop()
    #print("Pop", tag.name)
    if self.tagStack:
        self.currentTag = self.tagStack[-1]
    return self.currentTag
571
+
572
def pushTag(self, tag):
    """Internal method called by handle_starttag when a tag is opened.

    Appends `tag` to the current tag's contents, makes it the new
    current tag, and updates the bookkeeping stacks.
    """
    #print("Push", tag.name)
    if self.currentTag is not None:
        self.currentTag.contents.append(tag)
    self.tagStack.append(tag)
    self.currentTag = self.tagStack[-1]
    # The root tag is never counted; _popToTag relies on these counts.
    if tag.name != self.ROOT_TAG_NAME:
        self.open_tag_counter[tag.name] += 1
    # Entering a whitespace-preserving tag (e.g. <pre>)?
    if tag.name in self.builder.preserve_whitespace_tags:
        self.preserve_whitespace_tag_stack.append(tag)
    # Entering a tag whose strings use a special container class?
    if tag.name in self.builder.string_containers:
        self.string_container_stack.append(tag)
585
+
586
def endData(self, containerClass=None):
    """Method called by the TreeBuilder when the end of a data segment
    occurs.

    Joins the buffered text chunks, optionally collapses pure-whitespace
    runs, and attaches the resulting string object to the tree.

    :param containerClass: The class to wrap the string in; resolved
        through string_container(), defaulting to NavigableString.
    """
    if self.current_data:
        current_data = ''.join(self.current_data)
        # If whitespace is not preserved, and this string contains
        # nothing but ASCII spaces, replace it with a single space
        # or newline.
        if not self.preserve_whitespace_tag_stack:
            strippable = True
            for i in current_data:
                if i not in self.ASCII_SPACES:
                    strippable = False
                    break
            if strippable:
                if '\n' in current_data:
                    current_data = '\n'
                else:
                    current_data = ' '

        # Reset the data collector.
        self.current_data = []

        # Should we add this string to the tree at all?
        # Top-level text is dropped when a SoupStrainer is active,
        # unless the strainer's text test matches it.
        if self.parse_only and len(self.tagStack) <= 1 and \
               (not self.parse_only.text or \
                not self.parse_only.search(current_data)):
            return

        containerClass = self.string_container(containerClass)
        o = containerClass(current_data)
        self.object_was_parsed(o)
619
+
620
def object_was_parsed(self, o, parent=None, most_recent_element=None):
    """Method called by the TreeBuilder to integrate an object into the parse tree.

    :param o: The newly parsed Tag or string object.
    :param parent: The parent to attach `o` to; defaults to the current tag.
    :param most_recent_element: The element to treat as `o`'s predecessor
        in document order; defaults to this soup's _most_recent_element.
    """
    if parent is None:
        parent = self.currentTag
    if most_recent_element is not None:
        previous_element = most_recent_element
    else:
        previous_element = self._most_recent_element

    next_element = previous_sibling = next_sibling = None
    if isinstance(o, Tag):
        # A Tag may already carry linkage from a previous position in
        # the tree; preserve what it has.
        next_element = o.next_element
        next_sibling = o.next_sibling
        previous_sibling = o.previous_sibling
        if previous_element is None:
            previous_element = o.previous_element

    # If the parent already has a next_element, we are inserting into an
    # already-parsed part of the tree and must repair linkage afterwards.
    fix = parent.next_element is not None

    o.setup(parent, previous_element, next_element, previous_sibling, next_sibling)

    self._most_recent_element = o
    parent.contents.append(o)

    # Check if we are inserting into an already parsed node.
    if fix:
        self._linkage_fixer(parent)
647
+
648
def _linkage_fixer(self, el):
    """Make sure linkage of this fragment is sound.

    Called after appending a child into an already-parsed element `el`;
    repairs next/previous element and sibling pointers around the newly
    appended last child.
    """

    first = el.contents[0]
    child = el.contents[-1]
    descendant = child

    if child is first and el.parent is not None:
        # Parent should be linked to first child
        el.next_element = child
        # We are no longer linked to whatever this element is
        prev_el = child.previous_element
        if prev_el is not None and prev_el is not el:
            prev_el.next_element = None
        # First child should be linked to the parent, and no previous siblings.
        child.previous_element = el
        child.previous_sibling = None

    # We have no sibling as we've been appended as the last.
    child.next_sibling = None

    # This index is a tag, dig deeper for a "last descendant"
    if isinstance(child, Tag) and child.contents:
        descendant = child._last_descendant(False)

    # As the final step, link last descendant. It should be linked
    # to the parent's next sibling (if found), else walk up the chain
    # and find a parent with a sibling. It should have no next sibling.
    descendant.next_element = None
    descendant.next_sibling = None
    target = el
    while True:
        if target is None:
            # Reached the root without finding a following sibling.
            break
        elif target.next_sibling is not None:
            descendant.next_element = target.next_sibling
            target.next_sibling.previous_element = child
            break
        target = target.parent
687
+
688
def _popToTag(self, name, nsprefix=None, inclusivePop=True):
    """Pops the tag stack up to and including the most recent
    instance of the given tag.

    If there are no open tags with the given name, nothing will be
    popped.

    :param name: Pop up to the most recent tag with this name.
    :param nsprefix: The namespace prefix that goes with `name`.
    :param inclusivePop: If this is false, pops the tag stack up
        to but *not* including the most recent instance of the
        given tag.
    :return: The most recently popped tag, or None if nothing was popped.
    """
    #print("Popping to %s" % name)
    if name == self.ROOT_TAG_NAME:
        # The BeautifulSoup object itself can never be popped.
        return

    most_recently_popped = None

    stack_size = len(self.tagStack)
    for i in range(stack_size - 1, 0, -1):
        # Stop early once no tags with this name remain open.
        if not self.open_tag_counter.get(name):
            break
        t = self.tagStack[i]
        if (name == t.name and nsprefix == t.prefix):
            if inclusivePop:
                most_recently_popped = self.popTag()
            break
        most_recently_popped = self.popTag()

    return most_recently_popped
721
+
722
def handle_starttag(self, name, namespace, nsprefix, attrs, sourceline=None,
                    sourcepos=None, namespaces=None):
    """Called by the tree builder when a new tag is encountered.

    :param name: Name of the tag.
    :param namespace: Namespace URI for the tag, if any.
    :param nsprefix: Namespace prefix for the tag.
    :param attrs: A dictionary of attribute values.
    :param sourceline: The line number where this tag was found in its
        source document.
    :param sourcepos: The character position within `sourceline` where this
        tag was found.
    :param namespaces: A dictionary of all namespace prefix mappings
        currently in scope in the document.

    If this method returns None, the tag was rejected by an active
    SoupStrainer. You should proceed as if the tag had not occurred
    in the document. For instance, if this was a self-closing tag,
    don't call handle_endtag.
    """
    # print("Start tag %s: %s" % (name, attrs))
    # Flush any text buffered before this tag.
    self.endData()

    # An active SoupStrainer rejects top-level tags that don't match.
    if (self.parse_only and len(self.tagStack) <= 1
        and (self.parse_only.text
             or not self.parse_only.search_tag(name, attrs))):
        return None

    tag = self.element_classes.get(Tag, Tag)(
        self, self.builder, name, namespace, nsprefix, attrs,
        self.currentTag, self._most_recent_element,
        sourceline=sourceline, sourcepos=sourcepos,
        namespaces=namespaces
    )
    if tag is None:
        return tag
    # Link the new tag into document order and make it current.
    if self._most_recent_element is not None:
        self._most_recent_element.next_element = tag
    self._most_recent_element = tag
    self.pushTag(tag)
    return tag
762
+
763
def handle_endtag(self, name, nsprefix=None):
    """Called by the tree builder when a closing tag is encountered.

    :param name: Name of the tag being closed.
    :param nsprefix: Namespace prefix for the tag, if any.
    """
    # Flush buffered text, then unwind the tag stack to this tag.
    self.endData()
    self._popToTag(name, nsprefix)
772
+
773
def handle_data(self, data):
    """Called by the tree builder with a chunk of textual data.

    The chunk is buffered; endData() later joins the buffer into a
    single string object.
    """
    self.current_data.append(data)
776
+
777
def decode(self, pretty_print=False,
           eventual_encoding=DEFAULT_OUTPUT_ENCODING,
           formatter="minimal", iterator=None):
    """Returns a string or Unicode representation of the parse tree
    as an HTML or XML document.

    :param pretty_print: If this is True, indentation will be used to
        make the document more readable.
    :param eventual_encoding: The encoding of the final document.
        If this is None, the document will be a Unicode string.
    :param formatter: The output formatter (name or Formatter object)
        passed through to the superclass.
    :param iterator: An optional element iterator passed through to the
        superclass.
    """
    if self.is_xml:
        # Print the XML declaration
        encoding_part = ''
        if eventual_encoding in PYTHON_SPECIFIC_ENCODINGS:
            # This is a special Python encoding; it can't actually
            # go into an XML document because it means nothing
            # outside of Python.
            eventual_encoding = None
        # Fixed: compare to None with `is not`, per PEP 8, instead of `!=`.
        if eventual_encoding is not None:
            encoding_part = ' encoding="%s"' % eventual_encoding
        prefix = '<?xml version="1.0"%s?>\n' % encoding_part
    else:
        prefix = ''
    if not pretty_print:
        indent_level = None
    else:
        indent_level = 0
    return prefix + super(BeautifulSoup, self).decode(
        indent_level, eventual_encoding, formatter, iterator)
807
+
808
# Aliases to make it easier to get started quickly, e.g. 'from bs4 import _soup'
# Both names point at the same class; they are convenience shorthands only.
_s = BeautifulSoup
_soup = BeautifulSoup
811
+
812
class BeautifulStoneSoup(BeautifulSoup):
    """Deprecated interface to an XML parser.

    Equivalent to calling BeautifulSoup(..., features="xml").
    """

    def __init__(self, *args, **kwargs):
        # Warn first, then force the XML feature set and delegate.
        warnings.warn(
            'The BeautifulStoneSoup class is deprecated. Instead of using '
            'it, pass features="xml" into the BeautifulSoup constructor.',
            DeprecationWarning, stacklevel=2
        )
        kwargs['features'] = 'xml'
        super().__init__(*args, **kwargs)
823
+
824
+
825
class StopParsing(Exception):
    """Raised by a TreeBuilder if it's unable to continue parsing."""
828
+
829
class FeatureNotFound(ValueError):
    """Raised by the BeautifulSoup constructor if no parser with the
    requested features is found.
    """
834
+
835
+
836
#If this file is run as a script, act as an HTML pretty-printer.
if __name__ == '__main__':
    import sys
    # Parse whatever arrives on standard input...
    soup = BeautifulSoup(sys.stdin)
    # ...and write the pretty-printed version to standard output.
    print((soup.prettify()))
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (35.8 kB). View file
 
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/__pycache__/css.cpython-311.pyc ADDED
Binary file (11.7 kB). View file
 
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/__pycache__/dammit.cpython-311.pyc ADDED
Binary file (40.2 kB). View file
 
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/__pycache__/diagnose.cpython-311.pyc ADDED
Binary file (13.3 kB). View file
 
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/__pycache__/element.cpython-311.pyc ADDED
Binary file (97.9 kB). View file
 
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/__pycache__/formatter.cpython-311.pyc ADDED
Binary file (8.96 kB). View file
 
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/builder/__init__.py ADDED
@@ -0,0 +1,636 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use of this source code is governed by the MIT license.
2
+ __license__ = "MIT"
3
+
4
+ from collections import defaultdict
5
+ import itertools
6
+ import re
7
+ import warnings
8
+ import sys
9
+ from bs4.element import (
10
+ CharsetMetaAttributeValue,
11
+ ContentMetaAttributeValue,
12
+ RubyParenthesisString,
13
+ RubyTextString,
14
+ Stylesheet,
15
+ Script,
16
+ TemplateString,
17
+ nonwhitespace_re
18
+ )
19
+
20
# Public API of this module.
__all__ = [
    'HTMLTreeBuilder',
    'SAXTreeBuilder',
    'TreeBuilder',
    'TreeBuilderRegistry',
    ]

# Some useful features for a TreeBuilder to have.
# These string constants are matched against a TreeBuilder subclass's
# `features` list by TreeBuilderRegistry.lookup().
FAST = 'fast'
PERMISSIVE = 'permissive'
STRICT = 'strict'
XML = 'xml'
HTML = 'html'
HTML_5 = 'html5'
34
+
35
class XMLParsedAsHTMLWarning(UserWarning):
    """The warning issued when an HTML parser is used to parse
    XML that is not XHTML.
    """
    # Pre-built message text used when issuing this warning.
    MESSAGE = """It looks like you're parsing an XML document using an HTML parser. If this really is an HTML document (maybe it's XHTML?), you can ignore or filter this warning. If it's XML, you should know that using an XML parser will be more reliable. To parse this document as XML, make sure you have the lxml package installed, and pass the keyword argument `features="xml"` into the BeautifulSoup constructor."""
40
+
41
+
42
class TreeBuilderRegistry(object):
    """A way of looking up TreeBuilder subclasses by their name or by desired
    features.
    """

    def __init__(self):
        self.builders_for_feature = defaultdict(list)
        self.builders = []

    def register(self, treebuilder_class):
        """Register a treebuilder based on its advertised features.

        :param treebuilder_class: A subclass of Treebuilder. its .features
           attribute should list its features.
        """
        # Newest registrations go to the front so they win lookups.
        for feature in treebuilder_class.features:
            self.builders_for_feature[feature].insert(0, treebuilder_class)
        self.builders.insert(0, treebuilder_class)

    def lookup(self, *features):
        """Look up a TreeBuilder subclass with the desired features.

        :param features: A list of features to look for. If none are
            provided, the most recently registered TreeBuilder subclass
            will be used.
        :return: A TreeBuilder subclass, or None if there's no
            registered subclass with all the requested features.
        """
        if not self.builders:
            # There are no builders at all.
            return None

        if not features:
            # No features requested: hand back the most recently
            # registered builder.
            return self.builders[0]

        # Walk the requested features in order, keeping only builders
        # that advertise every feature seen so far.
        candidates = None
        candidate_set = None
        for feature in features:
            matching = self.builders_for_feature.get(feature, [])
            if not matching:
                # Features nobody advertises are skipped rather than
                # eliminating all candidates.
                continue
            if candidates is None:
                candidates = matching
                candidate_set = set(matching)
            else:
                candidate_set = candidate_set.intersection(matching)

        # The only valid candidates are the ones in candidate_set.
        # Return the first (most preferred) candidate that survived.
        if candidate_set is None:
            return None
        for candidate in candidates:
            if candidate in candidate_set:
                return candidate
        return None
106
+
107
# The BeautifulSoup class will take feature lists from developers and use them
# to look up builders in this registry.
# Module-level singleton; TreeBuilder subclasses register themselves here.
builder_registry = TreeBuilderRegistry()
110
+
111
class TreeBuilder(object):
    """Turn a textual document into a Beautiful Soup object tree."""

    NAME = "[Unknown tree builder]"
    ALTERNATE_NAMES = []
    features = []

    is_xml = False
    picklable = False
    empty_element_tags = None # A tag will be considered an empty-element
                              # tag when and only when it has no contents.

    # A value for these tag/attribute combinations is a space- or
    # comma-separated list of CDATA, rather than a single CDATA.
    DEFAULT_CDATA_LIST_ATTRIBUTES = defaultdict(list)

    # Whitespace should be preserved inside these tags.
    DEFAULT_PRESERVE_WHITESPACE_TAGS = set()

    # The textual contents of tags with these names should be
    # instantiated with some class other than NavigableString.
    DEFAULT_STRING_CONTAINERS = {}

    # Sentinel distinguishing "argument not given" from any real value.
    USE_DEFAULT = object()

    # Most parsers don't keep track of line numbers.
    TRACKS_LINE_NUMBERS = False

    def __init__(self, multi_valued_attributes=USE_DEFAULT,
                 preserve_whitespace_tags=USE_DEFAULT,
                 store_line_numbers=USE_DEFAULT,
                 string_containers=USE_DEFAULT,
    ):
        """Constructor.

        :param multi_valued_attributes: If this is set to None, the
         TreeBuilder will not turn any values for attributes like
         'class' into lists. Setting this to a dictionary will
         customize this behavior; look at DEFAULT_CDATA_LIST_ATTRIBUTES
         for an example.

         Internally, these are called "CDATA list attributes", but that
         probably doesn't make sense to an end-user, so the argument name
         is `multi_valued_attributes`.

        :param preserve_whitespace_tags: A list of tags to treat
         the way <pre> tags are treated in HTML. Tags in this list
         are immune from pretty-printing; their contents will always be
         output as-is.

        :param string_containers: A dictionary mapping tag names to
         the classes that should be instantiated to contain the textual
         contents of those tags. The default is to use NavigableString
         for every tag, no matter what the name. You can override the
         default by changing DEFAULT_STRING_CONTAINERS.

        :param store_line_numbers: If the parser keeps track of the
         line numbers and positions of the original markup, that
         information will, by default, be stored in each corresponding
         `Tag` object. You can turn this off by passing
         store_line_numbers=False. If the parser you're using doesn't
         keep track of this information, then setting store_line_numbers=True
         will do nothing.
        """
        self.soup = None
        if multi_valued_attributes is self.USE_DEFAULT:
            multi_valued_attributes = self.DEFAULT_CDATA_LIST_ATTRIBUTES
        self.cdata_list_attributes = multi_valued_attributes
        if preserve_whitespace_tags is self.USE_DEFAULT:
            preserve_whitespace_tags = self.DEFAULT_PRESERVE_WHITESPACE_TAGS
        self.preserve_whitespace_tags = preserve_whitespace_tags
        # Consistency fix: USE_DEFAULT is an identity sentinel, so it is
        # compared with `is` (as above), not `==`.
        if store_line_numbers is self.USE_DEFAULT:
            store_line_numbers = self.TRACKS_LINE_NUMBERS
        self.store_line_numbers = store_line_numbers
        if string_containers is self.USE_DEFAULT:
            string_containers = self.DEFAULT_STRING_CONTAINERS
        self.string_containers = string_containers

    def initialize_soup(self, soup):
        """The BeautifulSoup object has been initialized and is now
        being associated with the TreeBuilder.

        :param soup: A BeautifulSoup object.
        """
        self.soup = soup

    def reset(self):
        """Do any work necessary to reset the underlying parser
        for a new document.

        By default, this does nothing.
        """
        pass

    def can_be_empty_element(self, tag_name):
        """Might a tag with this name be an empty-element tag?

        The final markup may or may not actually present this tag as
        self-closing.

        For instance: an HTMLBuilder does not consider a <p> tag to be
        an empty-element tag (it's not in
        HTMLBuilder.empty_element_tags). This means an empty <p> tag
        will be presented as "<p></p>", not "<p/>" or "<p>".

        The default implementation has no opinion about which tags are
        empty-element tags, so a tag will be presented as an
        empty-element tag if and only if it has no children.
        "<foo></foo>" will become "<foo/>", and "<foo>bar</foo>" will
        be left alone.

        :param tag_name: The name of a markup tag.
        """
        if self.empty_element_tags is None:
            return True
        return tag_name in self.empty_element_tags

    def feed(self, markup):
        """Run some incoming markup through some parsing process,
        populating the `BeautifulSoup` object in self.soup.

        This method is not implemented in TreeBuilder; it must be
        implemented in subclasses.

        :return: None.
        """
        raise NotImplementedError()

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None, exclude_encodings=None):
        """Run any preliminary steps necessary to make incoming markup
        acceptable to the parser.

        :param markup: Some markup -- probably a bytestring.
        :param user_specified_encoding: The user asked to try this encoding.
        :param document_declared_encoding: The markup itself claims to be
            in this encoding. NOTE: This argument is not used by the
            calling code and can probably be removed.
        :param exclude_encodings: The user asked _not_ to try any of
            these encodings.

        :yield: A series of 4-tuples:
         (markup, encoding, declared encoding,
          has undergone character replacement)

         Each 4-tuple represents a strategy for converting the
         document to Unicode and parsing it. Each strategy will be tried
         in turn.

         By default, the only strategy is to parse the markup
         as-is. See `LXMLTreeBuilderForXML` and
         `HTMLParserTreeBuilder` for implementations that take into
         account the quirks of particular parsers.
        """
        yield markup, None, None, False

    def test_fragment_to_document(self, fragment):
        """Wrap an HTML fragment to make it look like a document.

        Different parsers do this differently. For instance, lxml
        introduces an empty <head> tag, and html5lib
        doesn't. Abstracting this away lets us write simple tests
        which run HTML fragments through the parser and compare the
        results against other HTML fragments.

        This method should not be used outside of tests.

        :param fragment: A string -- fragment of HTML.
        :return: A string -- a full HTML document.
        """
        return fragment

    def set_up_substitutions(self, tag):
        """Set up any substitutions that will need to be performed on
        a `Tag` when it's output as a string.

        By default, this does nothing. See `HTMLTreeBuilder` for a
        case where this is used.

        :param tag: A `Tag`
        :return: Whether or not a substitution was performed.
        """
        return False

    def _replace_cdata_list_attribute_values(self, tag_name, attrs):
        """When an attribute value is associated with a tag that can
        have multiple values for that attribute, convert the string
        value to a list of strings.

        Basically, replaces class="foo bar" with class=["foo", "bar"]

        NOTE: This method modifies its input in place.

        :param tag_name: The name of a tag.
        :param attrs: A dictionary containing the tag's attributes.
           Any appropriate attribute values will be modified in place.
        """
        if not attrs:
            return attrs
        if self.cdata_list_attributes:
            universal = self.cdata_list_attributes.get('*', [])
            tag_specific = self.cdata_list_attributes.get(
                tag_name.lower(), None)
            for attr in list(attrs.keys()):
                if attr in universal or (tag_specific and attr in tag_specific):
                    # We have a "class"-type attribute whose string
                    # value is a whitespace-separated list of
                    # values. Split it into a list.
                    value = attrs[attr]
                    if isinstance(value, str):
                        values = nonwhitespace_re.findall(value)
                    else:
                        # html5lib sometimes calls setAttributes twice
                        # for the same tag when rearranging the parse
                        # tree. On the second call the attribute value
                        # here is already a list. If this happens,
                        # leave the value alone rather than trying to
                        # split it again.
                        values = value
                    attrs[attr] = values
        return attrs
+
333
+ class SAXTreeBuilder(TreeBuilder):
334
+ """A Beautiful Soup treebuilder that listens for SAX events.
335
+
336
+ This is not currently used for anything, but it demonstrates
337
+ how a simple TreeBuilder would work.
338
+ """
339
+
340
+ def feed(self, markup):
341
+ raise NotImplementedError()
342
+
343
+ def close(self):
344
+ pass
345
+
346
+ def startElement(self, name, attrs):
347
+ attrs = dict((key[1], value) for key, value in list(attrs.items()))
348
+ #print("Start %s, %r" % (name, attrs))
349
+ self.soup.handle_starttag(name, attrs)
350
+
351
+ def endElement(self, name):
352
+ #print("End %s" % name)
353
+ self.soup.handle_endtag(name)
354
+
355
+ def startElementNS(self, nsTuple, nodeName, attrs):
356
+ # Throw away (ns, nodeName) for now.
357
+ self.startElement(nodeName, attrs)
358
+
359
+ def endElementNS(self, nsTuple, nodeName):
360
+ # Throw away (ns, nodeName) for now.
361
+ self.endElement(nodeName)
362
+ #handler.endElementNS((ns, node.nodeName), node.nodeName)
363
+
364
+ def startPrefixMapping(self, prefix, nodeValue):
365
+ # Ignore the prefix for now.
366
+ pass
367
+
368
+ def endPrefixMapping(self, prefix):
369
+ # Ignore the prefix for now.
370
+ # handler.endPrefixMapping(prefix)
371
+ pass
372
+
373
+ def characters(self, content):
374
+ self.soup.handle_data(content)
375
+
376
+ def startDocument(self):
377
+ pass
378
+
379
+ def endDocument(self):
380
+ pass
381
+
382
+
383
+ class HTMLTreeBuilder(TreeBuilder):
384
+ """This TreeBuilder knows facts about HTML.
385
+
386
+ Such as which tags are empty-element tags.
387
+ """
388
+
389
+ empty_element_tags = set([
390
+ # These are from HTML5.
391
+ 'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr',
392
+
393
+ # These are from earlier versions of HTML and are removed in HTML5.
394
+ 'basefont', 'bgsound', 'command', 'frame', 'image', 'isindex', 'nextid', 'spacer'
395
+ ])
396
+
397
+ # The HTML standard defines these as block-level elements. Beautiful
398
+ # Soup does not treat these elements differently from other elements,
399
+ # but it may do so eventually, and this information is available if
400
+ # you need to use it.
401
+ block_elements = set(["address", "article", "aside", "blockquote", "canvas", "dd", "div", "dl", "dt", "fieldset", "figcaption", "figure", "footer", "form", "h1", "h2", "h3", "h4", "h5", "h6", "header", "hr", "li", "main", "nav", "noscript", "ol", "output", "p", "pre", "section", "table", "tfoot", "ul", "video"])
402
+
403
+ # These HTML tags need special treatment so they can be
404
+ # represented by a string class other than NavigableString.
405
+ #
406
+ # For some of these tags, it's because the HTML standard defines
407
+ # an unusual content model for them. I made this list by going
408
+ # through the HTML spec
409
+ # (https://html.spec.whatwg.org/#metadata-content) and looking for
410
+ # "metadata content" elements that can contain strings.
411
+ #
412
+ # The Ruby tags (<rt> and <rp>) are here despite being normal
413
+ # "phrasing content" tags, because the content they contain is
414
+ # qualitatively different from other text in the document, and it
415
+ # can be useful to be able to distinguish it.
416
+ #
417
+ # TODO: Arguably <noscript> could go here but it seems
418
+ # qualitatively different from the other tags.
419
+ DEFAULT_STRING_CONTAINERS = {
420
+ 'rt' : RubyTextString,
421
+ 'rp' : RubyParenthesisString,
422
+ 'style': Stylesheet,
423
+ 'script': Script,
424
+ 'template': TemplateString,
425
+ }
426
+
427
+ # The HTML standard defines these attributes as containing a
428
+ # space-separated list of values, not a single value. That is,
429
+ # class="foo bar" means that the 'class' attribute has two values,
430
+ # 'foo' and 'bar', not the single value 'foo bar'. When we
431
+ # encounter one of these attributes, we will parse its value into
432
+ # a list of values if possible. Upon output, the list will be
433
+ # converted back into a string.
434
+ DEFAULT_CDATA_LIST_ATTRIBUTES = {
435
+ "*" : ['class', 'accesskey', 'dropzone'],
436
+ "a" : ['rel', 'rev'],
437
+ "link" : ['rel', 'rev'],
438
+ "td" : ["headers"],
439
+ "th" : ["headers"],
440
+ "td" : ["headers"],
441
+ "form" : ["accept-charset"],
442
+ "object" : ["archive"],
443
+
444
+ # These are HTML5 specific, as are *.accesskey and *.dropzone above.
445
+ "area" : ["rel"],
446
+ "icon" : ["sizes"],
447
+ "iframe" : ["sandbox"],
448
+ "output" : ["for"],
449
+ }
450
+
451
+ DEFAULT_PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])
452
+
453
+ def set_up_substitutions(self, tag):
454
+ """Replace the declared encoding in a <meta> tag with a placeholder,
455
+ to be substituted when the tag is output to a string.
456
+
457
+ An HTML document may come in to Beautiful Soup as one
458
+ encoding, but exit in a different encoding, and the <meta> tag
459
+ needs to be changed to reflect this.
460
+
461
+ :param tag: A `Tag`
462
+ :return: Whether or not a substitution was performed.
463
+ """
464
+ # We are only interested in <meta> tags
465
+ if tag.name != 'meta':
466
+ return False
467
+
468
+ http_equiv = tag.get('http-equiv')
469
+ content = tag.get('content')
470
+ charset = tag.get('charset')
471
+
472
+ # We are interested in <meta> tags that say what encoding the
473
+ # document was originally in. This means HTML 5-style <meta>
474
+ # tags that provide the "charset" attribute. It also means
475
+ # HTML 4-style <meta> tags that provide the "content"
476
+ # attribute and have "http-equiv" set to "content-type".
477
+ #
478
+ # In both cases we will replace the value of the appropriate
479
+ # attribute with a standin object that can take on any
480
+ # encoding.
481
+ meta_encoding = None
482
+ if charset is not None:
483
+ # HTML 5 style:
484
+ # <meta charset="utf8">
485
+ meta_encoding = charset
486
+ tag['charset'] = CharsetMetaAttributeValue(charset)
487
+
488
+ elif (content is not None and http_equiv is not None
489
+ and http_equiv.lower() == 'content-type'):
490
+ # HTML 4 style:
491
+ # <meta http-equiv="content-type" content="text/html; charset=utf8">
492
+ tag['content'] = ContentMetaAttributeValue(content)
493
+
494
+ return (meta_encoding is not None)
495
+
496
+ class DetectsXMLParsedAsHTML(object):
497
+ """A mixin class for any class (a TreeBuilder, or some class used by a
498
+ TreeBuilder) that's in a position to detect whether an XML
499
+ document is being incorrectly parsed as HTML, and issue an
500
+ appropriate warning.
501
+
502
+ This requires being able to observe an incoming processing
503
+ instruction that might be an XML declaration, and also able to
504
+ observe tags as they're opened. If you can't do that for a given
505
+ TreeBuilder, there's a less reliable implementation based on
506
+ examining the raw markup.
507
+ """
508
+
509
+ # Regular expression for seeing if markup has an <html> tag.
510
+ LOOKS_LIKE_HTML = re.compile("<[^ +]html", re.I)
511
+ LOOKS_LIKE_HTML_B = re.compile(b"<[^ +]html", re.I)
512
+
513
+ XML_PREFIX = '<?xml'
514
+ XML_PREFIX_B = b'<?xml'
515
+
516
+ @classmethod
517
+ def warn_if_markup_looks_like_xml(cls, markup, stacklevel=3):
518
+ """Perform a check on some markup to see if it looks like XML
519
+ that's not XHTML. If so, issue a warning.
520
+
521
+ This is much less reliable than doing the check while parsing,
522
+ but some of the tree builders can't do that.
523
+
524
+ :param stacklevel: The stacklevel of the code calling this
525
+ function.
526
+
527
+ :return: True if the markup looks like non-XHTML XML, False
528
+ otherwise.
529
+
530
+ """
531
+ if isinstance(markup, bytes):
532
+ prefix = cls.XML_PREFIX_B
533
+ looks_like_html = cls.LOOKS_LIKE_HTML_B
534
+ else:
535
+ prefix = cls.XML_PREFIX
536
+ looks_like_html = cls.LOOKS_LIKE_HTML
537
+
538
+ if (markup is not None
539
+ and markup.startswith(prefix)
540
+ and not looks_like_html.search(markup[:500])
541
+ ):
542
+ cls._warn(stacklevel=stacklevel+2)
543
+ return True
544
+ return False
545
+
546
+ @classmethod
547
+ def _warn(cls, stacklevel=5):
548
+ """Issue a warning about XML being parsed as HTML."""
549
+ warnings.warn(
550
+ XMLParsedAsHTMLWarning.MESSAGE, XMLParsedAsHTMLWarning,
551
+ stacklevel=stacklevel
552
+ )
553
+
554
+ def _initialize_xml_detector(self):
555
+ """Call this method before parsing a document."""
556
+ self._first_processing_instruction = None
557
+ self._root_tag = None
558
+
559
+ def _document_might_be_xml(self, processing_instruction):
560
+ """Call this method when encountering an XML declaration, or a
561
+ "processing instruction" that might be an XML declaration.
562
+ """
563
+ if (self._first_processing_instruction is not None
564
+ or self._root_tag is not None):
565
+ # The document has already started. Don't bother checking
566
+ # anymore.
567
+ return
568
+
569
+ self._first_processing_instruction = processing_instruction
570
+
571
+ # We won't know until we encounter the first tag whether or
572
+ # not this is actually a problem.
573
+
574
+ def _root_tag_encountered(self, name):
575
+ """Call this when you encounter the document's root tag.
576
+
577
+ This is where we actually check whether an XML document is
578
+ being incorrectly parsed as HTML, and issue the warning.
579
+ """
580
+ if self._root_tag is not None:
581
+ # This method was incorrectly called multiple times. Do
582
+ # nothing.
583
+ return
584
+
585
+ self._root_tag = name
586
+ if (name != 'html' and self._first_processing_instruction is not None
587
+ and self._first_processing_instruction.lower().startswith('xml ')):
588
+ # We encountered an XML declaration and then a tag other
589
+ # than 'html'. This is a reliable indicator that a
590
+ # non-XHTML document is being parsed as XML.
591
+ self._warn()
592
+
593
+
594
+ def register_treebuilders_from(module):
595
+ """Copy TreeBuilders from the given module into this module."""
596
+ this_module = sys.modules[__name__]
597
+ for name in module.__all__:
598
+ obj = getattr(module, name)
599
+
600
+ if issubclass(obj, TreeBuilder):
601
+ setattr(this_module, name, obj)
602
+ this_module.__all__.append(name)
603
+ # Register the builder while we're at it.
604
+ this_module.builder_registry.register(obj)
605
+
606
+ class ParserRejectedMarkup(Exception):
607
+ """An Exception to be raised when the underlying parser simply
608
+ refuses to parse the given markup.
609
+ """
610
+ def __init__(self, message_or_exception):
611
+ """Explain why the parser rejected the given markup, either
612
+ with a textual explanation or another exception.
613
+ """
614
+ if isinstance(message_or_exception, Exception):
615
+ e = message_or_exception
616
+ message_or_exception = "%s: %s" % (e.__class__.__name__, str(e))
617
+ super(ParserRejectedMarkup, self).__init__(message_or_exception)
618
+
619
+ # Builders are registered in reverse order of priority, so that custom
620
+ # builder registrations will take precedence. In general, we want lxml
621
+ # to take precedence over html5lib, because it's faster. And we only
622
+ # want to use HTMLParser as a last resort.
623
+ from . import _htmlparser
624
+ register_treebuilders_from(_htmlparser)
625
+ try:
626
+ from . import _html5lib
627
+ register_treebuilders_from(_html5lib)
628
+ except ImportError:
629
+ # They don't have html5lib installed.
630
+ pass
631
+ try:
632
+ from . import _lxml
633
+ register_treebuilders_from(_lxml)
634
+ except ImportError:
635
+ # They don't have lxml installed.
636
+ pass
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/builder/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (25.1 kB). View file
 
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/builder/__pycache__/_html5lib.cpython-311.pyc ADDED
Binary file (22.4 kB). View file
 
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/builder/__pycache__/_htmlparser.cpython-311.pyc ADDED
Binary file (14.4 kB). View file
 
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/builder/__pycache__/_lxml.cpython-311.pyc ADDED
Binary file (16.6 kB). View file
 
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/builder/_html5lib.py ADDED
@@ -0,0 +1,481 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use of this source code is governed by the MIT license.
2
+ __license__ = "MIT"
3
+
4
+ __all__ = [
5
+ 'HTML5TreeBuilder',
6
+ ]
7
+
8
+ import warnings
9
+ import re
10
+ from bs4.builder import (
11
+ DetectsXMLParsedAsHTML,
12
+ PERMISSIVE,
13
+ HTML,
14
+ HTML_5,
15
+ HTMLTreeBuilder,
16
+ )
17
+ from bs4.element import (
18
+ NamespacedAttribute,
19
+ nonwhitespace_re,
20
+ )
21
+ import html5lib
22
+ from html5lib.constants import (
23
+ namespaces,
24
+ prefixes,
25
+ )
26
+ from bs4.element import (
27
+ Comment,
28
+ Doctype,
29
+ NavigableString,
30
+ Tag,
31
+ )
32
+
33
+ try:
34
+ # Pre-0.99999999
35
+ from html5lib.treebuilders import _base as treebuilder_base
36
+ new_html5lib = False
37
+ except ImportError as e:
38
+ # 0.99999999 and up
39
+ from html5lib.treebuilders import base as treebuilder_base
40
+ new_html5lib = True
41
+
42
+ class HTML5TreeBuilder(HTMLTreeBuilder):
43
+ """Use html5lib to build a tree.
44
+
45
+ Note that this TreeBuilder does not support some features common
46
+ to HTML TreeBuilders. Some of these features could theoretically
47
+ be implemented, but at the very least it's quite difficult,
48
+ because html5lib moves the parse tree around as it's being built.
49
+
50
+ * This TreeBuilder doesn't use different subclasses of NavigableString
51
+ based on the name of the tag in which the string was found.
52
+
53
+ * You can't use a SoupStrainer to parse only part of a document.
54
+ """
55
+
56
+ NAME = "html5lib"
57
+
58
+ features = [NAME, PERMISSIVE, HTML_5, HTML]
59
+
60
+ # html5lib can tell us which line number and position in the
61
+ # original file is the source of an element.
62
+ TRACKS_LINE_NUMBERS = True
63
+
64
+ def prepare_markup(self, markup, user_specified_encoding,
65
+ document_declared_encoding=None, exclude_encodings=None):
66
+ # Store the user-specified encoding for use later on.
67
+ self.user_specified_encoding = user_specified_encoding
68
+
69
+ # document_declared_encoding and exclude_encodings aren't used
70
+ # ATM because the html5lib TreeBuilder doesn't use
71
+ # UnicodeDammit.
72
+ if exclude_encodings:
73
+ warnings.warn(
74
+ "You provided a value for exclude_encoding, but the html5lib tree builder doesn't support exclude_encoding.",
75
+ stacklevel=3
76
+ )
77
+
78
+ # html5lib only parses HTML, so if it's given XML that's worth
79
+ # noting.
80
+ DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(
81
+ markup, stacklevel=3
82
+ )
83
+
84
+ yield (markup, None, None, False)
85
+
86
+ # These methods are defined by Beautiful Soup.
87
+ def feed(self, markup):
88
+ if self.soup.parse_only is not None:
89
+ warnings.warn(
90
+ "You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.",
91
+ stacklevel=4
92
+ )
93
+ parser = html5lib.HTMLParser(tree=self.create_treebuilder)
94
+ self.underlying_builder.parser = parser
95
+ extra_kwargs = dict()
96
+ if not isinstance(markup, str):
97
+ if new_html5lib:
98
+ extra_kwargs['override_encoding'] = self.user_specified_encoding
99
+ else:
100
+ extra_kwargs['encoding'] = self.user_specified_encoding
101
+ doc = parser.parse(markup, **extra_kwargs)
102
+
103
+ # Set the character encoding detected by the tokenizer.
104
+ if isinstance(markup, str):
105
+ # We need to special-case this because html5lib sets
106
+ # charEncoding to UTF-8 if it gets Unicode input.
107
+ doc.original_encoding = None
108
+ else:
109
+ original_encoding = parser.tokenizer.stream.charEncoding[0]
110
+ if not isinstance(original_encoding, str):
111
+ # In 0.99999999 and up, the encoding is an html5lib
112
+ # Encoding object. We want to use a string for compatibility
113
+ # with other tree builders.
114
+ original_encoding = original_encoding.name
115
+ doc.original_encoding = original_encoding
116
+ self.underlying_builder.parser = None
117
+
118
+ def create_treebuilder(self, namespaceHTMLElements):
119
+ self.underlying_builder = TreeBuilderForHtml5lib(
120
+ namespaceHTMLElements, self.soup,
121
+ store_line_numbers=self.store_line_numbers
122
+ )
123
+ return self.underlying_builder
124
+
125
+ def test_fragment_to_document(self, fragment):
126
+ """See `TreeBuilder`."""
127
+ return '<html><head></head><body>%s</body></html>' % fragment
128
+
129
+
130
+ class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
131
+
132
+ def __init__(self, namespaceHTMLElements, soup=None,
133
+ store_line_numbers=True, **kwargs):
134
+ if soup:
135
+ self.soup = soup
136
+ else:
137
+ from bs4 import BeautifulSoup
138
+ # TODO: Why is the parser 'html.parser' here? To avoid an
139
+ # infinite loop?
140
+ self.soup = BeautifulSoup(
141
+ "", "html.parser", store_line_numbers=store_line_numbers,
142
+ **kwargs
143
+ )
144
+ # TODO: What are **kwargs exactly? Should they be passed in
145
+ # here in addition to/instead of being passed to the BeautifulSoup
146
+ # constructor?
147
+ super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)
148
+
149
+ # This will be set later to an html5lib.html5parser.HTMLParser
150
+ # object, which we can use to track the current line number.
151
+ self.parser = None
152
+ self.store_line_numbers = store_line_numbers
153
+
154
+ def documentClass(self):
155
+ self.soup.reset()
156
+ return Element(self.soup, self.soup, None)
157
+
158
+ def insertDoctype(self, token):
159
+ name = token["name"]
160
+ publicId = token["publicId"]
161
+ systemId = token["systemId"]
162
+
163
+ doctype = Doctype.for_name_and_ids(name, publicId, systemId)
164
+ self.soup.object_was_parsed(doctype)
165
+
166
+ def elementClass(self, name, namespace):
167
+ kwargs = {}
168
+ if self.parser and self.store_line_numbers:
169
+ # This represents the point immediately after the end of the
170
+ # tag. We don't know when the tag started, but we do know
171
+ # where it ended -- the character just before this one.
172
+ sourceline, sourcepos = self.parser.tokenizer.stream.position()
173
+ kwargs['sourceline'] = sourceline
174
+ kwargs['sourcepos'] = sourcepos-1
175
+ tag = self.soup.new_tag(name, namespace, **kwargs)
176
+
177
+ return Element(tag, self.soup, namespace)
178
+
179
+ def commentClass(self, data):
180
+ return TextNode(Comment(data), self.soup)
181
+
182
+ def fragmentClass(self):
183
+ from bs4 import BeautifulSoup
184
+ # TODO: Why is the parser 'html.parser' here? To avoid an
185
+ # infinite loop?
186
+ self.soup = BeautifulSoup("", "html.parser")
187
+ self.soup.name = "[document_fragment]"
188
+ return Element(self.soup, self.soup, None)
189
+
190
+ def appendChild(self, node):
191
+ # XXX This code is not covered by the BS4 tests.
192
+ self.soup.append(node.element)
193
+
194
+ def getDocument(self):
195
+ return self.soup
196
+
197
+ def getFragment(self):
198
+ return treebuilder_base.TreeBuilder.getFragment(self).element
199
+
200
+ def testSerializer(self, element):
201
+ from bs4 import BeautifulSoup
202
+ rv = []
203
+ doctype_re = re.compile(r'^(.*?)(?: PUBLIC "(.*?)"(?: "(.*?)")?| SYSTEM "(.*?)")?$')
204
+
205
+ def serializeElement(element, indent=0):
206
+ if isinstance(element, BeautifulSoup):
207
+ pass
208
+ if isinstance(element, Doctype):
209
+ m = doctype_re.match(element)
210
+ if m:
211
+ name = m.group(1)
212
+ if m.lastindex > 1:
213
+ publicId = m.group(2) or ""
214
+ systemId = m.group(3) or m.group(4) or ""
215
+ rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
216
+ (' ' * indent, name, publicId, systemId))
217
+ else:
218
+ rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, name))
219
+ else:
220
+ rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
221
+ elif isinstance(element, Comment):
222
+ rv.append("|%s<!-- %s -->" % (' ' * indent, element))
223
+ elif isinstance(element, NavigableString):
224
+ rv.append("|%s\"%s\"" % (' ' * indent, element))
225
+ else:
226
+ if element.namespace:
227
+ name = "%s %s" % (prefixes[element.namespace],
228
+ element.name)
229
+ else:
230
+ name = element.name
231
+ rv.append("|%s<%s>" % (' ' * indent, name))
232
+ if element.attrs:
233
+ attributes = []
234
+ for name, value in list(element.attrs.items()):
235
+ if isinstance(name, NamespacedAttribute):
236
+ name = "%s %s" % (prefixes[name.namespace], name.name)
237
+ if isinstance(value, list):
238
+ value = " ".join(value)
239
+ attributes.append((name, value))
240
+
241
+ for name, value in sorted(attributes):
242
+ rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
243
+ indent += 2
244
+ for child in element.children:
245
+ serializeElement(child, indent)
246
+ serializeElement(element, 0)
247
+
248
+ return "\n".join(rv)
249
+
250
+ class AttrList(object):
251
+ def __init__(self, element):
252
+ self.element = element
253
+ self.attrs = dict(self.element.attrs)
254
+ def __iter__(self):
255
+ return list(self.attrs.items()).__iter__()
256
+ def __setitem__(self, name, value):
257
+ # If this attribute is a multi-valued attribute for this element,
258
+ # turn its value into a list.
259
+ list_attr = self.element.cdata_list_attributes or {}
260
+ if (name in list_attr.get('*', [])
261
+ or (self.element.name in list_attr
262
+ and name in list_attr.get(self.element.name, []))):
263
+ # A node that is being cloned may have already undergone
264
+ # this procedure.
265
+ if not isinstance(value, list):
266
+ value = nonwhitespace_re.findall(value)
267
+ self.element[name] = value
268
+ def items(self):
269
+ return list(self.attrs.items())
270
+ def keys(self):
271
+ return list(self.attrs.keys())
272
+ def __len__(self):
273
+ return len(self.attrs)
274
+ def __getitem__(self, name):
275
+ return self.attrs[name]
276
+ def __contains__(self, name):
277
+ return name in list(self.attrs.keys())
278
+
279
+
280
+ class Element(treebuilder_base.Node):
281
+ def __init__(self, element, soup, namespace):
282
+ treebuilder_base.Node.__init__(self, element.name)
283
+ self.element = element
284
+ self.soup = soup
285
+ self.namespace = namespace
286
+
287
+ def appendChild(self, node):
288
+ string_child = child = None
289
+ if isinstance(node, str):
290
+ # Some other piece of code decided to pass in a string
291
+ # instead of creating a TextElement object to contain the
292
+ # string.
293
+ string_child = child = node
294
+ elif isinstance(node, Tag):
295
+ # Some other piece of code decided to pass in a Tag
296
+ # instead of creating an Element object to contain the
297
+ # Tag.
298
+ child = node
299
+ elif node.element.__class__ == NavigableString:
300
+ string_child = child = node.element
301
+ node.parent = self
302
+ else:
303
+ child = node.element
304
+ node.parent = self
305
+
306
+ if not isinstance(child, str) and child.parent is not None:
307
+ node.element.extract()
308
+
309
+ if (string_child is not None and self.element.contents
310
+ and self.element.contents[-1].__class__ == NavigableString):
311
+ # We are appending a string onto another string.
312
+ # TODO This has O(n^2) performance, for input like
313
+ # "a</a>a</a>a</a>..."
314
+ old_element = self.element.contents[-1]
315
+ new_element = self.soup.new_string(old_element + string_child)
316
+ old_element.replace_with(new_element)
317
+ self.soup._most_recent_element = new_element
318
+ else:
319
+ if isinstance(node, str):
320
+ # Create a brand new NavigableString from this string.
321
+ child = self.soup.new_string(node)
322
+
323
+ # Tell Beautiful Soup to act as if it parsed this element
324
+ # immediately after the parent's last descendant. (Or
325
+ # immediately after the parent, if it has no children.)
326
+ if self.element.contents:
327
+ most_recent_element = self.element._last_descendant(False)
328
+ elif self.element.next_element is not None:
329
+ # Something from further ahead in the parse tree is
330
+ # being inserted into this earlier element. This is
331
+ # very annoying because it means an expensive search
332
+ # for the last element in the tree.
333
+ most_recent_element = self.soup._last_descendant()
334
+ else:
335
+ most_recent_element = self.element
336
+
337
+ self.soup.object_was_parsed(
338
+ child, parent=self.element,
339
+ most_recent_element=most_recent_element)
340
+
341
+ def getAttributes(self):
342
+ if isinstance(self.element, Comment):
343
+ return {}
344
+ return AttrList(self.element)
345
+
346
+ def setAttributes(self, attributes):
347
+ if attributes is not None and len(attributes) > 0:
348
+ converted_attributes = []
349
+ for name, value in list(attributes.items()):
350
+ if isinstance(name, tuple):
351
+ new_name = NamespacedAttribute(*name)
352
+ del attributes[name]
353
+ attributes[new_name] = value
354
+
355
+ self.soup.builder._replace_cdata_list_attribute_values(
356
+ self.name, attributes)
357
+ for name, value in list(attributes.items()):
358
+ self.element[name] = value
359
+
360
+ # The attributes may contain variables that need substitution.
361
+ # Call set_up_substitutions manually.
362
+ #
363
+ # The Tag constructor called this method when the Tag was created,
364
+ # but we just set/changed the attributes, so call it again.
365
+ self.soup.builder.set_up_substitutions(self.element)
366
+ attributes = property(getAttributes, setAttributes)
367
+
368
+ def insertText(self, data, insertBefore=None):
369
+ text = TextNode(self.soup.new_string(data), self.soup)
370
+ if insertBefore:
371
+ self.insertBefore(text, insertBefore)
372
+ else:
373
+ self.appendChild(text)
374
+
375
+ def insertBefore(self, node, refNode):
376
+ index = self.element.index(refNode.element)
377
+ if (node.element.__class__ == NavigableString and self.element.contents
378
+ and self.element.contents[index-1].__class__ == NavigableString):
379
+ # (See comments in appendChild)
380
+ old_node = self.element.contents[index-1]
381
+ new_str = self.soup.new_string(old_node + node.element)
382
+ old_node.replace_with(new_str)
383
+ else:
384
+ self.element.insert(index, node.element)
385
+ node.parent = self
386
+
387
+ def removeChild(self, node):
388
+ node.element.extract()
389
+
390
+ def reparentChildren(self, new_parent):
391
+ """Move all of this tag's children into another tag."""
392
+ # print("MOVE", self.element.contents)
393
+ # print("FROM", self.element)
394
+ # print("TO", new_parent.element)
395
+
396
+ element = self.element
397
+ new_parent_element = new_parent.element
398
+ # Determine what this tag's next_element will be once all the children
399
+ # are removed.
400
+ final_next_element = element.next_sibling
401
+
402
+ new_parents_last_descendant = new_parent_element._last_descendant(False, False)
403
+ if len(new_parent_element.contents) > 0:
404
+ # The new parent already contains children. We will be
405
+ # appending this tag's children to the end.
406
+ new_parents_last_child = new_parent_element.contents[-1]
407
+ new_parents_last_descendant_next_element = new_parents_last_descendant.next_element
408
+ else:
409
+ # The new parent contains no children.
410
+ new_parents_last_child = None
411
+ new_parents_last_descendant_next_element = new_parent_element.next_element
412
+
413
+ to_append = element.contents
414
+ if len(to_append) > 0:
415
+ # Set the first child's previous_element and previous_sibling
416
+ # to elements within the new parent
417
+ first_child = to_append[0]
418
+ if new_parents_last_descendant is not None:
419
+ first_child.previous_element = new_parents_last_descendant
420
+ else:
421
+ first_child.previous_element = new_parent_element
422
+ first_child.previous_sibling = new_parents_last_child
423
+ if new_parents_last_descendant is not None:
424
+ new_parents_last_descendant.next_element = first_child
425
+ else:
426
+ new_parent_element.next_element = first_child
427
+ if new_parents_last_child is not None:
428
+ new_parents_last_child.next_sibling = first_child
429
+
430
+ # Find the very last element being moved. It is now the
431
+ # parent's last descendant. It has no .next_sibling and
432
+ # its .next_element is whatever the previous last
433
+ # descendant had.
434
+ last_childs_last_descendant = to_append[-1]._last_descendant(False, True)
435
+
436
+ last_childs_last_descendant.next_element = new_parents_last_descendant_next_element
437
+ if new_parents_last_descendant_next_element is not None:
438
+ # TODO: This code has no test coverage and I'm not sure
439
+ # how to get html5lib to go through this path, but it's
440
+ # just the other side of the previous line.
441
+ new_parents_last_descendant_next_element.previous_element = last_childs_last_descendant
442
+ last_childs_last_descendant.next_sibling = None
443
+
444
+ for child in to_append:
445
+ child.parent = new_parent_element
446
+ new_parent_element.contents.append(child)
447
+
448
+ # Now that this element has no children, change its .next_element.
449
+ element.contents = []
450
+ element.next_element = final_next_element
451
+
452
+ # print("DONE WITH MOVE")
453
+ # print("FROM", self.element)
454
+ # print("TO", new_parent_element)
455
+
456
+ def cloneNode(self):
457
+ tag = self.soup.new_tag(self.element.name, self.namespace)
458
+ node = Element(tag, self.soup, self.namespace)
459
+ for key,value in self.attributes:
460
+ node.attributes[key] = value
461
+ return node
462
+
463
+ def hasContent(self):
464
+ return self.element.contents
465
+
466
+ def getNameTuple(self):
467
+ if self.namespace == None:
468
+ return namespaces["html"], self.name
469
+ else:
470
+ return self.namespace, self.name
471
+
472
+ nameTuple = property(getNameTuple)
473
+
474
+ class TextNode(Element):
475
+ def __init__(self, element, soup):
476
+ treebuilder_base.Node.__init__(self, None)
477
+ self.element = element
478
+ self.soup = soup
479
+
480
+ def cloneNode(self):
481
+ raise NotImplementedError
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/builder/_htmlparser.py ADDED
@@ -0,0 +1,387 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # encoding: utf-8
2
+ """Use the HTMLParser library to parse HTML files that aren't too bad."""
3
+
4
+ # Use of this source code is governed by the MIT license.
5
+ __license__ = "MIT"
6
+
7
+ __all__ = [
8
+ 'HTMLParserTreeBuilder',
9
+ ]
10
+
11
+ from html.parser import HTMLParser
12
+
13
+ import sys
14
+ import warnings
15
+
16
+ from bs4.element import (
17
+ CData,
18
+ Comment,
19
+ Declaration,
20
+ Doctype,
21
+ ProcessingInstruction,
22
+ )
23
+ from bs4.dammit import EntitySubstitution, UnicodeDammit
24
+
25
+ from bs4.builder import (
26
+ DetectsXMLParsedAsHTML,
27
+ ParserRejectedMarkup,
28
+ HTML,
29
+ HTMLTreeBuilder,
30
+ STRICT,
31
+ )
32
+
33
+
34
+ HTMLPARSER = 'html.parser'
35
+
36
+ class BeautifulSoupHTMLParser(HTMLParser, DetectsXMLParsedAsHTML):
37
+ """A subclass of the Python standard library's HTMLParser class, which
38
+ listens for HTMLParser events and translates them into calls
39
+ to Beautiful Soup's tree construction API.
40
+ """
41
+
42
+ # Strategies for handling duplicate attributes
43
+ IGNORE = 'ignore'
44
+ REPLACE = 'replace'
45
+
46
+ def __init__(self, *args, **kwargs):
47
+ """Constructor.
48
+
49
+ :param on_duplicate_attribute: A strategy for what to do if a
50
+ tag includes the same attribute more than once. Accepted
51
+ values are: REPLACE (replace earlier values with later
52
+ ones, the default), IGNORE (keep the earliest value
53
+ encountered), or a callable. A callable must take three
54
+ arguments: the dictionary of attributes already processed,
55
+ the name of the duplicate attribute, and the most recent value
56
+ encountered.
57
+ """
58
+ self.on_duplicate_attribute = kwargs.pop(
59
+ 'on_duplicate_attribute', self.REPLACE
60
+ )
61
+ HTMLParser.__init__(self, *args, **kwargs)
62
+
63
+ # Keep a list of empty-element tags that were encountered
64
+ # without an explicit closing tag. If we encounter a closing tag
65
+ # of this type, we'll associate it with one of those entries.
66
+ #
67
+ # This isn't a stack because we don't care about the
68
+ # order. It's a list of closing tags we've already handled and
69
+ # will ignore, assuming they ever show up.
70
+ self.already_closed_empty_element = []
71
+
72
+ self._initialize_xml_detector()
73
+
74
+ def error(self, message):
75
+ # NOTE: This method is required so long as Python 3.9 is
76
+ # supported. The corresponding code is removed from HTMLParser
77
+ # in 3.5, but not removed from ParserBase until 3.10.
78
+ # https://github.com/python/cpython/issues/76025
79
+ #
80
+ # The original implementation turned the error into a warning,
81
+ # but in every case I discovered, this made HTMLParser
82
+ # immediately crash with an error message that was less
83
+ # helpful than the warning. The new implementation makes it
84
+ # more clear that html.parser just can't parse this
85
+ # markup. The 3.10 implementation does the same, though it
86
+ # raises AssertionError rather than calling a method. (We
87
+ # catch this error and wrap it in a ParserRejectedMarkup.)
88
+ raise ParserRejectedMarkup(message)
89
+
90
+ def handle_startendtag(self, name, attrs):
91
+ """Handle an incoming empty-element tag.
92
+
93
+ This is only called when the markup looks like <tag/>.
94
+
95
+ :param name: Name of the tag.
96
+ :param attrs: Dictionary of the tag's attributes.
97
+ """
98
+ # is_startend() tells handle_starttag not to close the tag
99
+ # just because its name matches a known empty-element tag. We
100
+ # know that this is an empty-element tag and we want to call
101
+ # handle_endtag ourselves.
102
+ tag = self.handle_starttag(name, attrs, handle_empty_element=False)
103
+ self.handle_endtag(name)
104
+
105
+ def handle_starttag(self, name, attrs, handle_empty_element=True):
106
+ """Handle an opening tag, e.g. '<tag>'
107
+
108
+ :param name: Name of the tag.
109
+ :param attrs: Dictionary of the tag's attributes.
110
+ :param handle_empty_element: True if this tag is known to be
111
+ an empty-element tag (i.e. there is not expected to be any
112
+ closing tag).
113
+ """
114
+ # XXX namespace
115
+ attr_dict = {}
116
+ for key, value in attrs:
117
+ # Change None attribute values to the empty string
118
+ # for consistency with the other tree builders.
119
+ if value is None:
120
+ value = ''
121
+ if key in attr_dict:
122
+ # A single attribute shows up multiple times in this
123
+ # tag. How to handle it depends on the
124
+ # on_duplicate_attribute setting.
125
+ on_dupe = self.on_duplicate_attribute
126
+ if on_dupe == self.IGNORE:
127
+ pass
128
+ elif on_dupe in (None, self.REPLACE):
129
+ attr_dict[key] = value
130
+ else:
131
+ on_dupe(attr_dict, key, value)
132
+ else:
133
+ attr_dict[key] = value
134
+ attrvalue = '""'
135
+ #print("START", name)
136
+ sourceline, sourcepos = self.getpos()
137
+ tag = self.soup.handle_starttag(
138
+ name, None, None, attr_dict, sourceline=sourceline,
139
+ sourcepos=sourcepos
140
+ )
141
+ if tag and tag.is_empty_element and handle_empty_element:
142
+ # Unlike other parsers, html.parser doesn't send separate end tag
143
+ # events for empty-element tags. (It's handled in
144
+ # handle_startendtag, but only if the original markup looked like
145
+ # <tag/>.)
146
+ #
147
+ # So we need to call handle_endtag() ourselves. Since we
148
+ # know the start event is identical to the end event, we
149
+ # don't want handle_endtag() to cross off any previous end
150
+ # events for tags of this name.
151
+ self.handle_endtag(name, check_already_closed=False)
152
+
153
+ # But we might encounter an explicit closing tag for this tag
154
+ # later on. If so, we want to ignore it.
155
+ self.already_closed_empty_element.append(name)
156
+
157
+ if self._root_tag is None:
158
+ self._root_tag_encountered(name)
159
+
160
+ def handle_endtag(self, name, check_already_closed=True):
161
+ """Handle a closing tag, e.g. '</tag>'
162
+
163
+ :param name: A tag name.
164
+ :param check_already_closed: True if this tag is expected to
165
+ be the closing portion of an empty-element tag,
166
+ e.g. '<tag></tag>'.
167
+ """
168
+ #print("END", name)
169
+ if check_already_closed and name in self.already_closed_empty_element:
170
+ # This is a redundant end tag for an empty-element tag.
171
+ # We've already called handle_endtag() for it, so just
172
+ # check it off the list.
173
+ #print("ALREADY CLOSED", name)
174
+ self.already_closed_empty_element.remove(name)
175
+ else:
176
+ self.soup.handle_endtag(name)
177
+
178
+ def handle_data(self, data):
179
+ """Handle some textual data that shows up between tags."""
180
+ self.soup.handle_data(data)
181
+
182
+ def handle_charref(self, name):
183
+ """Handle a numeric character reference by converting it to the
184
+ corresponding Unicode character and treating it as textual
185
+ data.
186
+
187
+ :param name: Character number, possibly in hexadecimal.
188
+ """
189
+ # TODO: This was originally a workaround for a bug in
190
+ # HTMLParser. (http://bugs.python.org/issue13633) The bug has
191
+ # been fixed, but removing this code still makes some
192
+ # Beautiful Soup tests fail. This needs investigation.
193
+ if name.startswith('x'):
194
+ real_name = int(name.lstrip('x'), 16)
195
+ elif name.startswith('X'):
196
+ real_name = int(name.lstrip('X'), 16)
197
+ else:
198
+ real_name = int(name)
199
+
200
+ data = None
201
+ if real_name < 256:
202
+ # HTML numeric entities are supposed to reference Unicode
203
+ # code points, but sometimes they reference code points in
204
+ # some other encoding (ahem, Windows-1252). E.g. &#147;
205
+ # instead of &#201; for LEFT DOUBLE QUOTATION MARK. This
206
+ # code tries to detect this situation and compensate.
207
+ for encoding in (self.soup.original_encoding, 'windows-1252'):
208
+ if not encoding:
209
+ continue
210
+ try:
211
+ data = bytearray([real_name]).decode(encoding)
212
+ except UnicodeDecodeError as e:
213
+ pass
214
+ if not data:
215
+ try:
216
+ data = chr(real_name)
217
+ except (ValueError, OverflowError) as e:
218
+ pass
219
+ data = data or "\N{REPLACEMENT CHARACTER}"
220
+ self.handle_data(data)
221
+
222
+ def handle_entityref(self, name):
223
+ """Handle a named entity reference by converting it to the
224
+ corresponding Unicode character(s) and treating it as textual
225
+ data.
226
+
227
+ :param name: Name of the entity reference.
228
+ """
229
+ character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
230
+ if character is not None:
231
+ data = character
232
+ else:
233
+ # If this were XML, it would be ambiguous whether "&foo"
234
+ # was an character entity reference with a missing
235
+ # semicolon or the literal string "&foo". Since this is
236
+ # HTML, we have a complete list of all character entity references,
237
+ # and this one wasn't found, so assume it's the literal string "&foo".
238
+ data = "&%s" % name
239
+ self.handle_data(data)
240
+
241
+ def handle_comment(self, data):
242
+ """Handle an HTML comment.
243
+
244
+ :param data: The text of the comment.
245
+ """
246
+ self.soup.endData()
247
+ self.soup.handle_data(data)
248
+ self.soup.endData(Comment)
249
+
250
+ def handle_decl(self, data):
251
+ """Handle a DOCTYPE declaration.
252
+
253
+ :param data: The text of the declaration.
254
+ """
255
+ self.soup.endData()
256
+ data = data[len("DOCTYPE "):]
257
+ self.soup.handle_data(data)
258
+ self.soup.endData(Doctype)
259
+
260
+ def unknown_decl(self, data):
261
+ """Handle a declaration of unknown type -- probably a CDATA block.
262
+
263
+ :param data: The text of the declaration.
264
+ """
265
+ if data.upper().startswith('CDATA['):
266
+ cls = CData
267
+ data = data[len('CDATA['):]
268
+ else:
269
+ cls = Declaration
270
+ self.soup.endData()
271
+ self.soup.handle_data(data)
272
+ self.soup.endData(cls)
273
+
274
+ def handle_pi(self, data):
275
+ """Handle a processing instruction.
276
+
277
+ :param data: The text of the instruction.
278
+ """
279
+ self.soup.endData()
280
+ self.soup.handle_data(data)
281
+ self._document_might_be_xml(data)
282
+ self.soup.endData(ProcessingInstruction)
283
+
284
+
285
+ class HTMLParserTreeBuilder(HTMLTreeBuilder):
286
+ """A Beautiful soup `TreeBuilder` that uses the `HTMLParser` parser,
287
+ found in the Python standard library.
288
+ """
289
+ is_xml = False
290
+ picklable = True
291
+ NAME = HTMLPARSER
292
+ features = [NAME, HTML, STRICT]
293
+
294
+ # The html.parser knows which line number and position in the
295
+ # original file is the source of an element.
296
+ TRACKS_LINE_NUMBERS = True
297
+
298
+ def __init__(self, parser_args=None, parser_kwargs=None, **kwargs):
299
+ """Constructor.
300
+
301
+ :param parser_args: Positional arguments to pass into
302
+ the BeautifulSoupHTMLParser constructor, once it's
303
+ invoked.
304
+ :param parser_kwargs: Keyword arguments to pass into
305
+ the BeautifulSoupHTMLParser constructor, once it's
306
+ invoked.
307
+ :param kwargs: Keyword arguments for the superclass constructor.
308
+ """
309
+ # Some keyword arguments will be pulled out of kwargs and placed
310
+ # into parser_kwargs.
311
+ extra_parser_kwargs = dict()
312
+ for arg in ('on_duplicate_attribute',):
313
+ if arg in kwargs:
314
+ value = kwargs.pop(arg)
315
+ extra_parser_kwargs[arg] = value
316
+ super(HTMLParserTreeBuilder, self).__init__(**kwargs)
317
+ parser_args = parser_args or []
318
+ parser_kwargs = parser_kwargs or {}
319
+ parser_kwargs.update(extra_parser_kwargs)
320
+ parser_kwargs['convert_charrefs'] = False
321
+ self.parser_args = (parser_args, parser_kwargs)
322
+
323
+ def prepare_markup(self, markup, user_specified_encoding=None,
324
+ document_declared_encoding=None, exclude_encodings=None):
325
+
326
+ """Run any preliminary steps necessary to make incoming markup
327
+ acceptable to the parser.
328
+
329
+ :param markup: Some markup -- probably a bytestring.
330
+ :param user_specified_encoding: The user asked to try this encoding.
331
+ :param document_declared_encoding: The markup itself claims to be
332
+ in this encoding.
333
+ :param exclude_encodings: The user asked _not_ to try any of
334
+ these encodings.
335
+
336
+ :yield: A series of 4-tuples:
337
+ (markup, encoding, declared encoding,
338
+ has undergone character replacement)
339
+
340
+ Each 4-tuple represents a strategy for converting the
341
+ document to Unicode and parsing it. Each strategy will be tried
342
+ in turn.
343
+ """
344
+ if isinstance(markup, str):
345
+ # Parse Unicode as-is.
346
+ yield (markup, None, None, False)
347
+ return
348
+
349
+ # Ask UnicodeDammit to sniff the most likely encoding.
350
+
351
+ # This was provided by the end-user; treat it as a known
352
+ # definite encoding per the algorithm laid out in the HTML5
353
+ # spec. (See the EncodingDetector class for details.)
354
+ known_definite_encodings = [user_specified_encoding]
355
+
356
+ # This was found in the document; treat it as a slightly lower-priority
357
+ # user encoding.
358
+ user_encodings = [document_declared_encoding]
359
+
360
+ try_encodings = [user_specified_encoding, document_declared_encoding]
361
+ dammit = UnicodeDammit(
362
+ markup,
363
+ known_definite_encodings=known_definite_encodings,
364
+ user_encodings=user_encodings,
365
+ is_html=True,
366
+ exclude_encodings=exclude_encodings
367
+ )
368
+ yield (dammit.markup, dammit.original_encoding,
369
+ dammit.declared_html_encoding,
370
+ dammit.contains_replacement_characters)
371
+
372
+ def feed(self, markup):
373
+ """Run some incoming markup through some parsing process,
374
+ populating the `BeautifulSoup` object in self.soup.
375
+ """
376
+ args, kwargs = self.parser_args
377
+ parser = BeautifulSoupHTMLParser(*args, **kwargs)
378
+ parser.soup = self.soup
379
+ try:
380
+ parser.feed(markup)
381
+ parser.close()
382
+ except AssertionError as e:
383
+ # html.parser raises AssertionError in rare cases to
384
+ # indicate a fatal problem with the markup, especially
385
+ # when there's an error in the doctype declaration.
386
+ raise ParserRejectedMarkup(e)
387
+ parser.already_closed_empty_element = []
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/builder/_lxml.py ADDED
@@ -0,0 +1,388 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use of this source code is governed by the MIT license.
2
+ __license__ = "MIT"
3
+
4
+ __all__ = [
5
+ 'LXMLTreeBuilderForXML',
6
+ 'LXMLTreeBuilder',
7
+ ]
8
+
9
+ try:
10
+ from collections.abc import Callable # Python 3.6
11
+ except ImportError as e:
12
+ from collections import Callable
13
+
14
+ from io import BytesIO
15
+ from io import StringIO
16
+ from lxml import etree
17
+ from bs4.element import (
18
+ Comment,
19
+ Doctype,
20
+ NamespacedAttribute,
21
+ ProcessingInstruction,
22
+ XMLProcessingInstruction,
23
+ )
24
+ from bs4.builder import (
25
+ DetectsXMLParsedAsHTML,
26
+ FAST,
27
+ HTML,
28
+ HTMLTreeBuilder,
29
+ PERMISSIVE,
30
+ ParserRejectedMarkup,
31
+ TreeBuilder,
32
+ XML)
33
+ from bs4.dammit import EncodingDetector
34
+
35
+ LXML = 'lxml'
36
+
37
+ def _invert(d):
38
+ "Invert a dictionary."
39
+ return dict((v,k) for k, v in list(d.items()))
40
+
41
+ class LXMLTreeBuilderForXML(TreeBuilder):
42
+ DEFAULT_PARSER_CLASS = etree.XMLParser
43
+
44
+ is_xml = True
45
+ processing_instruction_class = XMLProcessingInstruction
46
+
47
+ NAME = "lxml-xml"
48
+ ALTERNATE_NAMES = ["xml"]
49
+
50
+ # Well, it's permissive by XML parser standards.
51
+ features = [NAME, LXML, XML, FAST, PERMISSIVE]
52
+
53
+ CHUNK_SIZE = 512
54
+
55
+ # This namespace mapping is specified in the XML Namespace
56
+ # standard.
57
+ DEFAULT_NSMAPS = dict(xml='http://www.w3.org/XML/1998/namespace')
58
+
59
+ DEFAULT_NSMAPS_INVERTED = _invert(DEFAULT_NSMAPS)
60
+
61
+ # NOTE: If we parsed Element objects and looked at .sourceline,
62
+ # we'd be able to see the line numbers from the original document.
63
+ # But instead we build an XMLParser or HTMLParser object to serve
64
+ # as the target of parse messages, and those messages don't include
65
+ # line numbers.
66
+ # See: https://bugs.launchpad.net/lxml/+bug/1846906
67
+
68
+ def initialize_soup(self, soup):
69
+ """Let the BeautifulSoup object know about the standard namespace
70
+ mapping.
71
+
72
+ :param soup: A `BeautifulSoup`.
73
+ """
74
+ super(LXMLTreeBuilderForXML, self).initialize_soup(soup)
75
+ self._register_namespaces(self.DEFAULT_NSMAPS)
76
+
77
+ def _register_namespaces(self, mapping):
78
+ """Let the BeautifulSoup object know about namespaces encountered
79
+ while parsing the document.
80
+
81
+ This might be useful later on when creating CSS selectors.
82
+
83
+ This will track (almost) all namespaces, even ones that were
84
+ only in scope for part of the document. If two namespaces have
85
+ the same prefix, only the first one encountered will be
86
+ tracked. Un-prefixed namespaces are not tracked.
87
+
88
+ :param mapping: A dictionary mapping namespace prefixes to URIs.
89
+ """
90
+ for key, value in list(mapping.items()):
91
+ # This is 'if key' and not 'if key is not None' because we
92
+ # don't track un-prefixed namespaces. Soupselect will
93
+ # treat an un-prefixed namespace as the default, which
94
+ # causes confusion in some cases.
95
+ if key and key not in self.soup._namespaces:
96
+ # Let the BeautifulSoup object know about a new namespace.
97
+ # If there are multiple namespaces defined with the same
98
+ # prefix, the first one in the document takes precedence.
99
+ self.soup._namespaces[key] = value
100
+
101
+ def default_parser(self, encoding):
102
+ """Find the default parser for the given encoding.
103
+
104
+ :param encoding: A string.
105
+ :return: Either a parser object or a class, which
106
+ will be instantiated with default arguments.
107
+ """
108
+ if self._default_parser is not None:
109
+ return self._default_parser
110
+ return etree.XMLParser(
111
+ target=self, strip_cdata=False, recover=True, encoding=encoding)
112
+
113
+ def parser_for(self, encoding):
114
+ """Instantiate an appropriate parser for the given encoding.
115
+
116
+ :param encoding: A string.
117
+ :return: A parser object such as an `etree.XMLParser`.
118
+ """
119
+ # Use the default parser.
120
+ parser = self.default_parser(encoding)
121
+
122
+ if isinstance(parser, Callable):
123
+ # Instantiate the parser with default arguments
124
+ parser = parser(
125
+ target=self, strip_cdata=False, recover=True, encoding=encoding
126
+ )
127
+ return parser
128
+
129
+ def __init__(self, parser=None, empty_element_tags=None, **kwargs):
130
+ # TODO: Issue a warning if parser is present but not a
131
+ # callable, since that means there's no way to create new
132
+ # parsers for different encodings.
133
+ self._default_parser = parser
134
+ if empty_element_tags is not None:
135
+ self.empty_element_tags = set(empty_element_tags)
136
+ self.soup = None
137
+ self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED]
138
+ self.active_namespace_prefixes = [dict(self.DEFAULT_NSMAPS)]
139
+ super(LXMLTreeBuilderForXML, self).__init__(**kwargs)
140
+
141
+ def _getNsTag(self, tag):
142
+ # Split the namespace URL out of a fully-qualified lxml tag
143
+ # name. Copied from lxml's src/lxml/sax.py.
144
+ if tag[0] == '{':
145
+ return tuple(tag[1:].split('}', 1))
146
+ else:
147
+ return (None, tag)
148
+
149
+ def prepare_markup(self, markup, user_specified_encoding=None,
150
+ exclude_encodings=None,
151
+ document_declared_encoding=None):
152
+ """Run any preliminary steps necessary to make incoming markup
153
+ acceptable to the parser.
154
+
155
+ lxml really wants to get a bytestring and convert it to
156
+ Unicode itself. So instead of using UnicodeDammit to convert
157
+ the bytestring to Unicode using different encodings, this
158
+ implementation uses EncodingDetector to iterate over the
159
+ encodings, and tell lxml to try to parse the document as each
160
+ one in turn.
161
+
162
+ :param markup: Some markup -- hopefully a bytestring.
163
+ :param user_specified_encoding: The user asked to try this encoding.
164
+ :param document_declared_encoding: The markup itself claims to be
165
+ in this encoding.
166
+ :param exclude_encodings: The user asked _not_ to try any of
167
+ these encodings.
168
+
169
+ :yield: A series of 4-tuples:
170
+ (markup, encoding, declared encoding,
171
+ has undergone character replacement)
172
+
173
+ Each 4-tuple represents a strategy for converting the
174
+ document to Unicode and parsing it. Each strategy will be tried
175
+ in turn.
176
+ """
177
+ is_html = not self.is_xml
178
+ if is_html:
179
+ self.processing_instruction_class = ProcessingInstruction
180
+ # We're in HTML mode, so if we're given XML, that's worth
181
+ # noting.
182
+ DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(
183
+ markup, stacklevel=3
184
+ )
185
+ else:
186
+ self.processing_instruction_class = XMLProcessingInstruction
187
+
188
+ if isinstance(markup, str):
189
+ # We were given Unicode. Maybe lxml can parse Unicode on
190
+ # this system?
191
+
192
+ # TODO: This is a workaround for
193
+ # https://bugs.launchpad.net/lxml/+bug/1948551.
194
+ # We can remove it once the upstream issue is fixed.
195
+ if len(markup) > 0 and markup[0] == u'\N{BYTE ORDER MARK}':
196
+ markup = markup[1:]
197
+ yield markup, None, document_declared_encoding, False
198
+
199
+ if isinstance(markup, str):
200
+ # No, apparently not. Convert the Unicode to UTF-8 and
201
+ # tell lxml to parse it as UTF-8.
202
+ yield (markup.encode("utf8"), "utf8",
203
+ document_declared_encoding, False)
204
+
205
+ # This was provided by the end-user; treat it as a known
206
+ # definite encoding per the algorithm laid out in the HTML5
207
+ # spec. (See the EncodingDetector class for details.)
208
+ known_definite_encodings = [user_specified_encoding]
209
+
210
+ # This was found in the document; treat it as a slightly lower-priority
211
+ # user encoding.
212
+ user_encodings = [document_declared_encoding]
213
+ detector = EncodingDetector(
214
+ markup, known_definite_encodings=known_definite_encodings,
215
+ user_encodings=user_encodings, is_html=is_html,
216
+ exclude_encodings=exclude_encodings
217
+ )
218
+ for encoding in detector.encodings:
219
+ yield (detector.markup, encoding, document_declared_encoding, False)
220
+
221
+ def feed(self, markup):
222
+ if isinstance(markup, bytes):
223
+ markup = BytesIO(markup)
224
+ elif isinstance(markup, str):
225
+ markup = StringIO(markup)
226
+
227
+ # Call feed() at least once, even if the markup is empty,
228
+ # or the parser won't be initialized.
229
+ data = markup.read(self.CHUNK_SIZE)
230
+ try:
231
+ self.parser = self.parser_for(self.soup.original_encoding)
232
+ self.parser.feed(data)
233
+ while len(data) != 0:
234
+ # Now call feed() on the rest of the data, chunk by chunk.
235
+ data = markup.read(self.CHUNK_SIZE)
236
+ if len(data) != 0:
237
+ self.parser.feed(data)
238
+ self.parser.close()
239
+ except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
240
+ raise ParserRejectedMarkup(e)
241
+
242
+ def close(self):
243
+ self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED]
244
+
245
+ def start(self, name, attrs, nsmap={}):
246
+ # Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
247
+ attrs = dict(attrs)
248
+ nsprefix = None
249
+ # Invert each namespace map as it comes in.
250
+ if len(nsmap) == 0 and len(self.nsmaps) > 1:
251
+ # There are no new namespaces for this tag, but
252
+ # non-default namespaces are in play, so we need a
253
+ # separate tag stack to know when they end.
254
+ self.nsmaps.append(None)
255
+ elif len(nsmap) > 0:
256
+ # A new namespace mapping has come into play.
257
+
258
+ # First, Let the BeautifulSoup object know about it.
259
+ self._register_namespaces(nsmap)
260
+
261
+ # Then, add it to our running list of inverted namespace
262
+ # mappings.
263
+ self.nsmaps.append(_invert(nsmap))
264
+
265
+ # The currently active namespace prefixes have
266
+ # changed. Calculate the new mapping so it can be stored
267
+ # with all Tag objects created while these prefixes are in
268
+ # scope.
269
+ current_mapping = dict(self.active_namespace_prefixes[-1])
270
+ current_mapping.update(nsmap)
271
+
272
+ # We should not track un-prefixed namespaces as we can only hold one
273
+ # and it will be recognized as the default namespace by soupsieve,
274
+ # which may be confusing in some situations.
275
+ if '' in current_mapping:
276
+ del current_mapping['']
277
+ self.active_namespace_prefixes.append(current_mapping)
278
+
279
+ # Also treat the namespace mapping as a set of attributes on the
280
+ # tag, so we can recreate it later.
281
+ attrs = attrs.copy()
282
+ for prefix, namespace in list(nsmap.items()):
283
+ attribute = NamespacedAttribute(
284
+ "xmlns", prefix, "http://www.w3.org/2000/xmlns/")
285
+ attrs[attribute] = namespace
286
+
287
+ # Namespaces are in play. Find any attributes that came in
288
+ # from lxml with namespaces attached to their names, and
289
+ # turn then into NamespacedAttribute objects.
290
+ new_attrs = {}
291
+ for attr, value in list(attrs.items()):
292
+ namespace, attr = self._getNsTag(attr)
293
+ if namespace is None:
294
+ new_attrs[attr] = value
295
+ else:
296
+ nsprefix = self._prefix_for_namespace(namespace)
297
+ attr = NamespacedAttribute(nsprefix, attr, namespace)
298
+ new_attrs[attr] = value
299
+ attrs = new_attrs
300
+
301
+ namespace, name = self._getNsTag(name)
302
+ nsprefix = self._prefix_for_namespace(namespace)
303
+ self.soup.handle_starttag(
304
+ name, namespace, nsprefix, attrs,
305
+ namespaces=self.active_namespace_prefixes[-1]
306
+ )
307
+
308
+ def _prefix_for_namespace(self, namespace):
309
+ """Find the currently active prefix for the given namespace."""
310
+ if namespace is None:
311
+ return None
312
+ for inverted_nsmap in reversed(self.nsmaps):
313
+ if inverted_nsmap is not None and namespace in inverted_nsmap:
314
+ return inverted_nsmap[namespace]
315
+ return None
316
+
317
+ def end(self, name):
318
+ self.soup.endData()
319
+ completed_tag = self.soup.tagStack[-1]
320
+ namespace, name = self._getNsTag(name)
321
+ nsprefix = None
322
+ if namespace is not None:
323
+ for inverted_nsmap in reversed(self.nsmaps):
324
+ if inverted_nsmap is not None and namespace in inverted_nsmap:
325
+ nsprefix = inverted_nsmap[namespace]
326
+ break
327
+ self.soup.handle_endtag(name, nsprefix)
328
+ if len(self.nsmaps) > 1:
329
+ # This tag, or one of its parents, introduced a namespace
330
+ # mapping, so pop it off the stack.
331
+ out_of_scope_nsmap = self.nsmaps.pop()
332
+
333
+ if out_of_scope_nsmap is not None:
334
+ # This tag introduced a namespace mapping which is no
335
+ # longer in scope. Recalculate the currently active
336
+ # namespace prefixes.
337
+ self.active_namespace_prefixes.pop()
338
+
339
+ def pi(self, target, data):
340
+ self.soup.endData()
341
+ data = target + ' ' + data
342
+ self.soup.handle_data(data)
343
+ self.soup.endData(self.processing_instruction_class)
344
+
345
+ def data(self, content):
346
+ self.soup.handle_data(content)
347
+
348
+ def doctype(self, name, pubid, system):
349
+ self.soup.endData()
350
+ doctype = Doctype.for_name_and_ids(name, pubid, system)
351
+ self.soup.object_was_parsed(doctype)
352
+
353
+ def comment(self, content):
354
+ "Handle comments as Comment objects."
355
+ self.soup.endData()
356
+ self.soup.handle_data(content)
357
+ self.soup.endData(Comment)
358
+
359
+ def test_fragment_to_document(self, fragment):
360
+ """See `TreeBuilder`."""
361
+ return '<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
362
+
363
+
364
+ class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
365
+
366
+ NAME = LXML
367
+ ALTERNATE_NAMES = ["lxml-html"]
368
+
369
+ features = ALTERNATE_NAMES + [NAME, HTML, FAST, PERMISSIVE]
370
+ is_xml = False
371
+ processing_instruction_class = ProcessingInstruction
372
+
373
+ def default_parser(self, encoding):
374
+ return etree.HTMLParser
375
+
376
+ def feed(self, markup):
377
+ encoding = self.soup.original_encoding
378
+ try:
379
+ self.parser = self.parser_for(encoding)
380
+ self.parser.feed(markup)
381
+ self.parser.close()
382
+ except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
383
+ raise ParserRejectedMarkup(e)
384
+
385
+
386
+ def test_fragment_to_document(self, fragment):
387
+ """See `TreeBuilder`."""
388
+ return '<html><body>%s</body></html>' % fragment
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/css.py ADDED
@@ -0,0 +1,280 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Integration code for CSS selectors using Soup Sieve (pypi: soupsieve)."""
2
+
3
+ import warnings
4
+ try:
5
+ import soupsieve
6
+ except ImportError as e:
7
+ soupsieve = None
8
+ warnings.warn(
9
+ 'The soupsieve package is not installed. CSS selectors cannot be used.'
10
+ )
11
+
12
+
13
+ class CSS(object):
14
+ """A proxy object against the soupsieve library, to simplify its
15
+ CSS selector API.
16
+
17
+ Acquire this object through the .css attribute on the
18
+ BeautifulSoup object, or on the Tag you want to use as the
19
+ starting point for a CSS selector.
20
+
21
+ The main advantage of doing this is that the tag to be selected
22
+ against doesn't need to be explicitly specified in the function
23
+ calls, since it's already scoped to a tag.
24
+ """
25
+
26
+ def __init__(self, tag, api=soupsieve):
27
+ """Constructor.
28
+
29
+ You don't need to instantiate this class yourself; instead,
30
+ access the .css attribute on the BeautifulSoup object, or on
31
+ the Tag you want to use as the starting point for your CSS
32
+ selector.
33
+
34
+ :param tag: All CSS selectors will use this as their starting
35
+ point.
36
+
37
+ :param api: A plug-in replacement for the soupsieve module,
38
+ designed mainly for use in tests.
39
+ """
40
+ if api is None:
41
+ raise NotImplementedError(
42
+ "Cannot execute CSS selectors because the soupsieve package is not installed."
43
+ )
44
+ self.api = api
45
+ self.tag = tag
46
+
47
+ def escape(self, ident):
48
+ """Escape a CSS identifier.
49
+
50
+ This is a simple wrapper around soupselect.escape(). See the
51
+ documentation for that function for more information.
52
+ """
53
+ if soupsieve is None:
54
+ raise NotImplementedError(
55
+ "Cannot escape CSS identifiers because the soupsieve package is not installed."
56
+ )
57
+ return self.api.escape(ident)
58
+
59
+ def _ns(self, ns, select):
60
+ """Normalize a dictionary of namespaces."""
61
+ if not isinstance(select, self.api.SoupSieve) and ns is None:
62
+ # If the selector is a precompiled pattern, it already has
63
+ # a namespace context compiled in, which cannot be
64
+ # replaced.
65
+ ns = self.tag._namespaces
66
+ return ns
67
+
68
+ def _rs(self, results):
69
+ """Normalize a list of results to a Resultset.
70
+
71
+ A ResultSet is more consistent with the rest of Beautiful
72
+ Soup's API, and ResultSet.__getattr__ has a helpful error
73
+ message if you try to treat a list of results as a single
74
+ result (a common mistake).
75
+ """
76
+ # Import here to avoid circular import
77
+ from bs4.element import ResultSet
78
+ return ResultSet(None, results)
79
+
80
+ def compile(self, select, namespaces=None, flags=0, **kwargs):
81
+ """Pre-compile a selector and return the compiled object.
82
+
83
+ :param selector: A CSS selector.
84
+
85
+ :param namespaces: A dictionary mapping namespace prefixes
86
+ used in the CSS selector to namespace URIs. By default,
87
+ Beautiful Soup will use the prefixes it encountered while
88
+ parsing the document.
89
+
90
+ :param flags: Flags to be passed into Soup Sieve's
91
+ soupsieve.compile() method.
92
+
93
+ :param kwargs: Keyword arguments to be passed into SoupSieve's
94
+ soupsieve.compile() method.
95
+
96
+ :return: A precompiled selector object.
97
+ :rtype: soupsieve.SoupSieve
98
+ """
99
+ return self.api.compile(
100
+ select, self._ns(namespaces, select), flags, **kwargs
101
+ )
102
+
103
+ def select_one(self, select, namespaces=None, flags=0, **kwargs):
104
+ """Perform a CSS selection operation on the current Tag and return the
105
+ first result.
106
+
107
+ This uses the Soup Sieve library. For more information, see
108
+ that library's documentation for the soupsieve.select_one()
109
+ method.
110
+
111
+ :param selector: A CSS selector.
112
+
113
+ :param namespaces: A dictionary mapping namespace prefixes
114
+ used in the CSS selector to namespace URIs. By default,
115
+ Beautiful Soup will use the prefixes it encountered while
116
+ parsing the document.
117
+
118
+ :param flags: Flags to be passed into Soup Sieve's
119
+ soupsieve.select_one() method.
120
+
121
+ :param kwargs: Keyword arguments to be passed into SoupSieve's
122
+ soupsieve.select_one() method.
123
+
124
+ :return: A Tag, or None if the selector has no match.
125
+ :rtype: bs4.element.Tag
126
+
127
+ """
128
+ return self.api.select_one(
129
+ select, self.tag, self._ns(namespaces, select), flags, **kwargs
130
+ )
131
+
132
+ def select(self, select, namespaces=None, limit=0, flags=0, **kwargs):
133
+ """Perform a CSS selection operation on the current Tag.
134
+
135
+ This uses the Soup Sieve library. For more information, see
136
+ that library's documentation for the soupsieve.select()
137
+ method.
138
+
139
+ :param selector: A string containing a CSS selector.
140
+
141
+ :param namespaces: A dictionary mapping namespace prefixes
142
+ used in the CSS selector to namespace URIs. By default,
143
+ Beautiful Soup will pass in the prefixes it encountered while
144
+ parsing the document.
145
+
146
+ :param limit: After finding this number of results, stop looking.
147
+
148
+ :param flags: Flags to be passed into Soup Sieve's
149
+ soupsieve.select() method.
150
+
151
+ :param kwargs: Keyword arguments to be passed into SoupSieve's
152
+ soupsieve.select() method.
153
+
154
+ :return: A ResultSet of Tag objects.
155
+ :rtype: bs4.element.ResultSet
156
+
157
+ """
158
+ if limit is None:
159
+ limit = 0
160
+
161
+ return self._rs(
162
+ self.api.select(
163
+ select, self.tag, self._ns(namespaces, select), limit, flags,
164
+ **kwargs
165
+ )
166
+ )
167
+
168
+ def iselect(self, select, namespaces=None, limit=0, flags=0, **kwargs):
169
+ """Perform a CSS selection operation on the current Tag.
170
+
171
+ This uses the Soup Sieve library. For more information, see
172
+ that library's documentation for the soupsieve.iselect()
173
+ method. It is the same as select(), but it returns a generator
174
+ instead of a list.
175
+
176
+ :param selector: A string containing a CSS selector.
177
+
178
+ :param namespaces: A dictionary mapping namespace prefixes
179
+ used in the CSS selector to namespace URIs. By default,
180
+ Beautiful Soup will pass in the prefixes it encountered while
181
+ parsing the document.
182
+
183
+ :param limit: After finding this number of results, stop looking.
184
+
185
+ :param flags: Flags to be passed into Soup Sieve's
186
+ soupsieve.iselect() method.
187
+
188
+ :param kwargs: Keyword arguments to be passed into SoupSieve's
189
+ soupsieve.iselect() method.
190
+
191
+ :return: A generator
192
+ :rtype: types.GeneratorType
193
+ """
194
+ return self.api.iselect(
195
+ select, self.tag, self._ns(namespaces, select), limit, flags, **kwargs
196
+ )
197
+
198
+ def closest(self, select, namespaces=None, flags=0, **kwargs):
199
+ """Find the Tag closest to this one that matches the given selector.
200
+
201
+ This uses the Soup Sieve library. For more information, see
202
+ that library's documentation for the soupsieve.closest()
203
+ method.
204
+
205
+ :param selector: A string containing a CSS selector.
206
+
207
+ :param namespaces: A dictionary mapping namespace prefixes
208
+ used in the CSS selector to namespace URIs. By default,
209
+ Beautiful Soup will pass in the prefixes it encountered while
210
+ parsing the document.
211
+
212
+ :param flags: Flags to be passed into Soup Sieve's
213
+ soupsieve.closest() method.
214
+
215
+ :param kwargs: Keyword arguments to be passed into SoupSieve's
216
+ soupsieve.closest() method.
217
+
218
+ :return: A Tag, or None if there is no match.
219
+ :rtype: bs4.Tag
220
+
221
+ """
222
+ return self.api.closest(
223
+ select, self.tag, self._ns(namespaces, select), flags, **kwargs
224
+ )
225
+
226
+ def match(self, select, namespaces=None, flags=0, **kwargs):
227
+ """Check whether this Tag matches the given CSS selector.
228
+
229
+ This uses the Soup Sieve library. For more information, see
230
+ that library's documentation for the soupsieve.match()
231
+ method.
232
+
233
+ :param: a CSS selector.
234
+
235
+ :param namespaces: A dictionary mapping namespace prefixes
236
+ used in the CSS selector to namespace URIs. By default,
237
+ Beautiful Soup will pass in the prefixes it encountered while
238
+ parsing the document.
239
+
240
+ :param flags: Flags to be passed into Soup Sieve's
241
+ soupsieve.match() method.
242
+
243
+ :param kwargs: Keyword arguments to be passed into SoupSieve's
244
+ soupsieve.match() method.
245
+
246
+ :return: True if this Tag matches the selector; False otherwise.
247
+ :rtype: bool
248
+ """
249
+ return self.api.match(
250
+ select, self.tag, self._ns(namespaces, select), flags, **kwargs
251
+ )
252
+
253
+ def filter(self, select, namespaces=None, flags=0, **kwargs):
254
+ """Filter this Tag's direct children based on the given CSS selector.
255
+
256
+ This uses the Soup Sieve library. It works the same way as
257
+ passing this Tag into that library's soupsieve.filter()
258
+ method. More information, for more information see the
259
+ documentation for soupsieve.filter().
260
+
261
+ :param namespaces: A dictionary mapping namespace prefixes
262
+ used in the CSS selector to namespace URIs. By default,
263
+ Beautiful Soup will pass in the prefixes it encountered while
264
+ parsing the document.
265
+
266
+ :param flags: Flags to be passed into Soup Sieve's
267
+ soupsieve.filter() method.
268
+
269
+ :param kwargs: Keyword arguments to be passed into SoupSieve's
270
+ soupsieve.filter() method.
271
+
272
+ :return: A ResultSet of Tag objects.
273
+ :rtype: bs4.element.ResultSet
274
+
275
+ """
276
+ return self._rs(
277
+ self.api.filter(
278
+ select, self.tag, self._ns(namespaces, select), flags, **kwargs
279
+ )
280
+ )
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/dammit.py ADDED
@@ -0,0 +1,1095 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """Beautiful Soup bonus library: Unicode, Dammit
3
+
4
+ This library converts a bytestream to Unicode through any means
5
+ necessary. It is heavily based on code from Mark Pilgrim's Universal
6
+ Feed Parser. It works best on XML and HTML, but it does not rewrite the
7
+ XML or HTML to reflect a new encoding; that's the tree builder's job.
8
+ """
9
+ # Use of this source code is governed by the MIT license.
10
+ __license__ = "MIT"
11
+
12
+ from html.entities import codepoint2name
13
+ from collections import defaultdict
14
+ import codecs
15
+ import re
16
+ import logging
17
+ import string
18
+
19
+ # Import a library to autodetect character encodings. We'll support
20
+ # any of a number of libraries that all support the same API:
21
+ #
22
+ # * cchardet
23
+ # * chardet
24
+ # * charset-normalizer
25
+ chardet_module = None
26
+ try:
27
+ # PyPI package: cchardet
28
+ import cchardet as chardet_module
29
+ except ImportError:
30
+ try:
31
+ # Debian package: python-chardet
32
+ # PyPI package: chardet
33
+ import chardet as chardet_module
34
+ except ImportError:
35
+ try:
36
+ # PyPI package: charset-normalizer
37
+ import charset_normalizer as chardet_module
38
+ except ImportError:
39
+ # No chardet available.
40
+ chardet_module = None
41
+
42
+ if chardet_module:
43
+ def chardet_dammit(s):
44
+ if isinstance(s, str):
45
+ return None
46
+ return chardet_module.detect(s)['encoding']
47
+ else:
48
+ def chardet_dammit(s):
49
+ return None
50
+
51
+ # Build bytestring and Unicode versions of regular expressions for finding
52
+ # a declared encoding inside an XML or HTML document.
53
+ xml_encoding = '^\\s*<\\?.*encoding=[\'"](.*?)[\'"].*\\?>'
54
+ html_meta = '<\\s*meta[^>]+charset\\s*=\\s*["\']?([^>]*?)[ /;\'">]'
55
+ encoding_res = dict()
56
+ encoding_res[bytes] = {
57
+ 'html' : re.compile(html_meta.encode("ascii"), re.I),
58
+ 'xml' : re.compile(xml_encoding.encode("ascii"), re.I),
59
+ }
60
+ encoding_res[str] = {
61
+ 'html' : re.compile(html_meta, re.I),
62
+ 'xml' : re.compile(xml_encoding, re.I)
63
+ }
64
+
65
+ from html.entities import html5
66
+
67
+ class EntitySubstitution(object):
68
+ """The ability to substitute XML or HTML entities for certain characters."""
69
+
70
+ def _populate_class_variables():
71
+ """Initialize variables used by this class to manage the plethora of
72
+ HTML5 named entities.
73
+
74
+ This function returns a 3-tuple containing two dictionaries
75
+ and a regular expression:
76
+
77
+ unicode_to_name - A mapping of Unicode strings like "⦨" to
78
+ entity names like "angmsdaa". When a single Unicode string has
79
+ multiple entity names, we try to choose the most commonly-used
80
+ name.
81
+
82
+ name_to_unicode: A mapping of entity names like "angmsdaa" to
83
+ Unicode strings like "⦨".
84
+
85
+ named_entity_re: A regular expression matching (almost) any
86
+ Unicode string that corresponds to an HTML5 named entity.
87
+ """
88
+ unicode_to_name = {}
89
+ name_to_unicode = {}
90
+
91
+ short_entities = set()
92
+ long_entities_by_first_character = defaultdict(set)
93
+
94
+ for name_with_semicolon, character in sorted(html5.items()):
95
+ # "It is intentional, for legacy compatibility, that many
96
+ # code points have multiple character reference names. For
97
+ # example, some appear both with and without the trailing
98
+ # semicolon, or with different capitalizations."
99
+ # - https://html.spec.whatwg.org/multipage/named-characters.html#named-character-references
100
+ #
101
+ # The parsers are in charge of handling (or not) character
102
+ # references with no trailing semicolon, so we remove the
103
+ # semicolon whenever it appears.
104
+ if name_with_semicolon.endswith(';'):
105
+ name = name_with_semicolon[:-1]
106
+ else:
107
+ name = name_with_semicolon
108
+
109
+ # When parsing HTML, we want to recognize any known named
110
+ # entity and convert it to a sequence of Unicode
111
+ # characters.
112
+ if name not in name_to_unicode:
113
+ name_to_unicode[name] = character
114
+
115
+ # When _generating_ HTML, we want to recognize special
116
+ # character sequences that _could_ be converted to named
117
+ # entities.
118
+ unicode_to_name[character] = name
119
+
120
+ # We also need to build a regular expression that lets us
121
+ # _find_ those characters in output strings so we can
122
+ # replace them.
123
+ #
124
+ # This is tricky, for two reasons.
125
+
126
+ if (len(character) == 1 and ord(character) < 128
127
+ and character not in '<>&'):
128
+ # First, it would be annoying to turn single ASCII
129
+ # characters like | into named entities like
130
+ # &verbar;. The exceptions are <>&, which we _must_
131
+ # turn into named entities to produce valid HTML.
132
+ continue
133
+
134
+ if len(character) > 1 and all(ord(x) < 128 for x in character):
135
+ # We also do not want to turn _combinations_ of ASCII
136
+ # characters like 'fj' into named entities like '&fjlig;',
137
+ # though that's more debateable.
138
+ continue
139
+
140
+ # Second, some named entities have a Unicode value that's
141
+ # a subset of the Unicode value for some _other_ named
142
+ # entity. As an example, \u2267' is &GreaterFullEqual;,
143
+ # but '\u2267\u0338' is &NotGreaterFullEqual;. Our regular
144
+ # expression needs to match the first two characters of
145
+ # "\u2267\u0338foo", but only the first character of
146
+ # "\u2267foo".
147
+ #
148
+ # In this step, we build two sets of characters that
149
+ # _eventually_ need to go into the regular expression. But
150
+ # we won't know exactly what the regular expression needs
151
+ # to look like until we've gone through the entire list of
152
+ # named entities.
153
+ if len(character) == 1:
154
+ short_entities.add(character)
155
+ else:
156
+ long_entities_by_first_character[character[0]].add(character)
157
+
158
+ # Now that we've been through the entire list of entities, we
159
+ # can create a regular expression that matches any of them.
160
+ particles = set()
161
+ for short in short_entities:
162
+ long_versions = long_entities_by_first_character[short]
163
+ if not long_versions:
164
+ particles.add(short)
165
+ else:
166
+ ignore = "".join([x[1] for x in long_versions])
167
+ # This finds, e.g. \u2267 but only if it is _not_
168
+ # followed by \u0338.
169
+ particles.add("%s(?![%s])" % (short, ignore))
170
+
171
+ for long_entities in list(long_entities_by_first_character.values()):
172
+ for long_entity in long_entities:
173
+ particles.add(long_entity)
174
+
175
+ re_definition = "(%s)" % "|".join(particles)
176
+
177
+ # If an entity shows up in both html5 and codepoint2name, it's
178
+ # likely that HTML5 gives it several different names, such as
179
+ # 'rsquo' and 'rsquor'. When converting Unicode characters to
180
+ # named entities, the codepoint2name name should take
181
+ # precedence where possible, since that's the more easily
182
+ # recognizable one.
183
+ for codepoint, name in list(codepoint2name.items()):
184
+ character = chr(codepoint)
185
+ unicode_to_name[character] = name
186
+
187
+ return unicode_to_name, name_to_unicode, re.compile(re_definition)
188
+ (CHARACTER_TO_HTML_ENTITY, HTML_ENTITY_TO_CHARACTER,
189
+ CHARACTER_TO_HTML_ENTITY_RE) = _populate_class_variables()
190
+
191
+ CHARACTER_TO_XML_ENTITY = {
192
+ "'": "apos",
193
+ '"': "quot",
194
+ "&": "amp",
195
+ "<": "lt",
196
+ ">": "gt",
197
+ }
198
+
199
+ BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
200
+ "&(?!#\\d+;|#x[0-9a-fA-F]+;|\\w+;)"
201
+ ")")
202
+
203
+ AMPERSAND_OR_BRACKET = re.compile("([<>&])")
204
+
205
+ @classmethod
206
+ def _substitute_html_entity(cls, matchobj):
207
+ """Used with a regular expression to substitute the
208
+ appropriate HTML entity for a special character string."""
209
+ entity = cls.CHARACTER_TO_HTML_ENTITY.get(matchobj.group(0))
210
+ return "&%s;" % entity
211
+
212
+ @classmethod
213
+ def _substitute_xml_entity(cls, matchobj):
214
+ """Used with a regular expression to substitute the
215
+ appropriate XML entity for a special character string."""
216
+ entity = cls.CHARACTER_TO_XML_ENTITY[matchobj.group(0)]
217
+ return "&%s;" % entity
218
+
219
+ @classmethod
220
+ def quoted_attribute_value(self, value):
221
+ """Make a value into a quoted XML attribute, possibly escaping it.
222
+
223
+ Most strings will be quoted using double quotes.
224
+
225
+ Bob's Bar -> "Bob's Bar"
226
+
227
+ If a string contains double quotes, it will be quoted using
228
+ single quotes.
229
+
230
+ Welcome to "my bar" -> 'Welcome to "my bar"'
231
+
232
+ If a string contains both single and double quotes, the
233
+ double quotes will be escaped, and the string will be quoted
234
+ using double quotes.
235
+
236
+ Welcome to "Bob's Bar" -> "Welcome to &quot;Bob's bar&quot;
237
+ """
238
+ quote_with = '"'
239
+ if '"' in value:
240
+ if "'" in value:
241
+ # The string contains both single and double
242
+ # quotes. Turn the double quotes into
243
+ # entities. We quote the double quotes rather than
244
+ # the single quotes because the entity name is
245
+ # "&quot;" whether this is HTML or XML. If we
246
+ # quoted the single quotes, we'd have to decide
247
+ # between &apos; and &squot;.
248
+ replace_with = "&quot;"
249
+ value = value.replace('"', replace_with)
250
+ else:
251
+ # There are double quotes but no single quotes.
252
+ # We can use single quotes to quote the attribute.
253
+ quote_with = "'"
254
+ return quote_with + value + quote_with
255
+
256
+ @classmethod
257
+ def substitute_xml(cls, value, make_quoted_attribute=False):
258
+ """Substitute XML entities for special XML characters.
259
+
260
+ :param value: A string to be substituted. The less-than sign
261
+ will become &lt;, the greater-than sign will become &gt;,
262
+ and any ampersands will become &amp;. If you want ampersands
263
+ that appear to be part of an entity definition to be left
264
+ alone, use substitute_xml_containing_entities() instead.
265
+
266
+ :param make_quoted_attribute: If True, then the string will be
267
+ quoted, as befits an attribute value.
268
+ """
269
+ # Escape angle brackets and ampersands.
270
+ value = cls.AMPERSAND_OR_BRACKET.sub(
271
+ cls._substitute_xml_entity, value)
272
+
273
+ if make_quoted_attribute:
274
+ value = cls.quoted_attribute_value(value)
275
+ return value
276
+
277
+ @classmethod
278
+ def substitute_xml_containing_entities(
279
+ cls, value, make_quoted_attribute=False):
280
+ """Substitute XML entities for special XML characters.
281
+
282
+ :param value: A string to be substituted. The less-than sign will
283
+ become &lt;, the greater-than sign will become &gt;, and any
284
+ ampersands that are not part of an entity defition will
285
+ become &amp;.
286
+
287
+ :param make_quoted_attribute: If True, then the string will be
288
+ quoted, as befits an attribute value.
289
+ """
290
+ # Escape angle brackets, and ampersands that aren't part of
291
+ # entities.
292
+ value = cls.BARE_AMPERSAND_OR_BRACKET.sub(
293
+ cls._substitute_xml_entity, value)
294
+
295
+ if make_quoted_attribute:
296
+ value = cls.quoted_attribute_value(value)
297
+ return value
298
+
299
+ @classmethod
300
+ def substitute_html(cls, s):
301
+ """Replace certain Unicode characters with named HTML entities.
302
+
303
+ This differs from data.encode(encoding, 'xmlcharrefreplace')
304
+ in that the goal is to make the result more readable (to those
305
+ with ASCII displays) rather than to recover from
306
+ errors. There's absolutely nothing wrong with a UTF-8 string
307
+ containg a LATIN SMALL LETTER E WITH ACUTE, but replacing that
308
+ character with "&eacute;" will make it more readable to some
309
+ people.
310
+
311
+ :param s: A Unicode string.
312
+ """
313
+ return cls.CHARACTER_TO_HTML_ENTITY_RE.sub(
314
+ cls._substitute_html_entity, s)
315
+
316
+
317
+ class EncodingDetector:
318
+ """Suggests a number of possible encodings for a bytestring.
319
+
320
+ Order of precedence:
321
+
322
+ 1. Encodings you specifically tell EncodingDetector to try first
323
+ (the known_definite_encodings argument to the constructor).
324
+
325
+ 2. An encoding determined by sniffing the document's byte-order mark.
326
+
327
+ 3. Encodings you specifically tell EncodingDetector to try if
328
+ byte-order mark sniffing fails (the user_encodings argument to the
329
+ constructor).
330
+
331
+ 4. An encoding declared within the bytestring itself, either in an
332
+ XML declaration (if the bytestring is to be interpreted as an XML
333
+ document), or in a <meta> tag (if the bytestring is to be
334
+ interpreted as an HTML document.)
335
+
336
+ 5. An encoding detected through textual analysis by chardet,
337
+ cchardet, or a similar external library.
338
+
339
+ 4. UTF-8.
340
+
341
+ 5. Windows-1252.
342
+
343
+ """
344
+ def __init__(self, markup, known_definite_encodings=None,
345
+ is_html=False, exclude_encodings=None,
346
+ user_encodings=None, override_encodings=None):
347
+ """Constructor.
348
+
349
+ :param markup: Some markup in an unknown encoding.
350
+
351
+ :param known_definite_encodings: When determining the encoding
352
+ of `markup`, these encodings will be tried first, in
353
+ order. In HTML terms, this corresponds to the "known
354
+ definite encoding" step defined here:
355
+ https://html.spec.whatwg.org/multipage/parsing.html#parsing-with-a-known-character-encoding
356
+
357
+ :param user_encodings: These encodings will be tried after the
358
+ `known_definite_encodings` have been tried and failed, and
359
+ after an attempt to sniff the encoding by looking at a
360
+ byte order mark has failed. In HTML terms, this
361
+ corresponds to the step "user has explicitly instructed
362
+ the user agent to override the document's character
363
+ encoding", defined here:
364
+ https://html.spec.whatwg.org/multipage/parsing.html#determining-the-character-encoding
365
+
366
+ :param override_encodings: A deprecated alias for
367
+ known_definite_encodings. Any encodings here will be tried
368
+ immediately after the encodings in
369
+ known_definite_encodings.
370
+
371
+ :param is_html: If True, this markup is considered to be
372
+ HTML. Otherwise it's assumed to be XML.
373
+
374
+ :param exclude_encodings: These encodings will not be tried,
375
+ even if they otherwise would be.
376
+
377
+ """
378
+ self.known_definite_encodings = list(known_definite_encodings or [])
379
+ if override_encodings:
380
+ self.known_definite_encodings += override_encodings
381
+ self.user_encodings = user_encodings or []
382
+ exclude_encodings = exclude_encodings or []
383
+ self.exclude_encodings = set([x.lower() for x in exclude_encodings])
384
+ self.chardet_encoding = None
385
+ self.is_html = is_html
386
+ self.declared_encoding = None
387
+
388
+ # First order of business: strip a byte-order mark.
389
+ self.markup, self.sniffed_encoding = self.strip_byte_order_mark(markup)
390
+
391
+ def _usable(self, encoding, tried):
392
+ """Should we even bother to try this encoding?
393
+
394
+ :param encoding: Name of an encoding.
395
+ :param tried: Encodings that have already been tried. This will be modified
396
+ as a side effect.
397
+ """
398
+ if encoding is not None:
399
+ encoding = encoding.lower()
400
+ if encoding in self.exclude_encodings:
401
+ return False
402
+ if encoding not in tried:
403
+ tried.add(encoding)
404
+ return True
405
+ return False
406
+
407
+ @property
408
+ def encodings(self):
409
+ """Yield a number of encodings that might work for this markup.
410
+
411
+ :yield: A sequence of strings.
412
+ """
413
+ tried = set()
414
+
415
+ # First, try the known definite encodings
416
+ for e in self.known_definite_encodings:
417
+ if self._usable(e, tried):
418
+ yield e
419
+
420
+ # Did the document originally start with a byte-order mark
421
+ # that indicated its encoding?
422
+ if self._usable(self.sniffed_encoding, tried):
423
+ yield self.sniffed_encoding
424
+
425
+ # Sniffing the byte-order mark did nothing; try the user
426
+ # encodings.
427
+ for e in self.user_encodings:
428
+ if self._usable(e, tried):
429
+ yield e
430
+
431
+ # Look within the document for an XML or HTML encoding
432
+ # declaration.
433
+ if self.declared_encoding is None:
434
+ self.declared_encoding = self.find_declared_encoding(
435
+ self.markup, self.is_html)
436
+ if self._usable(self.declared_encoding, tried):
437
+ yield self.declared_encoding
438
+
439
+ # Use third-party character set detection to guess at the
440
+ # encoding.
441
+ if self.chardet_encoding is None:
442
+ self.chardet_encoding = chardet_dammit(self.markup)
443
+ if self._usable(self.chardet_encoding, tried):
444
+ yield self.chardet_encoding
445
+
446
+ # As a last-ditch effort, try utf-8 and windows-1252.
447
+ for e in ('utf-8', 'windows-1252'):
448
+ if self._usable(e, tried):
449
+ yield e
450
+
451
+ @classmethod
452
+ def strip_byte_order_mark(cls, data):
453
+ """If a byte-order mark is present, strip it and return the encoding it implies.
454
+
455
+ :param data: Some markup.
456
+ :return: A 2-tuple (modified data, implied encoding)
457
+ """
458
+ encoding = None
459
+ if isinstance(data, str):
460
+ # Unicode data cannot have a byte-order mark.
461
+ return data, encoding
462
+ if (len(data) >= 4) and (data[:2] == b'\xfe\xff') \
463
+ and (data[2:4] != '\x00\x00'):
464
+ encoding = 'utf-16be'
465
+ data = data[2:]
466
+ elif (len(data) >= 4) and (data[:2] == b'\xff\xfe') \
467
+ and (data[2:4] != '\x00\x00'):
468
+ encoding = 'utf-16le'
469
+ data = data[2:]
470
+ elif data[:3] == b'\xef\xbb\xbf':
471
+ encoding = 'utf-8'
472
+ data = data[3:]
473
+ elif data[:4] == b'\x00\x00\xfe\xff':
474
+ encoding = 'utf-32be'
475
+ data = data[4:]
476
+ elif data[:4] == b'\xff\xfe\x00\x00':
477
+ encoding = 'utf-32le'
478
+ data = data[4:]
479
+ return data, encoding
480
+
481
    @classmethod
    def find_declared_encoding(cls, markup, is_html=False, search_entire_document=False):
        """Given a document, tries to find its declared encoding.

        An XML encoding is declared at the beginning of the document.

        An HTML encoding is declared in a <meta> tag, hopefully near the
        beginning of the document.

        :param markup: Some markup, as bytes or str.
        :param is_html: If True, this markup is considered to be HTML. Otherwise
            it's assumed to be XML.
        :param search_entire_document: Since an encoding is supposed to declared near the beginning
            of the document, most of the time it's only necessary to search a few kilobytes of data.
            Set this to True to force this method to search the entire document.
        :return: The declared encoding as a lowercased str, or None if no
            declaration was found.
        """
        if search_entire_document:
            xml_endpos = html_endpos = len(markup)
        else:
            # An XML declaration must appear right at the start; an HTML
            # <meta> tag gets more room — at least 2K, or 5% of the document.
            xml_endpos = 1024
            html_endpos = max(2048, int(len(markup) * 0.05))

        # Pick the regexes compiled for this markup's type (bytes vs str);
        # `encoding_res` is a module-level table keyed by type.
        if isinstance(markup, bytes):
            res = encoding_res[bytes]
        else:
            res = encoding_res[str]

        xml_re = res['xml']
        html_re = res['html']
        declared_encoding = None
        # Try the XML declaration first; fall back to the HTML <meta>
        # pattern only when the caller says this is HTML.
        declared_encoding_match = xml_re.search(markup, endpos=xml_endpos)
        if not declared_encoding_match and is_html:
            declared_encoding_match = html_re.search(markup, endpos=html_endpos)
        if declared_encoding_match is not None:
            declared_encoding = declared_encoding_match.groups()[0]
        if declared_encoding:
            # Encoding names are ASCII; unmappable bytes become U+FFFD.
            if isinstance(declared_encoding, bytes):
                declared_encoding = declared_encoding.decode('ascii', 'replace')
            return declared_encoding.lower()
        return None
521
+
522
+ class UnicodeDammit:
523
+ """A class for detecting the encoding of a *ML document and
524
+ converting it to a Unicode string. If the source encoding is
525
+ windows-1252, can replace MS smart quotes with their HTML or XML
526
+ equivalents."""
527
+
528
+ # This dictionary maps commonly seen values for "charset" in HTML
529
+ # meta tags to the corresponding Python codec names. It only covers
530
+ # values that aren't in Python's aliases and can't be determined
531
+ # by the heuristics in find_codec.
532
+ CHARSET_ALIASES = {"macintosh": "mac-roman",
533
+ "x-sjis": "shift-jis"}
534
+
535
+ ENCODINGS_WITH_SMART_QUOTES = [
536
+ "windows-1252",
537
+ "iso-8859-1",
538
+ "iso-8859-2",
539
+ ]
540
+
541
    def __init__(self, markup, known_definite_encodings=[],
                 smart_quotes_to=None, is_html=False, exclude_encodings=[],
                 user_encodings=None, override_encodings=None
                 ):
        """Constructor.

        :param markup: A bytestring representing markup in an unknown encoding.

        :param known_definite_encodings: When determining the encoding
            of `markup`, these encodings will be tried first, in
            order. In HTML terms, this corresponds to the "known
            definite encoding" step defined here:
            https://html.spec.whatwg.org/multipage/parsing.html#parsing-with-a-known-character-encoding

        :param user_encodings: These encodings will be tried after the
            `known_definite_encodings` have been tried and failed, and
            after an attempt to sniff the encoding by looking at a
            byte order mark has failed. In HTML terms, this
            corresponds to the step "user has explicitly instructed
            the user agent to override the document's character
            encoding", defined here:
            https://html.spec.whatwg.org/multipage/parsing.html#determining-the-character-encoding

        :param override_encodings: A deprecated alias for
            known_definite_encodings. Any encodings here will be tried
            immediately after the encodings in
            known_definite_encodings.

        :param smart_quotes_to: By default, Microsoft smart quotes will, like all other characters, be converted
            to Unicode characters. Setting this to 'ascii' will convert them to ASCII quotes instead.
            Setting it to 'xml' will convert them to XML entity references, and setting it to 'html'
            will convert them to HTML entity references.
        :param is_html: If True, this markup is considered to be HTML. Otherwise
            it's assumed to be XML.
        :param exclude_encodings: These encodings will not be considered, even
            if the sniffing code thinks they might make sense.

        """
        # NOTE(review): known_definite_encodings/exclude_encodings use
        # mutable list defaults; they appear to be read-only here but are
        # also handed to EncodingDetector — confirm nothing mutates them.
        self.smart_quotes_to = smart_quotes_to
        # (codec, errors) pairs already attempted, so we never retry one.
        self.tried_encodings = []
        self.contains_replacement_characters = False
        self.is_html = is_html
        self.log = logging.getLogger(__name__)
        self.detector = EncodingDetector(
            markup, known_definite_encodings, is_html, exclude_encodings,
            user_encodings, override_encodings
        )

        # Short-circuit if the data is in Unicode to begin with.
        if isinstance(markup, str) or markup == '':
            self.markup = markup
            self.unicode_markup = str(markup)
            self.original_encoding = None
            return

        # The encoding detector may have stripped a byte-order mark.
        # Use the stripped markup from this point on.
        self.markup = self.detector.markup

        # First pass: try each candidate encoding strictly, stopping at
        # the first one that decodes without error.
        u = None
        for encoding in self.detector.encodings:
            markup = self.detector.markup
            u = self._convert_from(encoding)
            if u is not None:
                break

        if not u:
            # None of the encodings worked. As an absolute last resort,
            # try them again with character replacement.

            for encoding in self.detector.encodings:
                if encoding != "ascii":
                    u = self._convert_from(encoding, "replace")
                if u is not None:
                    self.log.warning(
                            "Some characters could not be decoded, and were "
                            "replaced with REPLACEMENT CHARACTER."
                        )
                    self.contains_replacement_characters = True
                    break

        # If none of that worked, we could at this point force it to
        # ASCII, but that would destroy so much data that I think
        # giving up is better.
        self.unicode_markup = u
        if not u:
            self.original_encoding = None
628
+
629
+ def _sub_ms_char(self, match):
630
+ """Changes a MS smart quote character to an XML or HTML
631
+ entity, or an ASCII character."""
632
+ orig = match.group(1)
633
+ if self.smart_quotes_to == 'ascii':
634
+ sub = self.MS_CHARS_TO_ASCII.get(orig).encode()
635
+ else:
636
+ sub = self.MS_CHARS.get(orig)
637
+ if type(sub) == tuple:
638
+ if self.smart_quotes_to == 'xml':
639
+ sub = '&#x'.encode() + sub[1].encode() + ';'.encode()
640
+ else:
641
+ sub = '&'.encode() + sub[0].encode() + ';'.encode()
642
+ else:
643
+ sub = sub.encode()
644
+ return sub
645
+
646
    def _convert_from(self, proposed, errors="strict"):
        """Attempt to convert the markup to the proposed encoding.

        :param proposed: The name of a character encoding.
        :param errors: Codec error-handling scheme ('strict' or 'replace').
        :return: The markup as a Unicode string on success; None if the
            codec is unknown, was already tried with these errors, or
            failed to decode. On success, also sets self.markup and
            self.original_encoding.
        """
        proposed = self.find_codec(proposed)
        # Skip unknown codecs and (codec, errors) pairs already attempted.
        if not proposed or (proposed, errors) in self.tried_encodings:
            return None
        self.tried_encodings.append((proposed, errors))
        markup = self.markup
        # Convert smart quotes to HTML if coming from an encoding
        # that might have them.
        if (self.smart_quotes_to is not None
            and proposed in self.ENCODINGS_WITH_SMART_QUOTES):
            smart_quotes_re = b"([\x80-\x9f])"
            smart_quotes_compiled = re.compile(smart_quotes_re)
            markup = smart_quotes_compiled.sub(self._sub_ms_char, markup)

        try:
            #print("Trying to convert document to %s (errors=%s)" % (
            #    proposed, errors))
            u = self._to_unicode(markup, proposed, errors)
            self.markup = u
            self.original_encoding = proposed
        except Exception as e:
            # Any decode failure means this encoding is wrong; the
            # caller will move on to the next candidate.
            #print("That didn't work!")
            #print(e)
            return None
        #print("Correct encoding: %s" % proposed)
        return self.markup
676
+
677
+ def _to_unicode(self, data, encoding, errors="strict"):
678
+ """Given a string and its encoding, decodes the string into Unicode.
679
+
680
+ :param encoding: The name of an encoding.
681
+ """
682
+ return str(data, encoding, errors)
683
+
684
+ @property
685
+ def declared_html_encoding(self):
686
+ """If the markup is an HTML document, returns the encoding declared _within_
687
+ the document.
688
+ """
689
+ if not self.is_html:
690
+ return None
691
+ return self.detector.declared_encoding
692
+
693
+ def find_codec(self, charset):
694
+ """Convert the name of a character set to a codec name.
695
+
696
+ :param charset: The name of a character set.
697
+ :return: The name of a codec.
698
+ """
699
+ value = (self._codec(self.CHARSET_ALIASES.get(charset, charset))
700
+ or (charset and self._codec(charset.replace("-", "")))
701
+ or (charset and self._codec(charset.replace("-", "_")))
702
+ or (charset and charset.lower())
703
+ or charset
704
+ )
705
+ if value:
706
+ return value.lower()
707
+ return None
708
+
709
+ def _codec(self, charset):
710
+ if not charset:
711
+ return charset
712
+ codec = None
713
+ try:
714
+ codecs.lookup(charset)
715
+ codec = charset
716
+ except (LookupError, ValueError):
717
+ pass
718
+ return codec
719
+
720
+
721
+ # A partial mapping of ISO-Latin-1 to HTML entities/XML numeric entities.
722
+ MS_CHARS = {b'\x80': ('euro', '20AC'),
723
+ b'\x81': ' ',
724
+ b'\x82': ('sbquo', '201A'),
725
+ b'\x83': ('fnof', '192'),
726
+ b'\x84': ('bdquo', '201E'),
727
+ b'\x85': ('hellip', '2026'),
728
+ b'\x86': ('dagger', '2020'),
729
+ b'\x87': ('Dagger', '2021'),
730
+ b'\x88': ('circ', '2C6'),
731
+ b'\x89': ('permil', '2030'),
732
+ b'\x8A': ('Scaron', '160'),
733
+ b'\x8B': ('lsaquo', '2039'),
734
+ b'\x8C': ('OElig', '152'),
735
+ b'\x8D': '?',
736
+ b'\x8E': ('#x17D', '17D'),
737
+ b'\x8F': '?',
738
+ b'\x90': '?',
739
+ b'\x91': ('lsquo', '2018'),
740
+ b'\x92': ('rsquo', '2019'),
741
+ b'\x93': ('ldquo', '201C'),
742
+ b'\x94': ('rdquo', '201D'),
743
+ b'\x95': ('bull', '2022'),
744
+ b'\x96': ('ndash', '2013'),
745
+ b'\x97': ('mdash', '2014'),
746
+ b'\x98': ('tilde', '2DC'),
747
+ b'\x99': ('trade', '2122'),
748
+ b'\x9a': ('scaron', '161'),
749
+ b'\x9b': ('rsaquo', '203A'),
750
+ b'\x9c': ('oelig', '153'),
751
+ b'\x9d': '?',
752
+ b'\x9e': ('#x17E', '17E'),
753
+ b'\x9f': ('Yuml', ''),}
754
+
755
+ # A parochial partial mapping of ISO-Latin-1 to ASCII. Contains
756
+ # horrors like stripping diacritical marks to turn á into a, but also
757
+ # contains non-horrors like turning “ into ".
758
+ MS_CHARS_TO_ASCII = {
759
+ b'\x80' : 'EUR',
760
+ b'\x81' : ' ',
761
+ b'\x82' : ',',
762
+ b'\x83' : 'f',
763
+ b'\x84' : ',,',
764
+ b'\x85' : '...',
765
+ b'\x86' : '+',
766
+ b'\x87' : '++',
767
+ b'\x88' : '^',
768
+ b'\x89' : '%',
769
+ b'\x8a' : 'S',
770
+ b'\x8b' : '<',
771
+ b'\x8c' : 'OE',
772
+ b'\x8d' : '?',
773
+ b'\x8e' : 'Z',
774
+ b'\x8f' : '?',
775
+ b'\x90' : '?',
776
+ b'\x91' : "'",
777
+ b'\x92' : "'",
778
+ b'\x93' : '"',
779
+ b'\x94' : '"',
780
+ b'\x95' : '*',
781
+ b'\x96' : '-',
782
+ b'\x97' : '--',
783
+ b'\x98' : '~',
784
+ b'\x99' : '(TM)',
785
+ b'\x9a' : 's',
786
+ b'\x9b' : '>',
787
+ b'\x9c' : 'oe',
788
+ b'\x9d' : '?',
789
+ b'\x9e' : 'z',
790
+ b'\x9f' : 'Y',
791
+ b'\xa0' : ' ',
792
+ b'\xa1' : '!',
793
+ b'\xa2' : 'c',
794
+ b'\xa3' : 'GBP',
795
+ b'\xa4' : '$', #This approximation is especially parochial--this is the
796
+ #generic currency symbol.
797
+ b'\xa5' : 'YEN',
798
+ b'\xa6' : '|',
799
+ b'\xa7' : 'S',
800
+ b'\xa8' : '..',
801
+ b'\xa9' : '',
802
+ b'\xaa' : '(th)',
803
+ b'\xab' : '<<',
804
+ b'\xac' : '!',
805
+ b'\xad' : ' ',
806
+ b'\xae' : '(R)',
807
+ b'\xaf' : '-',
808
+ b'\xb0' : 'o',
809
+ b'\xb1' : '+-',
810
+ b'\xb2' : '2',
811
+ b'\xb3' : '3',
812
+ b'\xb4' : ("'", 'acute'),
813
+ b'\xb5' : 'u',
814
+ b'\xb6' : 'P',
815
+ b'\xb7' : '*',
816
+ b'\xb8' : ',',
817
+ b'\xb9' : '1',
818
+ b'\xba' : '(th)',
819
+ b'\xbb' : '>>',
820
+ b'\xbc' : '1/4',
821
+ b'\xbd' : '1/2',
822
+ b'\xbe' : '3/4',
823
+ b'\xbf' : '?',
824
+ b'\xc0' : 'A',
825
+ b'\xc1' : 'A',
826
+ b'\xc2' : 'A',
827
+ b'\xc3' : 'A',
828
+ b'\xc4' : 'A',
829
+ b'\xc5' : 'A',
830
+ b'\xc6' : 'AE',
831
+ b'\xc7' : 'C',
832
+ b'\xc8' : 'E',
833
+ b'\xc9' : 'E',
834
+ b'\xca' : 'E',
835
+ b'\xcb' : 'E',
836
+ b'\xcc' : 'I',
837
+ b'\xcd' : 'I',
838
+ b'\xce' : 'I',
839
+ b'\xcf' : 'I',
840
+ b'\xd0' : 'D',
841
+ b'\xd1' : 'N',
842
+ b'\xd2' : 'O',
843
+ b'\xd3' : 'O',
844
+ b'\xd4' : 'O',
845
+ b'\xd5' : 'O',
846
+ b'\xd6' : 'O',
847
+ b'\xd7' : '*',
848
+ b'\xd8' : 'O',
849
+ b'\xd9' : 'U',
850
+ b'\xda' : 'U',
851
+ b'\xdb' : 'U',
852
+ b'\xdc' : 'U',
853
+ b'\xdd' : 'Y',
854
+ b'\xde' : 'b',
855
+ b'\xdf' : 'B',
856
+ b'\xe0' : 'a',
857
+ b'\xe1' : 'a',
858
+ b'\xe2' : 'a',
859
+ b'\xe3' : 'a',
860
+ b'\xe4' : 'a',
861
+ b'\xe5' : 'a',
862
+ b'\xe6' : 'ae',
863
+ b'\xe7' : 'c',
864
+ b'\xe8' : 'e',
865
+ b'\xe9' : 'e',
866
+ b'\xea' : 'e',
867
+ b'\xeb' : 'e',
868
+ b'\xec' : 'i',
869
+ b'\xed' : 'i',
870
+ b'\xee' : 'i',
871
+ b'\xef' : 'i',
872
+ b'\xf0' : 'o',
873
+ b'\xf1' : 'n',
874
+ b'\xf2' : 'o',
875
+ b'\xf3' : 'o',
876
+ b'\xf4' : 'o',
877
+ b'\xf5' : 'o',
878
+ b'\xf6' : 'o',
879
+ b'\xf7' : '/',
880
+ b'\xf8' : 'o',
881
+ b'\xf9' : 'u',
882
+ b'\xfa' : 'u',
883
+ b'\xfb' : 'u',
884
+ b'\xfc' : 'u',
885
+ b'\xfd' : 'y',
886
+ b'\xfe' : 'b',
887
+ b'\xff' : 'y',
888
+ }
889
+
890
+ # A map used when removing rogue Windows-1252/ISO-8859-1
891
+ # characters in otherwise UTF-8 documents.
892
+ #
893
+ # Note that \x81, \x8d, \x8f, \x90, and \x9d are undefined in
894
+ # Windows-1252.
895
+ WINDOWS_1252_TO_UTF8 = {
896
+ 0x80 : b'\xe2\x82\xac', # €
897
+ 0x82 : b'\xe2\x80\x9a', # ‚
898
+ 0x83 : b'\xc6\x92', # ƒ
899
+ 0x84 : b'\xe2\x80\x9e', # „
900
+ 0x85 : b'\xe2\x80\xa6', # …
901
+ 0x86 : b'\xe2\x80\xa0', # †
902
+ 0x87 : b'\xe2\x80\xa1', # ‡
903
+ 0x88 : b'\xcb\x86', # ˆ
904
+ 0x89 : b'\xe2\x80\xb0', # ‰
905
+ 0x8a : b'\xc5\xa0', # Š
906
+ 0x8b : b'\xe2\x80\xb9', # ‹
907
+ 0x8c : b'\xc5\x92', # Œ
908
+ 0x8e : b'\xc5\xbd', # Ž
909
+ 0x91 : b'\xe2\x80\x98', # ‘
910
+ 0x92 : b'\xe2\x80\x99', # ’
911
+ 0x93 : b'\xe2\x80\x9c', # “
912
+ 0x94 : b'\xe2\x80\x9d', # ”
913
+ 0x95 : b'\xe2\x80\xa2', # •
914
+ 0x96 : b'\xe2\x80\x93', # –
915
+ 0x97 : b'\xe2\x80\x94', # —
916
+ 0x98 : b'\xcb\x9c', # ˜
917
+ 0x99 : b'\xe2\x84\xa2', # ™
918
+ 0x9a : b'\xc5\xa1', # š
919
+ 0x9b : b'\xe2\x80\xba', # ›
920
+ 0x9c : b'\xc5\x93', # œ
921
+ 0x9e : b'\xc5\xbe', # ž
922
+ 0x9f : b'\xc5\xb8', # Ÿ
923
+ 0xa0 : b'\xc2\xa0', #  
924
+ 0xa1 : b'\xc2\xa1', # ¡
925
+ 0xa2 : b'\xc2\xa2', # ¢
926
+ 0xa3 : b'\xc2\xa3', # £
927
+ 0xa4 : b'\xc2\xa4', # ¤
928
+ 0xa5 : b'\xc2\xa5', # ¥
929
+ 0xa6 : b'\xc2\xa6', # ¦
930
+ 0xa7 : b'\xc2\xa7', # §
931
+ 0xa8 : b'\xc2\xa8', # ¨
932
+ 0xa9 : b'\xc2\xa9', # ©
933
+ 0xaa : b'\xc2\xaa', # ª
934
+ 0xab : b'\xc2\xab', # «
935
+ 0xac : b'\xc2\xac', # ¬
936
+ 0xad : b'\xc2\xad', # ­
937
+ 0xae : b'\xc2\xae', # ®
938
+ 0xaf : b'\xc2\xaf', # ¯
939
+ 0xb0 : b'\xc2\xb0', # °
940
+ 0xb1 : b'\xc2\xb1', # ±
941
+ 0xb2 : b'\xc2\xb2', # ²
942
+ 0xb3 : b'\xc2\xb3', # ³
943
+ 0xb4 : b'\xc2\xb4', # ´
944
+ 0xb5 : b'\xc2\xb5', # µ
945
+ 0xb6 : b'\xc2\xb6', # ¶
946
+ 0xb7 : b'\xc2\xb7', # ·
947
+ 0xb8 : b'\xc2\xb8', # ¸
948
+ 0xb9 : b'\xc2\xb9', # ¹
949
+ 0xba : b'\xc2\xba', # º
950
+ 0xbb : b'\xc2\xbb', # »
951
+ 0xbc : b'\xc2\xbc', # ¼
952
+ 0xbd : b'\xc2\xbd', # ½
953
+ 0xbe : b'\xc2\xbe', # ¾
954
+ 0xbf : b'\xc2\xbf', # ¿
955
+ 0xc0 : b'\xc3\x80', # À
956
+ 0xc1 : b'\xc3\x81', # Á
957
+ 0xc2 : b'\xc3\x82', # Â
958
+ 0xc3 : b'\xc3\x83', # Ã
959
+ 0xc4 : b'\xc3\x84', # Ä
960
+ 0xc5 : b'\xc3\x85', # Å
961
+ 0xc6 : b'\xc3\x86', # Æ
962
+ 0xc7 : b'\xc3\x87', # Ç
963
+ 0xc8 : b'\xc3\x88', # È
964
+ 0xc9 : b'\xc3\x89', # É
965
+ 0xca : b'\xc3\x8a', # Ê
966
+ 0xcb : b'\xc3\x8b', # Ë
967
+ 0xcc : b'\xc3\x8c', # Ì
968
+ 0xcd : b'\xc3\x8d', # Í
969
+ 0xce : b'\xc3\x8e', # Î
970
+ 0xcf : b'\xc3\x8f', # Ï
971
+ 0xd0 : b'\xc3\x90', # Ð
972
+ 0xd1 : b'\xc3\x91', # Ñ
973
+ 0xd2 : b'\xc3\x92', # Ò
974
+ 0xd3 : b'\xc3\x93', # Ó
975
+ 0xd4 : b'\xc3\x94', # Ô
976
+ 0xd5 : b'\xc3\x95', # Õ
977
+ 0xd6 : b'\xc3\x96', # Ö
978
+ 0xd7 : b'\xc3\x97', # ×
979
+ 0xd8 : b'\xc3\x98', # Ø
980
+ 0xd9 : b'\xc3\x99', # Ù
981
+ 0xda : b'\xc3\x9a', # Ú
982
+ 0xdb : b'\xc3\x9b', # Û
983
+ 0xdc : b'\xc3\x9c', # Ü
984
+ 0xdd : b'\xc3\x9d', # Ý
985
+ 0xde : b'\xc3\x9e', # Þ
986
+ 0xdf : b'\xc3\x9f', # ß
987
+ 0xe0 : b'\xc3\xa0', # à
988
+ 0xe1 : b'\xa1', # á
989
+ 0xe2 : b'\xc3\xa2', # â
990
+ 0xe3 : b'\xc3\xa3', # ã
991
+ 0xe4 : b'\xc3\xa4', # ä
992
+ 0xe5 : b'\xc3\xa5', # å
993
+ 0xe6 : b'\xc3\xa6', # æ
994
+ 0xe7 : b'\xc3\xa7', # ç
995
+ 0xe8 : b'\xc3\xa8', # è
996
+ 0xe9 : b'\xc3\xa9', # é
997
+ 0xea : b'\xc3\xaa', # ê
998
+ 0xeb : b'\xc3\xab', # ë
999
+ 0xec : b'\xc3\xac', # ì
1000
+ 0xed : b'\xc3\xad', # í
1001
+ 0xee : b'\xc3\xae', # î
1002
+ 0xef : b'\xc3\xaf', # ï
1003
+ 0xf0 : b'\xc3\xb0', # ð
1004
+ 0xf1 : b'\xc3\xb1', # ñ
1005
+ 0xf2 : b'\xc3\xb2', # ò
1006
+ 0xf3 : b'\xc3\xb3', # ó
1007
+ 0xf4 : b'\xc3\xb4', # ô
1008
+ 0xf5 : b'\xc3\xb5', # õ
1009
+ 0xf6 : b'\xc3\xb6', # ö
1010
+ 0xf7 : b'\xc3\xb7', # ÷
1011
+ 0xf8 : b'\xc3\xb8', # ø
1012
+ 0xf9 : b'\xc3\xb9', # ù
1013
+ 0xfa : b'\xc3\xba', # ú
1014
+ 0xfb : b'\xc3\xbb', # û
1015
+ 0xfc : b'\xc3\xbc', # ü
1016
+ 0xfd : b'\xc3\xbd', # ý
1017
+ 0xfe : b'\xc3\xbe', # þ
1018
+ }
1019
+
1020
+ MULTIBYTE_MARKERS_AND_SIZES = [
1021
+ (0xc2, 0xdf, 2), # 2-byte characters start with a byte C2-DF
1022
+ (0xe0, 0xef, 3), # 3-byte characters start with E0-EF
1023
+ (0xf0, 0xf4, 4), # 4-byte characters start with F0-F4
1024
+ ]
1025
+
1026
+ FIRST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[0][0]
1027
+ LAST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[-1][1]
1028
+
1029
+ @classmethod
1030
+ def detwingle(cls, in_bytes, main_encoding="utf8",
1031
+ embedded_encoding="windows-1252"):
1032
+ """Fix characters from one encoding embedded in some other encoding.
1033
+
1034
+ Currently the only situation supported is Windows-1252 (or its
1035
+ subset ISO-8859-1), embedded in UTF-8.
1036
+
1037
+ :param in_bytes: A bytestring that you suspect contains
1038
+ characters from multiple encodings. Note that this _must_
1039
+ be a bytestring. If you've already converted the document
1040
+ to Unicode, you're too late.
1041
+ :param main_encoding: The primary encoding of `in_bytes`.
1042
+ :param embedded_encoding: The encoding that was used to embed characters
1043
+ in the main document.
1044
+ :return: A bytestring in which `embedded_encoding`
1045
+ characters have been converted to their `main_encoding`
1046
+ equivalents.
1047
+ """
1048
+ if embedded_encoding.replace('_', '-').lower() not in (
1049
+ 'windows-1252', 'windows_1252'):
1050
+ raise NotImplementedError(
1051
+ "Windows-1252 and ISO-8859-1 are the only currently supported "
1052
+ "embedded encodings.")
1053
+
1054
+ if main_encoding.lower() not in ('utf8', 'utf-8'):
1055
+ raise NotImplementedError(
1056
+ "UTF-8 is the only currently supported main encoding.")
1057
+
1058
+ byte_chunks = []
1059
+
1060
+ chunk_start = 0
1061
+ pos = 0
1062
+ while pos < len(in_bytes):
1063
+ byte = in_bytes[pos]
1064
+ if not isinstance(byte, int):
1065
+ # Python 2.x
1066
+ byte = ord(byte)
1067
+ if (byte >= cls.FIRST_MULTIBYTE_MARKER
1068
+ and byte <= cls.LAST_MULTIBYTE_MARKER):
1069
+ # This is the start of a UTF-8 multibyte character. Skip
1070
+ # to the end.
1071
+ for start, end, size in cls.MULTIBYTE_MARKERS_AND_SIZES:
1072
+ if byte >= start and byte <= end:
1073
+ pos += size
1074
+ break
1075
+ elif byte >= 0x80 and byte in cls.WINDOWS_1252_TO_UTF8:
1076
+ # We found a Windows-1252 character!
1077
+ # Save the string up to this point as a chunk.
1078
+ byte_chunks.append(in_bytes[chunk_start:pos])
1079
+
1080
+ # Now translate the Windows-1252 character into UTF-8
1081
+ # and add it as another, one-byte chunk.
1082
+ byte_chunks.append(cls.WINDOWS_1252_TO_UTF8[byte])
1083
+ pos += 1
1084
+ chunk_start = pos
1085
+ else:
1086
+ # Go on to the next character.
1087
+ pos += 1
1088
+ if chunk_start == 0:
1089
+ # The string is unchanged.
1090
+ return in_bytes
1091
+ else:
1092
+ # Store the final chunk.
1093
+ byte_chunks.append(in_bytes[chunk_start:])
1094
+ return b''.join(byte_chunks)
1095
+
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/diagnose.py ADDED
@@ -0,0 +1,233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Diagnostic functions, mainly for use when doing tech support."""
2
+
3
+ # Use of this source code is governed by the MIT license.
4
+ __license__ = "MIT"
5
+
6
+ import cProfile
7
+ from io import BytesIO
8
+ from html.parser import HTMLParser
9
+ import bs4
10
+ from bs4 import BeautifulSoup, __version__
11
+ from bs4.builder import builder_registry
12
+
13
+ import os
14
+ import pstats
15
+ import random
16
+ import tempfile
17
+ import time
18
+ import traceback
19
+ import sys
20
+ import cProfile
21
+
22
def diagnose(data):
    """Diagnostic suite for isolating common problems.

    :param data: A string (or open file) containing markup that needs
        to be explained.
    :return: None; diagnostics are printed to standard output.
    """
    print(("Diagnostic running on Beautiful Soup %s" % __version__))
    print(("Python version %s" % sys.version))

    # Keep only the parsers that are actually installed. Iterate over a
    # copy: removing from a list while iterating over it skips the
    # element after each removal, so missing parsers could go unreported.
    basic_parsers = ["html.parser", "html5lib", "lxml"]
    for name in list(basic_parsers):
        for builder in builder_registry.builders:
            if name in builder.features:
                break
        else:
            basic_parsers.remove(name)
            print((
                "I noticed that %s is not installed. Installing it may help." %
                name))

    if 'lxml' in basic_parsers:
        basic_parsers.append("lxml-xml")
        try:
            from lxml import etree
            print(("Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION))))
        except ImportError as e:
            print(
                "lxml is not installed or couldn't be imported.")


    if 'html5lib' in basic_parsers:
        try:
            import html5lib
            print(("Found html5lib version %s" % html5lib.__version__))
        except ImportError as e:
            print(
                "html5lib is not installed or couldn't be imported.")

    # Accept an open file as well as a string.
    if hasattr(data, 'read'):
        data = data.read()

    # Parse the markup with each available parser and show the result.
    for parser in basic_parsers:
        print(("Trying to parse your markup with %s" % parser))
        success = False
        try:
            soup = BeautifulSoup(data, features=parser)
            success = True
        except Exception as e:
            print(("%s could not parse the markup." % parser))
            traceback.print_exc()
        if success:
            print(("Here's what %s did with the markup:" % parser))
            print((soup.prettify()))

        print(("-" * 80))
77
+
78
def lxml_trace(data, html=True, **kwargs):
    """Print out the lxml events that occur during parsing.

    This lets you see how lxml parses a document when no Beautiful
    Soup code is running. You can use this to determine whether
    an lxml-specific problem is in Beautiful Soup's lxml tree builders
    or in lxml itself.

    :param data: Some markup. A str is encoded as UTF-8 before parsing.
    :param html: If True, markup will be parsed with lxml's HTML parser.
        if False, lxml's XML parser will be used.
    :param kwargs: Passed through to lxml's iterparse; 'recover'
        defaults to True so invalid markup still produces events.
    """
    from lxml import etree
    # Pop 'recover' so a default can be supplied without clobbering a
    # caller-provided value.
    recover = kwargs.pop('recover', True)
    if isinstance(data, str):
        data = data.encode("utf8")
    reader = BytesIO(data)
    for event, element in etree.iterparse(
        reader, html=html, recover=recover, **kwargs
    ):
        print(("%s, %4s, %s" % (event, element.tag, element.text)))
99
+
100
class AnnouncingParser(HTMLParser):
    """Subclass of HTMLParser that announces parse events, without doing
    anything else.

    You can use this to get a picture of how html.parser sees a given
    document. The easiest way to do this is to call `htmlparser_trace`.
    """

    def _p(self, s):
        # Every announcement funnels through here, so callers can
        # redirect output by replacing this single method.
        print(s)

    def handle_starttag(self, name, attrs):
        self._p(f"{name} START")

    def handle_endtag(self, name):
        self._p(f"{name} END")

    def handle_data(self, data):
        self._p(f"{data} DATA")

    def handle_charref(self, name):
        self._p(f"{name} CHARREF")

    def handle_entityref(self, name):
        self._p(f"{name} ENTITYREF")

    def handle_comment(self, data):
        self._p(f"{data} COMMENT")

    def handle_decl(self, data):
        self._p(f"{data} DECL")

    def unknown_decl(self, data):
        self._p(f"{data} UNKNOWN-DECL")

    def handle_pi(self, data):
        self._p(f"{data} PI")
137
+
138
def htmlparser_trace(data):
    """Print out the HTMLParser events that occur during parsing.

    This lets you see how HTMLParser parses a document when no
    Beautiful Soup code is running.

    :param data: Some markup.
    """
    AnnouncingParser().feed(data)
148
+
149
# Alphabets used to build pronounceable random identifiers.
_vowels = "aeiou"
_consonants = "bcdfghjklmnpqrstvwxyz"

def rword(length=5):
    """Generate a random word-like string, alternating consonants and
    vowels and starting with a consonant."""
    alphabets = (_consonants, _vowels)
    return "".join(random.choice(alphabets[i % 2]) for i in range(length))

def rsentence(length=4):
    """Generate a random sentence-like string of `length` words."""
    return " ".join(rword(random.randint(4, 9)) for i in range(length))
166
+
167
def rdoc(num_elements=1000):
    """Randomly generate an invalid HTML document."""
    tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table']
    elements = []
    for i in range(num_elements):
        choice = random.randint(0, 3)
        if choice == 0:
            # Open a random tag.
            tag_name = random.choice(tag_names)
            elements.append("<%s>" % tag_name)
        elif choice == 1:
            # A short run of random text.
            elements.append(rsentence(random.randint(1, 4)))
        elif choice == 2:
            # Close a random tag (not necessarily one that's open).
            tag_name = random.choice(tag_names)
            elements.append("</%s>" % tag_name)
        # choice == 3 contributes nothing, leaving occasional gaps.
    return "<html>" + "\n".join(elements) + "</html>"
184
+
185
def benchmark_parsers(num_elements=100000):
    """Very basic head-to-head performance benchmark.

    Parses one randomly generated invalid document with each parser
    through Beautiful Soup, then with raw lxml and raw html5lib as
    baselines, printing the elapsed wall-clock time for each.

    :param num_elements: Approximate element count for the generated
        test document.
    :return: None; results are printed to standard output.
    """
    print(("Comparative parser benchmark on Beautiful Soup %s" % __version__))
    data = rdoc(num_elements)
    print(("Generated a large invalid HTML document (%d bytes)." % len(data)))

    for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]:
        success = False
        try:
            a = time.time()
            soup = BeautifulSoup(data, parser)
            b = time.time()
            success = True
        except Exception as e:
            print(("%s could not parse the markup." % parser))
            traceback.print_exc()
        if success:
            print(("BS4+%s parsed the markup in %.2fs." % (parser, b-a)))

    # Baseline: lxml with no Beautiful Soup overhead.
    from lxml import etree
    a = time.time()
    etree.HTML(data)
    b = time.time()
    print(("Raw lxml parsed the markup in %.2fs." % (b-a)))

    # Baseline: html5lib with no Beautiful Soup overhead.
    import html5lib
    parser = html5lib.HTMLParser()
    a = time.time()
    parser.parse(data)
    b = time.time()
    print(("Raw html5lib parsed the markup in %.2fs." % (b-a)))
216
+
217
def profile(num_elements=100000, parser="lxml"):
    """Use Python's profiler on a randomly generated document.

    Parses the document once under cProfile, then prints the 50 most
    expensive html5lib/bs4 calls sorted by cumulative time.

    :param num_elements: Approximate element count for the generated
        test document.
    :param parser: Name of the parser to profile.
    :return: None; profiler output is printed to standard output.
    """
    # The profile data goes into a temporary file that is cleaned up
    # automatically when the handle is garbage-collected.
    filehandle = tempfile.NamedTemporaryFile()
    filename = filehandle.name

    data = rdoc(num_elements)
    vars = dict(bs4=bs4, data=data, parser=parser)
    cProfile.runctx('bs4.BeautifulSoup(data, parser)' , vars, vars, filename)

    stats = pstats.Stats(filename)
    # stats.strip_dirs()
    stats.sort_stats("cumulative")
    stats.print_stats('_html5lib|bs4', 50)
230
+
231
+ # If this file is run as a script, standard input is diagnosed.
232
+ if __name__ == '__main__':
233
+ diagnose(sys.stdin.read())
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/element.py ADDED
@@ -0,0 +1,2435 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use of this source code is governed by the MIT license.
2
+ __license__ = "MIT"
3
+
4
+ try:
5
+ from collections.abc import Callable # Python 3.6
6
+ except ImportError as e:
7
+ from collections import Callable
8
+ import re
9
+ import sys
10
+ import warnings
11
+
12
+ from bs4.css import CSS
13
+ from bs4.formatter import (
14
+ Formatter,
15
+ HTMLFormatter,
16
+ XMLFormatter,
17
+ )
18
+
19
# Encoding used when a tree is serialized and no encoding is specified.
DEFAULT_OUTPUT_ENCODING = "utf-8"

# Matches a maximal run of non-whitespace characters.
nonwhitespace_re = re.compile(r"\S+")

# NOTE: This isn't used as of 4.7.0. I'm leaving it for a little bit on
# the off chance someone imported it for their own use.
whitespace_re = re.compile(r"\s+")
26
+
27
+ def _alias(attr):
28
+ """Alias one attribute name to another for backward compatibility"""
29
+ @property
30
+ def alias(self):
31
+ return getattr(self, attr)
32
+
33
+ @alias.setter
34
+ def alias(self):
35
+ return setattr(self, attr)
36
+ return alias
37
+
38
+
39
# These encodings are recognized by Python (so PageElement.encode
# could theoretically support them) but XML and HTML don't recognize
# them (so they should not show up in an XML or HTML document as that
# document's encoding).
#
# If an XML document is encoded in one of these encodings, no encoding
# will be mentioned in the XML declaration. If an HTML document is
# encoded in one of these encodings, and the HTML document has a
# <meta> tag that mentions an encoding, the encoding will be given as
# the empty string.
#
# Source:
# https://docs.python.org/3/library/codecs.html#python-specific-encodings
PYTHON_SPECIFIC_ENCODINGS = {
    "idna",
    "mbcs",
    "oem",
    "palmos",
    "punycode",
    "raw_unicode_escape",
    "undefined",
    "unicode_escape",
    "raw-unicode-escape",
    "unicode-escape",
    "string-escape",
    "string_escape",
}
66
+
67
+
68
class NamespacedAttribute(str):
    """A namespaced string (e.g. 'xml:lang') that remembers the namespace
    ('xml') and the name ('lang') that were used to create it.
    """

    def __new__(cls, prefix, name=None, namespace=None):
        # An empty name means this is the default namespace, whose name
        # "has no value" per https://www.w3.org/TR/xml-names/#defaulting
        name = name or None

        if name is None:
            text = prefix
        elif prefix:
            text = prefix + ":" + name
        else:
            # No prefix: not really namespaced.
            text = name
        obj = str.__new__(cls, text)
        obj.prefix = prefix
        obj.name = name
        obj.namespace = namespace
        return obj
90
+
91
class AttributeValueWithCharsetSubstitution(str):
    """Base class for attribute values that stand in for a character
    encoding mentioned in an HTML document.
    """
93
+
94
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
    """A generic stand-in for the value of a meta tag's 'charset' attribute.

    When Beautiful Soup parses the markup '<meta charset="utf8">', the
    value of the 'charset' attribute will be one of these objects.
    """

    def __new__(cls, original_value):
        instance = str.__new__(cls, original_value)
        # Remember the value as it appeared in the document.
        instance.original_value = original_value
        return instance

    def encode(self, encoding):
        """When an HTML document is being encoded to a given encoding, the
        value of a meta tag's 'charset' is the name of the encoding.
        """
        # Python-only encodings have no HTML name, so they are rendered
        # as the empty string.
        return '' if encoding in PYTHON_SPECIFIC_ENCODINGS else encoding
113
+
114
+
115
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
    """A generic stand-in for the value of a meta tag's 'content' attribute.

    When Beautiful Soup parses the markup:
     <meta http-equiv="content-type" content="text/html; charset=utf8">

    The value of the 'content' attribute will be one of these objects.
    """

    # Locates a charset declaration inside a 'content' attribute value.
    CHARSET_RE = re.compile(r"((^|;)\s*charset=)([^;]*)", re.M)

    def __new__(cls, original_value):
        if cls.CHARSET_RE.search(original_value) is None:
            # No charset declaration, so no substitution will ever be
            # necessary; a plain string will do.
            return str.__new__(str, original_value)

        instance = str.__new__(cls, original_value)
        instance.original_value = original_value
        return instance

    def encode(self, encoding):
        """Substitute `encoding` for the charset declared in the
        original attribute value.
        """
        if encoding in PYTHON_SPECIFIC_ENCODINGS:
            return ''
        return self.CHARSET_RE.sub(
            lambda match: match.group(1) + encoding, self.original_value)
142
+
143
+
144
class PageElement(object):
    """Contains the navigational information for some part of the page:
    that is, its current location in the parse tree.

    NavigableString, Tag, etc. are all subclasses of PageElement.
    """

    # In general, we can't tell just by looking at an element whether
    # it's contained in an XML document or an HTML document. But for
    # Tags (q.v.) we can store this information at parse time.
    # Tri-state: True (XML), False (HTML), None (undetermined).
    known_xml = None
155
+
156
    def setup(self, parent=None, previous_element=None, next_element=None,
              previous_sibling=None, next_sibling=None):
        """Sets up the initial relations between this element and
        other elements.

        :param parent: The parent of this element.

        :param previous_element: The element parsed immediately before
            this one.

        :param next_element: The element parsed immediately after
            this one.

        :param previous_sibling: The most recently encountered element
            on the same level of the parse tree as this one.

        :param next_sibling: The next element to be encountered
            on the same level of the parse tree as this one.
        """
        self.parent = parent

        # Each link is made bidirectional: when we point at a neighbor,
        # the neighbor is updated to point back at us.
        self.previous_element = previous_element
        if previous_element is not None:
            self.previous_element.next_element = self

        self.next_element = next_element
        if self.next_element is not None:
            self.next_element.previous_element = self

        self.next_sibling = next_sibling
        if self.next_sibling is not None:
            self.next_sibling.previous_sibling = self

        if (previous_sibling is None
            and self.parent is not None and self.parent.contents):
            # No previous sibling was given, but the parent already has
            # children: this element follows the last of them.
            previous_sibling = self.parent.contents[-1]

        self.previous_sibling = previous_sibling
        if previous_sibling is not None:
            self.previous_sibling.next_sibling = self
196
+
197
+ def format_string(self, s, formatter):
198
+ """Format the given string using the given formatter.
199
+
200
+ :param s: A string.
201
+ :param formatter: A Formatter object, or a string naming one of the standard formatters.
202
+ """
203
+ if formatter is None:
204
+ return s
205
+ if not isinstance(formatter, Formatter):
206
+ formatter = self.formatter_for_name(formatter)
207
+ output = formatter.substitute(s)
208
+ return output
209
+
210
+ def formatter_for_name(self, formatter):
211
+ """Look up or create a Formatter for the given identifier,
212
+ if necessary.
213
+
214
+ :param formatter: Can be a Formatter object (used as-is), a
215
+ function (used as the entity substitution hook for an
216
+ XMLFormatter or HTMLFormatter), or a string (used to look
217
+ up an XMLFormatter or HTMLFormatter in the appropriate
218
+ registry.
219
+ """
220
+ if isinstance(formatter, Formatter):
221
+ return formatter
222
+ if self._is_xml:
223
+ c = XMLFormatter
224
+ else:
225
+ c = HTMLFormatter
226
+ if isinstance(formatter, Callable):
227
+ return c(entity_substitution=formatter)
228
+ return c.REGISTRY[formatter]
229
+
230
+ @property
231
+ def _is_xml(self):
232
+ """Is this element part of an XML tree or an HTML tree?
233
+
234
+ This is used in formatter_for_name, when deciding whether an
235
+ XMLFormatter or HTMLFormatter is more appropriate. It can be
236
+ inefficient, but it should be called very rarely.
237
+ """
238
+ if self.known_xml is not None:
239
+ # Most of the time we will have determined this when the
240
+ # document is parsed.
241
+ return self.known_xml
242
+
243
+ # Otherwise, it's likely that this element was created by
244
+ # direct invocation of the constructor from within the user's
245
+ # Python code.
246
+ if self.parent is None:
247
+ # This is the top-level object. It should have .known_xml set
248
+ # from tree creation. If not, take a guess--BS is usually
249
+ # used on HTML markup.
250
+ return getattr(self, 'is_xml', False)
251
+ return self.parent._is_xml
252
+
253
    nextSibling = _alias("next_sibling")  # BS3
    previousSibling = _alias("previous_sibling")  # BS3

    # Sentinel default for the `types` argument of _all_strings and
    # get_text.
    default = object()
    def _all_strings(self, strip=False, types=default):
        """Yield all strings of certain classes, possibly stripping them.

        This is implemented differently in Tag and NavigableString.
        """
        raise NotImplementedError()
263
+
264
+ @property
265
+ def stripped_strings(self):
266
+ """Yield all strings in this PageElement, stripping them first.
267
+
268
+ :yield: A sequence of stripped strings.
269
+ """
270
+ for string in self._all_strings(True):
271
+ yield string
272
+
273
+ def get_text(self, separator="", strip=False,
274
+ types=default):
275
+ """Get all child strings of this PageElement, concatenated using the
276
+ given separator.
277
+
278
+ :param separator: Strings will be concatenated using this separator.
279
+
280
+ :param strip: If True, strings will be stripped before being
281
+ concatenated.
282
+
283
+ :param types: A tuple of NavigableString subclasses. Any
284
+ strings of a subclass not found in this list will be
285
+ ignored. Although there are exceptions, the default
286
+ behavior in most cases is to consider only NavigableString
287
+ and CData objects. That means no comments, processing
288
+ instructions, etc.
289
+
290
+ :return: A string.
291
+ """
292
+ return separator.join([s for s in self._all_strings(
293
+ strip, types=types)])
294
+ getText = get_text
295
+ text = property(get_text)
296
+
297
    def replace_with(self, *args):
        """Replace this PageElement with one or more PageElements, keeping the
        rest of the tree the same.

        :param args: One or more PageElements.
        :return: `self`, no longer part of the tree.
        :raises ValueError: If this element has no parent, or if one of
            the replacements is this element's own parent.
        """
        if self.parent is None:
            raise ValueError(
                "Cannot replace one element with another when the "
                "element to be replaced is not part of a tree.")
        if len(args) == 1 and args[0] is self:
            # Replacing an element with itself is a no-op.
            return
        if any(x is self.parent for x in args):
            raise ValueError("Cannot replace a Tag with its parent.")
        old_parent = self.parent
        # Remember where this element was before detaching it, so the
        # replacements can be inserted at the same position.
        my_index = self.parent.index(self)
        self.extract(_self_index=my_index)
        for idx, replace_with in enumerate(args, start=my_index):
            old_parent.insert(idx, replace_with)
        return self
    replaceWith = replace_with # BS3
319
+
320
+ def unwrap(self):
321
+ """Replace this PageElement with its contents.
322
+
323
+ :return: `self`, no longer part of the tree.
324
+ """
325
+ my_parent = self.parent
326
+ if self.parent is None:
327
+ raise ValueError(
328
+ "Cannot replace an element with its contents when that"
329
+ "element is not part of a tree.")
330
+ my_index = self.parent.index(self)
331
+ self.extract(_self_index=my_index)
332
+ for child in reversed(self.contents[:]):
333
+ my_parent.insert(my_index, child)
334
+ return self
335
+ replace_with_children = unwrap
336
+ replaceWithChildren = unwrap # BS3
337
+
338
+ def wrap(self, wrap_inside):
339
+ """Wrap this PageElement inside another one.
340
+
341
+ :param wrap_inside: A PageElement.
342
+ :return: `wrap_inside`, occupying the position in the tree that used
343
+ to be occupied by `self`, and with `self` inside it.
344
+ """
345
+ me = self.replace_with(wrap_inside)
346
+ wrap_inside.append(me)
347
+ return wrap_inside
348
+
349
    def extract(self, _self_index=None):
        """Destructively rips this element out of the tree.

        :param _self_index: The location of this element in its parent's
           .contents, if known. Passing this in allows for a performance
           optimization.

        :return: `self`, no longer part of the tree.
        """
        if self.parent is not None:
            if _self_index is None:
                _self_index = self.parent.index(self)
            del self.parent.contents[_self_index]

        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        last_child = self._last_descendant()
        next_element = last_child.next_element

        if (self.previous_element is not None and
            self.previous_element is not next_element):
            self.previous_element.next_element = next_element
        if next_element is not None and next_element is not self.previous_element:
            next_element.previous_element = self.previous_element
        # Sever this subtree's own outward links.
        self.previous_element = None
        last_child.next_element = None

        # Splice this element out of its parent's sibling chain. The
        # identity checks guard against pathological self-referential
        # chains.
        self.parent = None
        if (self.previous_sibling is not None
            and self.previous_sibling is not self.next_sibling):
            self.previous_sibling.next_sibling = self.next_sibling
        if (self.next_sibling is not None
            and self.next_sibling is not self.previous_sibling):
            self.next_sibling.previous_sibling = self.previous_sibling
        self.previous_sibling = self.next_sibling = None
        return self
386
+
387
    def _last_descendant(self, is_initialized=True, accept_self=True):
        """Finds the last element beneath this object to be parsed.

        :param is_initialized: Has `setup` been called on this PageElement
            yet?
        :param accept_self: Is `self` an acceptable answer to the question?
        """
        if is_initialized and self.next_sibling is not None:
            # Fast path: the element parsed just before our next sibling
            # is our last descendant.
            last_child = self.next_sibling.previous_element
        else:
            # Walk down the tree, always taking the last child.
            last_child = self
            while isinstance(last_child, Tag) and last_child.contents:
                last_child = last_child.contents[-1]
        if not accept_self and last_child is self:
            last_child = None
        return last_child
    # BS3: Not part of the API!
    _lastRecursiveChild = _last_descendant
405
+
406
    def insert(self, position, new_child):
        """Insert a new PageElement in the list of this PageElement's children.

        This works the same way as `list.insert`.

        :param position: The numeric position that should be occupied
           in `self.children` by the new PageElement.
        :param new_child: A PageElement.
        :raises ValueError: If `new_child` is None or is this element
            itself.
        """
        if new_child is None:
            raise ValueError("Cannot insert None into a tag.")
        if new_child is self:
            raise ValueError("Cannot insert a tag into itself.")
        # Plain strings are promoted to NavigableStrings so they can
        # participate in tree navigation.
        if (isinstance(new_child, str)
            and not isinstance(new_child, NavigableString)):
            new_child = NavigableString(new_child)

        from bs4 import BeautifulSoup
        if isinstance(new_child, BeautifulSoup):
            # We don't want to end up with a situation where one BeautifulSoup
            # object contains another. Insert the children one at a time.
            for subchild in list(new_child.contents):
                self.insert(position, subchild)
                position += 1
            return
        position = min(position, len(self.contents))
        if hasattr(new_child, 'parent') and new_child.parent is not None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if new_child.parent is self:
                current_index = self.index(new_child)
                if current_index < position:
                    # We're moving this element further down the list
                    # of this object's children. That means that when
                    # we extract this element, our target index will
                    # jump down one.
                    position -= 1
            new_child.extract()

        # Wire up the backward-facing links for the new child.
        new_child.parent = self
        previous_child = None
        if position == 0:
            new_child.previous_sibling = None
            new_child.previous_element = self
        else:
            previous_child = self.contents[position - 1]
            new_child.previous_sibling = previous_child
            new_child.previous_sibling.next_sibling = new_child
            new_child.previous_element = previous_child._last_descendant(False)
        if new_child.previous_element is not None:
            new_child.previous_element.next_element = new_child

        new_childs_last_element = new_child._last_descendant(False)

        if position >= len(self.contents):
            # The child is being appended at the end; its last
            # descendant must link forward to whatever follows this
            # element (or an ancestor of it) in the document.
            new_child.next_sibling = None

            parent = self
            parents_next_sibling = None
            while parents_next_sibling is None and parent is not None:
                parents_next_sibling = parent.next_sibling
                parent = parent.parent
                if parents_next_sibling is not None:
                    # We found the element that comes next in the document.
                    break
            if parents_next_sibling is not None:
                new_childs_last_element.next_element = parents_next_sibling
            else:
                # The last element of this tag is the last element in
                # the document.
                new_childs_last_element.next_element = None
        else:
            # The child is being inserted before an existing child.
            next_child = self.contents[position]
            new_child.next_sibling = next_child
            if new_child.next_sibling is not None:
                new_child.next_sibling.previous_sibling = new_child
            new_childs_last_element.next_element = next_child

        if new_childs_last_element.next_element is not None:
            new_childs_last_element.next_element.previous_element = new_childs_last_element
        self.contents.insert(position, new_child)
487
+
488
+ def append(self, tag):
489
+ """Appends the given PageElement to the contents of this one.
490
+
491
+ :param tag: A PageElement.
492
+ """
493
+ self.insert(len(self.contents), tag)
494
+
495
+ def extend(self, tags):
496
+ """Appends the given PageElements to this one's contents.
497
+
498
+ :param tags: A list of PageElements. If a single Tag is
499
+ provided instead, this PageElement's contents will be extended
500
+ with that Tag's contents.
501
+ """
502
+ if isinstance(tags, Tag):
503
+ tags = tags.contents
504
+ if isinstance(tags, list):
505
+ # Moving items around the tree may change their position in
506
+ # the original list. Make a list that won't change.
507
+ tags = list(tags)
508
+ for tag in tags:
509
+ self.append(tag)
510
+
511
    def insert_before(self, *args):
        """Makes the given element(s) the immediate predecessor of this one.

        All the elements will have the same parent, and the given elements
        will be immediately before this one.

        :param args: One or more PageElements.
        :raises ValueError: If this element has no parent, or if one of
            the given elements is this element itself.
        """
        parent = self.parent
        if parent is None:
            raise ValueError(
                "Element has no parent, so 'before' has no meaning.")
        if any(x is self for x in args):
            raise ValueError("Can't insert an element before itself.")
        for predecessor in args:
            # Extract first so that the index won't be screwed up if they
            # are siblings.
            if isinstance(predecessor, PageElement):
                predecessor.extract()
            # Recompute the index each time: the extract above may have
            # shifted this element's position.
            index = parent.index(self)
            parent.insert(index, predecessor)
532
+
533
    def insert_after(self, *args):
        """Makes the given element(s) the immediate successor of this one.

        The elements will have the same parent, and the given elements
        will be immediately after this one.

        :param args: One or more PageElements.
        :raises ValueError: If this element has no parent, or if one of
            the given elements is this element itself.
        """
        # Do all error checking before modifying the tree.
        parent = self.parent
        if parent is None:
            raise ValueError(
                "Element has no parent, so 'after' has no meaning.")
        if any(x is self for x in args):
            raise ValueError("Can't insert an element after itself.")

        offset = 0
        for successor in args:
            # Extract first so that the index won't be screwed up if they
            # are siblings.
            if isinstance(successor, PageElement):
                successor.extract()
            index = parent.index(self)
            # Each successive element goes one slot further right.
            parent.insert(index+1+offset, successor)
            offset += 1
558
+
559
    def find_next(self, name=None, attrs={}, string=None, **kwargs):
        """Find the first PageElement that matches the given criteria and
        appears later in the document than this PageElement.

        All find_* methods take a common set of arguments. See the online
        documentation for detailed explanations.

        :param name: A filter on tag name.
        :param attrs: A dictionary of filters on attribute values.
        :param string: A filter for a NavigableString with specific text.
        :kwargs: A dictionary of filters on attribute values.
        :return: A PageElement.
        :rtype: bs4.element.Tag | bs4.element.NavigableString
        """
        return self._find_one(self.find_all_next, name, attrs, string, **kwargs)
    findNext = find_next # BS3
575
+
576
    def find_all_next(self, name=None, attrs={}, string=None, limit=None,
                    **kwargs):
        """Find all PageElements that match the given criteria and appear
        later in the document than this PageElement.

        All find_* methods take a common set of arguments. See the online
        documentation for detailed explanations.

        :param name: A filter on tag name.
        :param attrs: A dictionary of filters on attribute values.
        :param string: A filter for a NavigableString with specific text.
        :param limit: Stop looking after finding this many results.
        :kwargs: A dictionary of filters on attribute values.
        :return: A ResultSet containing PageElements.
        """
        # _stacklevel is bumped so any DeprecationWarning raised inside
        # _find_all points at the caller's code, not this wrapper.
        _stacklevel = kwargs.pop('_stacklevel', 2)
        return self._find_all(name, attrs, string, limit, self.next_elements,
                              _stacklevel=_stacklevel+1, **kwargs)
    findAllNext = find_all_next # BS3
595
+
596
    def find_next_sibling(self, name=None, attrs={}, string=None, **kwargs):
        """Find the closest sibling to this PageElement that matches the
        given criteria and appears later in the document.

        All find_* methods take a common set of arguments. See the
        online documentation for detailed explanations.

        :param name: A filter on tag name.
        :param attrs: A dictionary of filters on attribute values.
        :param string: A filter for a NavigableString with specific text.
        :kwargs: A dictionary of filters on attribute values.
        :return: A PageElement.
        :rtype: bs4.element.Tag | bs4.element.NavigableString
        """
        return self._find_one(self.find_next_siblings, name, attrs, string,
                              **kwargs)
    findNextSibling = find_next_sibling # BS3
613
+
614
    def find_next_siblings(self, name=None, attrs={}, string=None, limit=None,
                           **kwargs):
        """Find all siblings of this PageElement that match the given criteria
        and appear later in the document.

        All find_* methods take a common set of arguments. See the online
        documentation for detailed explanations.

        :param name: A filter on tag name.
        :param attrs: A dictionary of filters on attribute values.
        :param string: A filter for a NavigableString with specific text.
        :param limit: Stop looking after finding this many results.
        :kwargs: A dictionary of filters on attribute values.
        :return: A ResultSet of PageElements.
        :rtype: bs4.element.ResultSet
        """
        # _stacklevel is bumped so warnings point at the caller's code.
        _stacklevel = kwargs.pop('_stacklevel', 2)
        return self._find_all(
            name, attrs, string, limit,
            self.next_siblings, _stacklevel=_stacklevel+1, **kwargs
        )
    findNextSiblings = find_next_siblings # BS3
    fetchNextSiblings = find_next_siblings # BS2
637
+
638
    def find_previous(self, name=None, attrs={}, string=None, **kwargs):
        """Look backwards in the document from this PageElement and find the
        first PageElement that matches the given criteria.

        All find_* methods take a common set of arguments. See the online
        documentation for detailed explanations.

        :param name: A filter on tag name.
        :param attrs: A dictionary of filters on attribute values.
        :param string: A filter for a NavigableString with specific text.
        :kwargs: A dictionary of filters on attribute values.
        :return: A PageElement.
        :rtype: bs4.element.Tag | bs4.element.NavigableString
        """
        return self._find_one(
            self.find_all_previous, name, attrs, string, **kwargs)
    findPrevious = find_previous # BS3
655
+
656
    def find_all_previous(self, name=None, attrs={}, string=None, limit=None,
                          **kwargs):
        """Look backwards in the document from this PageElement and find all
        PageElements that match the given criteria.

        All find_* methods take a common set of arguments. See the online
        documentation for detailed explanations.

        :param name: A filter on tag name.
        :param attrs: A dictionary of filters on attribute values.
        :param string: A filter for a NavigableString with specific text.
        :param limit: Stop looking after finding this many results.
        :kwargs: A dictionary of filters on attribute values.
        :return: A ResultSet of PageElements.
        :rtype: bs4.element.ResultSet
        """
        # _stacklevel is bumped so warnings point at the caller's code.
        _stacklevel = kwargs.pop('_stacklevel', 2)
        return self._find_all(
            name, attrs, string, limit, self.previous_elements,
            _stacklevel=_stacklevel+1, **kwargs
        )
    findAllPrevious = find_all_previous # BS3
    fetchPrevious = find_all_previous # BS2
679
+
680
    def find_previous_sibling(self, name=None, attrs={}, string=None, **kwargs):
        """Returns the closest sibling to this PageElement that matches the
        given criteria and appears earlier in the document.

        All find_* methods take a common set of arguments. See the online
        documentation for detailed explanations.

        :param name: A filter on tag name.
        :param attrs: A dictionary of filters on attribute values.
        :param string: A filter for a NavigableString with specific text.
        :kwargs: A dictionary of filters on attribute values.
        :return: A PageElement.
        :rtype: bs4.element.Tag | bs4.element.NavigableString
        """
        return self._find_one(self.find_previous_siblings, name, attrs, string,
                              **kwargs)
    findPreviousSibling = find_previous_sibling # BS3
697
+
698
    def find_previous_siblings(self, name=None, attrs={}, string=None,
                               limit=None, **kwargs):
        """Returns all siblings to this PageElement that match the
        given criteria and appear earlier in the document.

        All find_* methods take a common set of arguments. See the online
        documentation for detailed explanations.

        :param name: A filter on tag name.
        :param attrs: A dictionary of filters on attribute values.
        :param string: A filter for a NavigableString with specific text.
        :param limit: Stop looking after finding this many results.
        :kwargs: A dictionary of filters on attribute values.
        :return: A ResultSet of PageElements.
        :rtype: bs4.element.ResultSet
        """
        # _stacklevel is bumped so warnings point at the caller's code.
        _stacklevel = kwargs.pop('_stacklevel', 2)
        return self._find_all(
            name, attrs, string, limit,
            self.previous_siblings, _stacklevel=_stacklevel+1, **kwargs
        )
    findPreviousSiblings = find_previous_siblings # BS3
    fetchPreviousSiblings = find_previous_siblings # BS2
721
+
722
    def find_parent(self, name=None, attrs={}, **kwargs):
        """Find the closest parent of this PageElement that matches the given
        criteria.

        All find_* methods take a common set of arguments. See the online
        documentation for detailed explanations.

        :param name: A filter on tag name.
        :param attrs: A dictionary of filters on attribute values.
        :kwargs: A dictionary of filters on attribute values.

        :return: A PageElement.
        :rtype: bs4.element.Tag | bs4.element.NavigableString
        """
        # NOTE: We can't use _find_one because findParents takes a different
        # set of arguments.
        r = None
        l = self.find_parents(name, attrs, 1, _stacklevel=3, **kwargs)
        if l:
            r = l[0]
        return r
    findParent = find_parent # BS3
744
+
745
    def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
        """Find all parents of this PageElement that match the given criteria.

        All find_* methods take a common set of arguments. See the online
        documentation for detailed explanations.

        :param name: A filter on tag name.
        :param attrs: A dictionary of filters on attribute values.
        :param limit: Stop looking after finding this many results.
        :kwargs: A dictionary of filters on attribute values.

        :return: A ResultSet of PageElements.
        :rtype: bs4.element.ResultSet
        """
        # _stacklevel is bumped so warnings point at the caller's code.
        _stacklevel = kwargs.pop('_stacklevel', 2)
        return self._find_all(name, attrs, None, limit, self.parents,
                              _stacklevel=_stacklevel+1, **kwargs)
    findParents = find_parents # BS3
    fetchParents = find_parents # BS2
764
+
765
    @property
    def next(self):
        """The PageElement, if any, that was parsed just after this one.

        :return: A PageElement.
        :rtype: bs4.element.Tag | bs4.element.NavigableString
        """
        # Read-only alias for next_element.
        return self.next_element
773
+
774
    @property
    def previous(self):
        """The PageElement, if any, that was parsed just before this one.

        :return: A PageElement.
        :rtype: bs4.element.Tag | bs4.element.NavigableString
        """
        # Read-only alias for previous_element.
        return self.previous_element
782
+
783
    #These methods do the real heavy lifting.

    def _find_one(self, method, name, attrs, string, **kwargs):
        # Run the given find_all-style method with limit=1 and return
        # the single result, or None if nothing matched.
        r = None
        l = method(name, attrs, string, 1, _stacklevel=4, **kwargs)
        if l:
            r = l[0]
        return r
791
+
792
    def _find_all(self, name, attrs, string, limit, generator, **kwargs):
        "Iterates over a generator looking for things that match."
        _stacklevel = kwargs.pop('_stacklevel', 3)

        # 'text' is the deprecated BS3-era name for 'string'.
        if string is None and 'text' in kwargs:
            string = kwargs.pop('text')
            warnings.warn(
                "The 'text' argument to find()-type methods is deprecated. Use 'string' instead.",
                DeprecationWarning, stacklevel=_stacklevel
            )

        if isinstance(name, SoupStrainer):
            strainer = name
        else:
            strainer = SoupStrainer(name, attrs, string, **kwargs)

        if string is None and not limit and not attrs and not kwargs:
            if name is True or name is None:
                # Optimization to find all tags.
                result = (element for element in generator
                          if isinstance(element, Tag))
                return ResultSet(strainer, result)
            elif isinstance(name, str):
                # Optimization to find all tags with a given name.
                if name.count(':') == 1:
                    # This is a name with a prefix. If this is a namespace-aware document,
                    # we need to match the local name against tag.name. If not,
                    # we need to match the fully-qualified name against tag.name.
                    prefix, local_name = name.split(':', 1)
                else:
                    prefix = None
                    local_name = name
                # NOTE(review): the condition below relies on `and`
                # binding tighter than `or`, so a non-Tag element can
                # reach the `element.name == local_name` clause --
                # presumably every element this generator yields has a
                # `name` attribute; confirm before restructuring.
                result = (element for element in generator
                          if isinstance(element, Tag)
                          and (
                              element.name == name
                          ) or (
                              element.name == local_name
                              and (prefix is None or element.prefix == prefix)
                          )
                )
                return ResultSet(strainer, result)
        # General case: run every candidate through the strainer.
        results = ResultSet(strainer)
        while True:
            try:
                i = next(generator)
            except StopIteration:
                break
            if i:
                found = strainer.search(i)
                if found:
                    results.append(found)
                    if limit and len(results) >= limit:
                        break
        return results
847
+
848
+ #These generators can be used to navigate starting from both
849
+ #NavigableStrings and Tags.
850
+ @property
851
+ def next_elements(self):
852
+ """All PageElements that were parsed after this one.
853
+
854
+ :yield: A sequence of PageElements.
855
+ """
856
+ i = self.next_element
857
+ while i is not None:
858
+ yield i
859
+ i = i.next_element
860
+
861
+ @property
862
+ def next_siblings(self):
863
+ """All PageElements that are siblings of this one but were parsed
864
+ later.
865
+
866
+ :yield: A sequence of PageElements.
867
+ """
868
+ i = self.next_sibling
869
+ while i is not None:
870
+ yield i
871
+ i = i.next_sibling
872
+
873
+ @property
874
+ def previous_elements(self):
875
+ """All PageElements that were parsed before this one.
876
+
877
+ :yield: A sequence of PageElements.
878
+ """
879
+ i = self.previous_element
880
+ while i is not None:
881
+ yield i
882
+ i = i.previous_element
883
+
884
+ @property
885
+ def previous_siblings(self):
886
+ """All PageElements that are siblings of this one but were parsed
887
+ earlier.
888
+
889
+ :yield: A sequence of PageElements.
890
+ """
891
+ i = self.previous_sibling
892
+ while i is not None:
893
+ yield i
894
+ i = i.previous_sibling
895
+
896
+ @property
897
+ def parents(self):
898
+ """All PageElements that are parents of this PageElement.
899
+
900
+ :yield: A sequence of PageElements.
901
+ """
902
+ i = self.parent
903
+ while i is not None:
904
+ yield i
905
+ i = i.parent
906
+
907
+ @property
908
+ def decomposed(self):
909
+ """Check whether a PageElement has been decomposed.
910
+
911
+ :rtype: bool
912
+ """
913
+ return getattr(self, '_decomposed', False) or False
914
+
915
+ # Old non-property versions of the generators, for backwards
916
+ # compatibility with BS3.
917
    def nextGenerator(self):
        # BS3-era method name; alias for the `next_elements` property.
        return self.next_elements
919
+
920
    def nextSiblingGenerator(self):
        # BS3-era method name; alias for the `next_siblings` property.
        return self.next_siblings
922
+
923
    def previousGenerator(self):
        # BS3-era method name; alias for the `previous_elements` property.
        return self.previous_elements
925
+
926
    def previousSiblingGenerator(self):
        # BS3-era method name; alias for the `previous_siblings` property.
        return self.previous_siblings
928
+
929
    def parentGenerator(self):
        # BS3-era method name; alias for the `parents` property.
        return self.parents
931
+
932
+
933
class NavigableString(str, PageElement):
    """A Python Unicode string that is part of a parse tree.

    When Beautiful Soup parses the markup <b>penguin</b>, it will
    create a NavigableString for the string "penguin".
    """

    # Subclasses (Comment, CData, ...) override these to wrap the string
    # in delimiters on output; a plain string adds nothing.
    PREFIX = ''
    SUFFIX = ''

    def __new__(cls, value):
        """Create a new NavigableString.

        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, str):
            u = str.__new__(cls, value)
        else:
            # `value` is a bytestring; decode it while constructing.
            u = str.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
        # Initialize the PageElement side (parent/sibling pointers).
        u.setup()
        return u

    def __deepcopy__(self, memo, recursive=False):
        """A copy of a NavigableString has the same contents and class
        as the original, but it is not connected to the parse tree.

        :param recursive: This parameter is ignored; it's only defined
           so that NavigableString.__deepcopy__ implements the same
           signature as Tag.__deepcopy__.
        """
        return type(self)(self)

    def __copy__(self):
        """A copy of a NavigableString can only be a deep copy, because
        only one PageElement can occupy a given place in a parse tree.
        """
        return self.__deepcopy__({})

    def __getnewargs__(self):
        # Pickle support: reconstruct via __new__(plain_string).
        return (str(self),)

    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError(
                "'%s' object has no attribute '%s'" % (
                    self.__class__.__name__, attr))

    def output_ready(self, formatter="minimal"):
        """Run the string through the provided formatter.

        :param formatter: A Formatter object, or a string naming one of the standard formatters.
        :return: The formatted string, wrapped in this class's
            PREFIX and SUFFIX.
        """
        output = self.format_string(self, formatter)
        return self.PREFIX + output + self.SUFFIX

    @property
    def name(self):
        """Since a NavigableString is not a Tag, it has no .name.

        This property is implemented so that code like this doesn't crash
        when run on a mixture of Tag and NavigableString objects:
        [x.name for x in tag.children]
        """
        return None

    @name.setter
    def name(self, name):
        """Prevent NavigableString.name from ever being set."""
        raise AttributeError("A NavigableString cannot be given a name.")

    def _all_strings(self, strip=False, types=PageElement.default):
        """Yield all strings of certain classes, possibly stripping them.

        This makes it easy for NavigableString to implement methods
        like get_text() as conveniences, creating a consistent
        text-extraction API across all PageElements.

        :param strip: If True, all strings will be stripped before being
            yielded.

        :param types: A tuple of NavigableString subclasses. If this
            NavigableString isn't one of those subclasses, the
            sequence will be empty. By default, the subclasses
            considered are NavigableString and CData objects. That
            means no comments, processing instructions, etc.

        :yield: A sequence that either contains this string, or is empty.

        """
        if types is self.default:
            # This is kept in Tag because it's full of subclasses of
            # this class, which aren't defined until later in the file.
            types = Tag.DEFAULT_INTERESTING_STRING_TYPES

        # Do nothing if the caller is looking for specific types of
        # string, and we're of a different type.
        #
        # We check specific types instead of using isinstance(self,
        # types) because all of these classes subclass
        # NavigableString. Anyone who's using this feature probably
        # wants generic NavigableStrings but not other stuff.
        my_type = type(self)
        if types is not None:
            if isinstance(types, type):
                # Looking for a single type.
                if my_type is not types:
                    return
            elif my_type not in types:
                # Looking for one of a list of types.
                return

        value = self
        if strip:
            value = value.strip()
        if len(value) > 0:
            yield value
    strings = property(_all_strings)
1058
+
1059
class PreformattedString(NavigableString):
    """A NavigableString not subject to the normal formatting rules.

    This is an abstract class used for special kinds of strings such
    as comments (the Comment class) and CDATA blocks (the CData
    class).
    """

    PREFIX = ''
    SUFFIX = ''

    def output_ready(self, formatter=None):
        """Make this string ready for output by adding any subclass-specific
        prefix or suffix.

        :param formatter: A Formatter object, or a string naming one
            of the standard formatters. The string will be passed into the
            Formatter, but only to trigger any side effects: the return
            value is ignored.

        :return: The string, with any subclass-specific prefix and
           suffix added on.
        """
        if formatter is not None:
            # Deliberately discard the result: the string itself is
            # emitted verbatim, but the formatter may have side effects.
            ignore = self.format_string(self, formatter)
        return self.PREFIX + self + self.SUFFIX
1085
+
1086
class CData(PreformattedString):
    """A CDATA block."""
    # Contents are rendered verbatim between the CDATA delimiters.
    PREFIX = '<![CDATA['
    SUFFIX = ']]>'
1090
+
1091
class ProcessingInstruction(PreformattedString):
    """A SGML processing instruction."""

    # Note the SGML-style '>' terminator; the XML variant is the
    # XMLProcessingInstruction subclass, which ends in '?>'.
    PREFIX = '<?'
    SUFFIX = '>'
1096
+
1097
class XMLProcessingInstruction(ProcessingInstruction):
    """An XML processing instruction."""
    # Unlike the SGML form, XML processing instructions end in '?>'.
    PREFIX = '<?'
    SUFFIX = '?>'
1101
+
1102
class Comment(PreformattedString):
    """An HTML or XML comment."""
    # Contents are rendered verbatim between comment delimiters.
    PREFIX = '<!--'
    SUFFIX = '-->'
1106
+
1107
+
1108
class Declaration(PreformattedString):
    """An XML declaration."""
    PREFIX = '<?'
    SUFFIX = '?>'
1112
+
1113
+
1114
class Doctype(PreformattedString):
    """A document type declaration."""
    @classmethod
    def for_name_and_ids(cls, name, pub_id, system_id):
        """Generate an appropriate document type declaration for a given
        public ID and system ID.

        :param name: The name of the document's root element, e.g. 'html'.
        :param pub_id: The Formal Public Identifier for this document type,
            e.g. '-//W3C//DTD XHTML 1.1//EN'
        :param system_id: The system identifier for this document type,
            e.g. 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'

        :return: A Doctype.
        """
        value = name or ''
        if pub_id is not None:
            value += ' PUBLIC "%s"' % pub_id
            # A system ID following a public ID gets no keyword.
            if system_id is not None:
                value += ' "%s"' % system_id
        elif system_id is not None:
            # A system ID on its own needs the SYSTEM keyword.
            value += ' SYSTEM "%s"' % system_id

        # Use cls rather than hard-coding Doctype, so that subclasses
        # of Doctype get instances of themselves from this alternate
        # constructor. Behavior is unchanged for Doctype itself.
        return cls(value)

    PREFIX = '<!DOCTYPE '
    SUFFIX = '>\n'
1141
+
1142
+
1143
class Stylesheet(NavigableString):
    """A NavigableString representing a stylesheet (probably
    CSS).

    Used to distinguish embedded stylesheets from textual content.
    """
    pass
1150
+
1151
+
1152
class Script(NavigableString):
    """A NavigableString representing an executable script (probably
    Javascript).

    Used to distinguish executable code from textual content.
    """
    pass
1159
+
1160
+
1161
class TemplateString(NavigableString):
    """A NavigableString representing a string found inside an HTML
    template embedded in a larger document.

    Used to distinguish such strings from the main body of the document.
    """
    pass
1168
+
1169
+
1170
class RubyTextString(NavigableString):
    """A NavigableString representing the contents of the <rt> HTML
    element.

    https://dev.w3.org/html5/spec-LC/text-level-semantics.html#the-rt-element

    Can be used to distinguish such strings from the strings they're
    annotating.
    """
    pass
1180
+
1181
+
1182
class RubyParenthesisString(NavigableString):
    """A NavigableString representing the contents of the <rp> HTML
    element.

    https://dev.w3.org/html5/spec-LC/text-level-semantics.html#the-rp-element
    """
    pass
1189
+
1190
+
1191
+ class Tag(PageElement):
1192
+ """Represents an HTML or XML tag that is part of a parse tree, along
1193
+ with its attributes and contents.
1194
+
1195
+ When Beautiful Soup parses the markup <b>penguin</b>, it will
1196
+ create a Tag object representing the <b> tag.
1197
+ """
1198
+
1199
    def __init__(self, parser=None, builder=None, name=None, namespace=None,
                 prefix=None, attrs=None, parent=None, previous=None,
                 is_xml=None, sourceline=None, sourcepos=None,
                 can_be_empty_element=None, cdata_list_attributes=None,
                 preserve_whitespace_tags=None,
                 interesting_string_types=None,
                 namespaces=None
                 ):
        """Basic constructor.

        :param parser: A BeautifulSoup object.
        :param builder: A TreeBuilder.
        :param name: The name of the tag.
        :param namespace: The URI of this Tag's XML namespace, if any.
        :param prefix: The prefix for this Tag's XML namespace, if any.
        :param attrs: A dictionary of this Tag's attribute values.
        :param parent: The PageElement to use as this Tag's parent.
        :param previous: The PageElement that was parsed immediately before
            this tag.
        :param is_xml: If True, this is an XML tag. Otherwise, this is an
            HTML tag.
        :param sourceline: The line number where this tag was found in its
            source document.
        :param sourcepos: The character position within `sourceline` where this
            tag was found.
        :param can_be_empty_element: If True, this tag should be
            represented as <tag/>. If False, this tag should be represented
            as <tag></tag>.
        :param cdata_list_attributes: A list of attributes whose values should
            be treated as CDATA if they ever show up on this tag.
        :param preserve_whitespace_tags: A list of tag names whose contents
            should have their whitespace preserved.
        :param interesting_string_types: This is a NavigableString
            subclass or a tuple of them. When iterating over this
            Tag's strings in methods like Tag.strings or Tag.get_text,
            these are the types of strings that are interesting enough
            to be considered. The default is to consider
            NavigableString and CData the only interesting string
            subtypes.
        :param namespaces: A dictionary mapping currently active
            namespace prefixes to URIs. This can be used later to
            construct CSS selectors.

        :raises ValueError: If no `name` is provided.
        """
        if parser is None:
            self.parser_class = None
        else:
            # We don't actually store the parser object: that lets extracted
            # chunks be garbage-collected.
            self.parser_class = parser.__class__
        if name is None:
            raise ValueError("No value provided for new tag's name.")
        self.name = name
        self.namespace = namespace
        self._namespaces = namespaces or {}
        self.prefix = prefix
        # Only record source position when the builder is configured to
        # track it (or there is no builder at all) and a position was
        # actually supplied.
        if ((not builder or builder.store_line_numbers)
            and (sourceline is not None or sourcepos is not None)):
            self.sourceline = sourceline
            self.sourcepos = sourcepos
        if attrs is None:
            attrs = {}
        elif attrs:
            # Let the builder expand space-separated attribute values
            # (e.g. class="a b") into lists where appropriate.
            if builder is not None and builder.cdata_list_attributes:
                attrs = builder._replace_cdata_list_attribute_values(
                    self.name, attrs)
            else:
                attrs = dict(attrs)
        else:
            attrs = dict(attrs)

        # If possible, determine ahead of time whether this tag is an
        # XML tag.
        if builder:
            self.known_xml = builder.is_xml
        else:
            self.known_xml = is_xml
        self.attrs = attrs
        self.contents = []
        self.setup(parent, previous)
        self.hidden = False

        if builder is None:
            # In the absence of a TreeBuilder, use whatever values were
            # passed in here. They're probably None, unless this is a copy of some
            # other tag.
            self.can_be_empty_element = can_be_empty_element
            self.cdata_list_attributes = cdata_list_attributes
            self.preserve_whitespace_tags = preserve_whitespace_tags
            self.interesting_string_types = interesting_string_types
        else:
            # Set up any substitutions for this tag, such as the charset in a META tag.
            builder.set_up_substitutions(self)

            # Ask the TreeBuilder whether this tag might be an empty-element tag.
            self.can_be_empty_element = builder.can_be_empty_element(name)

            # Keep track of the list of attributes of this tag that
            # might need to be treated as a list.
            #
            # For performance reasons, we store the whole data structure
            # rather than asking the question of every tag. Asking would
            # require building a new data structure every time, and
            # (unlike can_be_empty_element), we almost never need
            # to check this.
            self.cdata_list_attributes = builder.cdata_list_attributes

            # Keep track of the names that might cause this tag to be treated as a
            # whitespace-preserved tag.
            self.preserve_whitespace_tags = builder.preserve_whitespace_tags

            if self.name in builder.string_containers:
                # This sort of tag uses a special string container
                # subclass for most of its strings. When we ask the
                self.interesting_string_types = builder.string_containers[self.name]
            else:
                self.interesting_string_types = self.DEFAULT_INTERESTING_STRING_TYPES
1315
+
1316
+ parserClass = _alias("parser_class") # BS3
1317
+
1318
    def __deepcopy__(self, memo, recursive=True):
        """A deepcopy of a Tag is a new Tag, unconnected to the parse tree.
        Its contents are a copy of the old Tag's contents.

        :param memo: The standard deepcopy memo dictionary (unused here
            beyond being passed through to child copies).
        :param recursive: If True, descendants are cloned as well;
            if False, only this Tag itself is cloned.
        """
        clone = self._clone()

        if recursive:
            # Clone this tag's descendants recursively, but without
            # making any recursive function calls.
            tag_stack = [clone]
            for event, element in self._event_stream(self.descendants):
                if event is Tag.END_ELEMENT_EVENT:
                    # Stop appending incoming Tags to the Tag that was
                    # just closed.
                    tag_stack.pop()
                else:
                    descendant_clone = element.__deepcopy__(
                        memo, recursive=False
                    )
                    # Add to its parent's .contents
                    tag_stack[-1].append(descendant_clone)

                    if event is Tag.START_ELEMENT_EVENT:
                        # Add the Tag itself to the stack so that its
                        # children will be .appended to it.
                        tag_stack.append(descendant_clone)
        return clone
1345
+
1346
    def __copy__(self):
        """A copy of a Tag must always be a deep copy, because a Tag's
        children can only have one parent at a time.
        """
        return self.__deepcopy__({})
1351
+
1352
    def _clone(self):
        """Create a new Tag just like this one, but with no
        contents and unattached to any parse tree.

        This is the first step in the deepcopy process.

        :return: A new Tag of the same class, carrying over this tag's
            name, namespace, prefix, attributes and builder-derived
            configuration, but with an empty .contents list.
        """
        clone = type(self)(
            None, None, self.name, self.namespace,
            self.prefix, self.attrs, is_xml=self._is_xml,
            sourceline=self.sourceline, sourcepos=self.sourcepos,
            can_be_empty_element=self.can_be_empty_element,
            cdata_list_attributes=self.cdata_list_attributes,
            preserve_whitespace_tags=self.preserve_whitespace_tags,
            interesting_string_types=self.interesting_string_types
        )
        # Copy state the constructor doesn't take directly.
        for attr in ('can_be_empty_element', 'hidden'):
            setattr(clone, attr, getattr(self, attr))
        return clone
1370
+
1371
    @property
    def is_empty_element(self):
        """Is this tag an empty-element tag? (aka a self-closing tag)

        A tag that has contents is never an empty-element tag.

        A tag that has no contents may or may not be an empty-element
        tag. It depends on the builder used to create the tag. If the
        builder has a designated list of empty-element tags, then only
        a tag whose name shows up in that list is considered an
        empty-element tag.

        If the builder has no designated list of empty-element tags,
        then any tag with no contents is an empty-element tag.

        :rtype: bool
        """
        return len(self.contents) == 0 and self.can_be_empty_element
    isSelfClosing = is_empty_element  # BS3
1388
+
1389
+ @property
1390
+ def string(self):
1391
+ """Convenience property to get the single string within this
1392
+ PageElement.
1393
+
1394
+ TODO It might make sense to have NavigableString.string return
1395
+ itself.
1396
+
1397
+ :return: If this element has a single string child, return
1398
+ value is that string. If this element has one child tag,
1399
+ return value is the 'string' attribute of the child tag,
1400
+ recursively. If this element is itself a string, has no
1401
+ children, or has more than one child, return value is None.
1402
+ """
1403
+ if len(self.contents) != 1:
1404
+ return None
1405
+ child = self.contents[0]
1406
+ if isinstance(child, NavigableString):
1407
+ return child
1408
+ return child.string
1409
+
1410
+ @string.setter
1411
+ def string(self, string):
1412
+ """Replace this PageElement's contents with `string`."""
1413
+ self.clear()
1414
+ self.append(string.__class__(string))
1415
+
1416
    DEFAULT_INTERESTING_STRING_TYPES = (NavigableString, CData)
    def _all_strings(self, strip=False, types=PageElement.default):
        """Yield all strings of certain classes, possibly stripping them.

        :param strip: If True, all strings will be stripped before being
            yielded.

        :param types: A tuple of NavigableString subclasses. Any strings of
            a subclass not found in this list will be ignored. By
            default, the subclasses considered are the ones found in
            self.interesting_string_types. If that's not specified,
            only NavigableString and CData objects will be
            considered. That means no comments, processing
            instructions, etc.

        :yield: A sequence of strings.

        """
        if types is self.default:
            types = self.interesting_string_types

        for descendant in self.descendants:
            if (types is None and not isinstance(descendant, NavigableString)):
                continue
            descendant_type = type(descendant)
            # Exact type comparison (not isinstance) is deliberate: all
            # of these classes subclass NavigableString, and callers
            # filtering by type usually don't want the subclasses.
            if isinstance(types, type):
                if descendant_type is not types:
                    # We're not interested in strings of this type.
                    continue
            elif types is not None and descendant_type not in types:
                # We're not interested in strings of this type.
                continue
            if strip:
                descendant = descendant.strip()
                if len(descendant) == 0:
                    continue
            yield descendant
    strings = property(_all_strings)
1454
+
1455
    def decompose(self):
        """Recursively destroys this PageElement and its children.

        This element will be removed from the tree and wiped out; so
        will everything beneath it.

        The behavior of a decomposed PageElement is undefined and you
        should never use one for anything, but if you need to _check_
        whether an element has been decomposed, you can use the
        `decomposed` property.
        """
        self.extract()
        i = self
        while i is not None:
            # Capture the next element before wiping this one, since
            # clearing __dict__ destroys the next_element pointer.
            n = i.next_element
            i.__dict__.clear()
            i.contents = []
            i._decomposed = True
            i = n
1474
+
1475
+ def clear(self, decompose=False):
1476
+ """Wipe out all children of this PageElement by calling extract()
1477
+ on them.
1478
+
1479
+ :param decompose: If this is True, decompose() (a more
1480
+ destructive method) will be called instead of extract().
1481
+ """
1482
+ if decompose:
1483
+ for element in self.contents[:]:
1484
+ if isinstance(element, Tag):
1485
+ element.decompose()
1486
+ else:
1487
+ element.extract()
1488
+ else:
1489
+ for element in self.contents[:]:
1490
+ element.extract()
1491
+
1492
    def smooth(self):
        """Smooth out this element's children by consolidating consecutive
        strings.

        This makes pretty-printed output look more natural following a
        lot of operations that modified the tree.
        """
        # Mark the first position of every pair of children that need
        # to be consolidated.  Do this rather than making a copy of
        # self.contents, since in most cases very few strings will be
        # affected.
        marked = []
        for i, a in enumerate(self.contents):
            if isinstance(a, Tag):
                # Recursively smooth children.
                a.smooth()
            if i == len(self.contents)-1:
                # This is the last item in .contents, and it's not a
                # tag. There's no chance it needs any work.
                continue
            b = self.contents[i+1]
            # Only plain (non-preformatted) strings are merged;
            # Comments, CData etc. must keep their own delimiters.
            if (isinstance(a, NavigableString)
                and isinstance(b, NavigableString)
                and not isinstance(a, PreformattedString)
                and not isinstance(b, PreformattedString)
            ):
                marked.append(i)

        # Go over the marked positions in reverse order, so that
        # removing items from .contents won't affect the remaining
        # positions.
        for i in reversed(marked):
            a = self.contents[i]
            b = self.contents[i+1]
            b.extract()
            n = NavigableString(a+b)
            a.replace_with(n)
1529
+
1530
+ def index(self, element):
1531
+ """Find the index of a child by identity, not value.
1532
+
1533
+ Avoids issues with tag.contents.index(element) getting the
1534
+ index of equal elements.
1535
+
1536
+ :param element: Look for this PageElement in `self.contents`.
1537
+ """
1538
+ for i, child in enumerate(self.contents):
1539
+ if child is element:
1540
+ return i
1541
+ raise ValueError("Tag.index: element not in tag")
1542
+
1543
+ def get(self, key, default=None):
1544
+ """Returns the value of the 'key' attribute for the tag, or
1545
+ the value given for 'default' if it doesn't have that
1546
+ attribute."""
1547
+ return self.attrs.get(key, default)
1548
+
1549
+ def get_attribute_list(self, key, default=None):
1550
+ """The same as get(), but always returns a list.
1551
+
1552
+ :param key: The attribute to look for.
1553
+ :param default: Use this value if the attribute is not present
1554
+ on this PageElement.
1555
+ :return: A list of values, probably containing only a single
1556
+ value.
1557
+ """
1558
+ value = self.get(key, default)
1559
+ if not isinstance(value, list):
1560
+ value = [value]
1561
+ return value
1562
+
1563
    def has_attr(self, key):
        """Does this PageElement have an attribute with the given name?

        :rtype: bool
        """
        return key in self.attrs
1566
+
1567
    def __hash__(self):
        # Hash is derived from the rendered markup, so a mutated Tag's
        # hash changes over time — avoid using Tags as dict keys while
        # modifying them.
        return str(self).__hash__()
1569
+
1570
    def __getitem__(self, key):
        """tag[key] returns the value of the 'key' attribute for the Tag,
        and throws an exception (KeyError) if it's not there."""
        return self.attrs[key]
1574
+
1575
    def __iter__(self):
        "Iterating over a Tag iterates over its contents (direct children only)."
        return iter(self.contents)
1578
+
1579
    def __len__(self):
        "The length of a Tag is the length of its list of contents."
        return len(self.contents)
1582
+
1583
    def __contains__(self, x):
        """`x in tag` is true if x is a direct child of this tag.

        Uses list membership on .contents, i.e. equality comparison.
        """
        return x in self.contents
1585
+
1586
    def __bool__(self):
        "A tag is non-None even if it has no contents."
        return True
1589
+
1590
    def __setitem__(self, key, value):
        """Setting tag[key] sets the value of the 'key' attribute for the
        tag."""
        self.attrs[key] = value
1594
+
1595
    def __delitem__(self, key):
        # Deleting tag[key] deletes all 'key' attributes for the tag.
        # pop() with a default: deleting a missing attribute is a no-op,
        # not a KeyError.
        self.attrs.pop(key, None)
1598
+
1599
    def __call__(self, *args, **kwargs):
        """Calling a Tag like a function is the same as calling its
        find_all() method. Eg. tag('a') returns a list of all the A tags
        found within this tag."""
        return self.find_all(*args, **kwargs)
1604
+
1605
    def __getattr__(self, tag):
        """Calling tag.subtag is the same as calling tag.find(name="subtag")"""
        #print("Getattr %s.%s" % (self.__class__, tag))
        if len(tag) > 3 and tag.endswith('Tag'):
            # BS3: soup.aTag -> "soup.find("a")
            tag_name = tag[:-3]
            warnings.warn(
                '.%(name)sTag is deprecated, use .find("%(name)s") instead. If you really were looking for a tag called %(name)sTag, use .find("%(name)sTag")' % dict(
                    name=tag_name
                ),
                DeprecationWarning, stacklevel=2
            )
            return self.find(tag_name)
        # We special case contents to avoid recursion.
        elif not tag.startswith("__") and not tag == "contents":
            return self.find(tag)
        raise AttributeError(
            "'%s' object has no attribute '%s'" % (self.__class__, tag))
1623
+
1624
+ def __eq__(self, other):
1625
+ """Returns true iff this Tag has the same name, the same attributes,
1626
+ and the same contents (recursively) as `other`."""
1627
+ if self is other:
1628
+ return True
1629
+ if (not hasattr(other, 'name') or
1630
+ not hasattr(other, 'attrs') or
1631
+ not hasattr(other, 'contents') or
1632
+ self.name != other.name or
1633
+ self.attrs != other.attrs or
1634
+ len(self) != len(other)):
1635
+ return False
1636
+ for i, my_child in enumerate(self.contents):
1637
+ if my_child != other.contents[i]:
1638
+ return False
1639
+ return True
1640
+
1641
    def __ne__(self, other):
        """Returns true iff this Tag is not identical to `other`,
        as defined in __eq__."""
        return not self == other
1645
+
1646
+ def __repr__(self, encoding="unicode-escape"):
1647
+ """Renders this PageElement as a string.
1648
+
1649
+ :param encoding: The encoding to use (Python 2 only).
1650
+ TODO: This is now ignored and a warning should be issued
1651
+ if a value is provided.
1652
+ :return: A (Unicode) string.
1653
+ """
1654
+ # "The return value must be a string object", i.e. Unicode
1655
+ return self.decode()
1656
+
1657
    def __unicode__(self):
        """Renders this PageElement as a Unicode string."""
        return self.decode()

    # str() and repr() both produce the decoded markup.
    __str__ = __repr__ = __unicode__
1662
+
1663
+ def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
1664
+ indent_level=None, formatter="minimal",
1665
+ errors="xmlcharrefreplace"):
1666
+ """Render a bytestring representation of this PageElement and its
1667
+ contents.
1668
+
1669
+ :param encoding: The destination encoding.
1670
+ :param indent_level: Each line of the rendering will be
1671
+ indented this many levels. (The formatter decides what a
1672
+ 'level' means in terms of spaces or other characters
1673
+ output.) Used internally in recursive calls while
1674
+ pretty-printing.
1675
+ :param formatter: A Formatter object, or a string naming one of
1676
+ the standard formatters.
1677
+ :param errors: An error handling strategy such as
1678
+ 'xmlcharrefreplace'. This value is passed along into
1679
+ encode() and its value should be one of the constants
1680
+ defined by Python.
1681
+ :return: A bytestring.
1682
+
1683
+ """
1684
+ # Turn the data structure into Unicode, then encode the
1685
+ # Unicode.
1686
+ u = self.decode(indent_level, encoding, formatter)
1687
+ return u.encode(encoding, errors)
1688
+
1689
    def decode(self, indent_level=None,
               eventual_encoding=DEFAULT_OUTPUT_ENCODING,
               formatter="minimal",
               iterator=None):
        """Render this Tag and its contents as a Unicode string.

        :param indent_level: Each line of the rendering will be
            indented this many levels. If True, pretty-printing starts
            at level 0. If None, no pretty-printing is done.
        :param eventual_encoding: The encoding the string will
            eventually be encoded to; passed along to the tag
            formatter so it can be mentioned in an XML declaration,
            for example.
        :param formatter: A Formatter object, or a string naming one of
            the standard formatters.
        :param iterator: An alternate iterator to use when traversing
            the tree (defaults to self_and_descendants via
            _event_stream).
        :return: A Unicode string.
        """
        pieces = []
        # First off, turn a non-Formatter `formatter` into a Formatter
        # object. This will stop the lookup from happening over and
        # over again.
        if not isinstance(formatter, Formatter):
            formatter = self.formatter_for_name(formatter)

        if indent_level is True:
            indent_level = 0

        # The currently active tag that put us into string literal
        # mode. Until this element is closed, children will be treated
        # as string literals and not pretty-printed. String literal
        # mode is turned on immediately after this tag begins, and
        # turned off immediately before it's closed. This means there
        # will be whitespace before and after the tag itself.
        string_literal_tag = None

        for event, element in self._event_stream(iterator):
            if event in (Tag.START_ELEMENT_EVENT, Tag.EMPTY_ELEMENT_EVENT):
                piece = element._format_tag(
                    eventual_encoding, formatter, opening=True
                )
            elif event is Tag.END_ELEMENT_EVENT:
                piece = element._format_tag(
                    eventual_encoding, formatter, opening=False
                )
                if indent_level is not None:
                    indent_level -= 1
            else:
                piece = element.output_ready(formatter)

            # Now we need to apply the 'prettiness' -- extra
            # whitespace before and/or after this tag. This can get
            # complicated because certain tags, like <pre> and
            # <script>, can't be prettified, since adding whitespace would
            # change the meaning of the content.

            # The default behavior is to add whitespace before and
            # after an element when string literal mode is off, and to
            # leave things as they are when string literal mode is on.
            if string_literal_tag:
                indent_before = indent_after = False
            else:
                indent_before = indent_after = True

            # The only time the behavior is more complex than that is
            # when we encounter an opening or closing tag that might
            # put us into or out of string literal mode.
            if (event is Tag.START_ELEMENT_EVENT
                and not string_literal_tag
                and not element._should_pretty_print()):
                # We are about to enter string literal mode. Add
                # whitespace before this tag, but not after. We
                # will stay in string literal mode until this tag
                # is closed.
                indent_before = True
                indent_after = False
                string_literal_tag = element
            elif (event is Tag.END_ELEMENT_EVENT
                  and element is string_literal_tag):
                # We are about to exit string literal mode by closing
                # the tag that sent us into that mode. Add whitespace
                # after this tag, but not before.
                indent_before = False
                indent_after = True
                string_literal_tag = None

            # Now we know whether to add whitespace before and/or
            # after this element.
            if indent_level is not None:
                if (indent_before or indent_after):
                    if isinstance(element, NavigableString):
                        piece = piece.strip()
                    if piece:
                        piece = self._indent_string(
                            piece, indent_level, formatter,
                            indent_before, indent_after
                        )
                if event == Tag.START_ELEMENT_EVENT:
                    indent_level += 1
            pieces.append(piece)
        return "".join(pieces)
1776
+
1777
# Sentinel objects naming the event types produced by _event_stream.
START_ELEMENT_EVENT = object()
END_ELEMENT_EVENT = object()
EMPTY_ELEMENT_EVENT = object()
STRING_ELEMENT_EVENT = object()

def _event_stream(self, iterator=None):
    """Yield a flat sequence of (event, element) pairs that can be used
    to reconstruct the DOM for this element.

    This is a non-recursive, SAX-like walk: instead of nested method
    calls, callers consume a stream of start/end/empty/string events,
    each paired with the Tag or string it concerns. The interface is
    simpler than SAX and is intended for internal use only.

    :param iterator: An alternate iterator to use when traversing
        the tree; defaults to self_and_descendants.
    """
    open_tags = []

    walker = iterator or self.self_and_descendants

    for node in walker:
        # Any tag still on the stack that is not this node's parent
        # must have closed before this node appeared.
        while open_tags and node.parent != open_tags[-1]:
            yield Tag.END_ELEMENT_EVENT, open_tags.pop()

        if not isinstance(node, Tag):
            yield Tag.STRING_ELEMENT_EVENT, node
        elif node.is_empty_element:
            yield Tag.EMPTY_ELEMENT_EVENT, node
        else:
            yield Tag.START_ELEMENT_EVENT, node
            open_tags.append(node)

    # Close whatever is still open once the traversal is exhausted.
    while open_tags:
        yield Tag.END_ELEMENT_EVENT, open_tags.pop()
1824
+
1825
+ def _indent_string(self, s, indent_level, formatter,
1826
+ indent_before, indent_after):
1827
+ """Add indentation whitespace before and/or after a string.
1828
+
1829
+ :param s: The string to amend with whitespace.
1830
+ :param indent_level: The indentation level; affects how much
1831
+ whitespace goes before the string.
1832
+ :param indent_before: Whether or not to add whitespace
1833
+ before the string.
1834
+ :param indent_after: Whether or not to add whitespace
1835
+ (a newline) after the string.
1836
+ """
1837
+ space_before = ''
1838
+ if indent_before and indent_level:
1839
+ space_before = (formatter.indent * indent_level)
1840
+
1841
+ space_after = ''
1842
+ if indent_after:
1843
+ space_after = "\n"
1844
+
1845
+ return space_before + s + space_after
1846
+
1847
def _format_tag(self, eventual_encoding, formatter, opening):
    """Render this tag's markup -- either the opening form
    ("<tag attr=...>") or the closing form ("</tag>") -- as a string.

    :param eventual_encoding: The encoding the document is destined
        for; substituted into charset-bearing attribute values.
    :param formatter: A Formatter object controlling attribute order
        and entity substitution.
    :param opening: True to render an opening tag, False for a
        closing tag.
    """
    if self.hidden:
        # A hidden tag is invisible, although its contents
        # are visible.
        return ''

    # A tag starts with '<'; a closing tag follows it with '/'.
    closing_slash = '' if opening else '/'

    # Optional namespace prefix.
    prefix = (self.prefix + ":") if self.prefix else ''

    # Attribute values are rendered only on an opening tag.
    attribute_string = ''
    if opening:
        rendered = []
        for key, val in formatter.attributes(self):
            if val is None:
                # Boolean attribute: just the bare name.
                rendered.append(key)
                continue
            if isinstance(val, (list, tuple)):
                val = ' '.join(val)
            elif not isinstance(val, str):
                val = str(val)
            elif (
                isinstance(val, AttributeValueWithCharsetSubstitution)
                and eventual_encoding is not None
            ):
                # e.g. a <meta charset=...> value tracking the
                # document's eventual encoding.
                val = val.encode(eventual_encoding)
            text = formatter.attribute_value(val)
            rendered.append(
                str(key) + '=' + formatter.quoted_attribute_value(text))
        if rendered:
            attribute_string = ' ' + ' '.join(rendered)

    # A void element in an XML document may take a closing slash
    # before '>'.
    void_element_closing_slash = ''
    if self.is_empty_element:
        void_element_closing_slash = formatter.void_element_close_prefix or ''

    # Put it all together.
    return ('<' + closing_slash + prefix + self.name
            + attribute_string + void_element_closing_slash + '>')
1900
+
1901
+ def _should_pretty_print(self, indent_level=1):
1902
+ """Should this tag be pretty-printed?
1903
+
1904
+ Most of them should, but some (such as <pre> in HTML
1905
+ documents) should not.
1906
+ """
1907
+ return (
1908
+ indent_level is not None
1909
+ and (
1910
+ not self.preserve_whitespace_tags
1911
+ or self.name not in self.preserve_whitespace_tags
1912
+ )
1913
+ )
1914
+
1915
def prettify(self, encoding=None, formatter="minimal"):
    """Pretty-print this PageElement as a string.

    :param encoding: The eventual encoding of the string. If this is None,
        a Unicode string will be returned.
    :param formatter: A Formatter object, or a string naming one of
        the standard formatters.
    :return: A Unicode string (if encoding==None) or a bytestring
        (otherwise).
    """
    if encoding is not None:
        return self.encode(encoding, True, formatter=formatter)
    return self.decode(True, formatter=formatter)
1929
+
1930
def decode_contents(self, indent_level=None,
                    eventual_encoding=DEFAULT_OUTPUT_ENCODING,
                    formatter="minimal"):
    """Render only the contents of this tag (not the tag itself) as a
    Unicode string.

    :param indent_level: Each line of the rendering will be indented
        this many levels. (The formatter decides what a 'level' means
        in terms of spaces or other characters output.) Used
        internally in recursive calls while pretty-printing.

    :param eventual_encoding: The tag is destined to be encoded into
        this encoding. decode_contents() is _not_ responsible for
        performing that encoding. This information is passed in so
        that it can be substituted in if the document contains a
        <META> tag that mentions the document's encoding.

    :param formatter: A Formatter object, or a string naming one of
        the standard Formatters.
    """
    # Iterating over self.descendants (rather than
    # self_and_descendants) skips this tag's own markup.
    return self.decode(
        indent_level, eventual_encoding, formatter,
        iterator=self.descendants)
1954
+
1955
def encode_contents(
        self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
        formatter="minimal"):
    """Render the contents of this PageElement as a bytestring.

    :param indent_level: Each line of the rendering will be indented
        this many levels. (The formatter decides what a 'level' means
        in terms of spaces or other characters output.) Used
        internally in recursive calls while pretty-printing.

    :param encoding: The bytestring will be in this encoding.

    :param formatter: A Formatter object, or a string naming one of
        the standard Formatters.

    :return: A bytestring.
    """
    decoded = self.decode_contents(indent_level, encoding, formatter)
    return decoded.encode(encoding)
1975
+
1976
# Old method for BS3 compatibility
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                   prettyPrint=False, indentLevel=0):
    """Deprecated BS3 name for encode_contents()."""
    # A None indent level disables pretty-printing downstream.
    effective_level = indentLevel if prettyPrint else None
    return self.encode_contents(
        indent_level=effective_level, encoding=encoding)
1984
+
1985
#Soup methods

def find(self, name=None, attrs={}, recursive=True, string=None,
         **kwargs):
    """Look in the children of this PageElement and find the first
    PageElement that matches the given criteria.

    All find_* methods take a common set of arguments. See the online
    documentation for detailed explanations.

    :param name: A filter on tag name.
    :param attrs: A dictionary of filters on attribute values.
    :param recursive: If this is True, find() will perform a
        recursive search of this PageElement's children. Otherwise,
        only the direct children will be considered.
    :kwargs: A dictionary of filters on attribute values.
    :return: A PageElement, or None if nothing matched.
    :rtype: bs4.element.Tag | bs4.element.NavigableString
    """
    # Delegate to find_all with a limit of 1 and return the sole hit.
    results = self.find_all(name, attrs, recursive, string, 1,
                            _stacklevel=3, **kwargs)
    return results[0] if results else None
findChild = find #BS2
2012
+
2013
def find_all(self, name=None, attrs={}, recursive=True, string=None,
             limit=None, **kwargs):
    """Look in the children of this PageElement and find all
    PageElements that match the given criteria.

    All find_* methods take a common set of arguments. See the online
    documentation for detailed explanations.

    :param name: A filter on tag name.
    :param attrs: A dictionary of filters on attribute values.
    :param recursive: If this is True, find_all() will perform a
        recursive search of this PageElement's children. Otherwise,
        only the direct children will be considered.
    :param limit: Stop looking after finding this many results.
    :kwargs: A dictionary of filters on attribute values.
    :return: A ResultSet of PageElements.
    :rtype: bs4.element.ResultSet
    """
    generator = self.descendants if recursive else self.children
    # Bump the stack level so deprecation warnings point at the caller.
    _stacklevel = kwargs.pop('_stacklevel', 2)
    return self._find_all(name, attrs, string, limit, generator,
                          _stacklevel=_stacklevel + 1, **kwargs)
findAll = find_all # BS3
findChildren = find_all # BS2
2039
+
2040
#Generator methods
@property
def children(self):
    """Iterate over all direct children of this PageElement.

    :yield: A sequence of PageElements.
    """
    # Wrapping in iter() makes it explicit that callers receive a
    # fresh iterator, not the underlying list.
    return iter(self.contents)
2049
+
2050
@property
def self_and_descendants(self):
    """Iterate over this PageElement and every PageElement below it,
    in document order.

    :yield: This element first (unless it is hidden), then each
        of its descendants.
    """
    if not self.hidden:
        yield self
    yield from self.descendants
2061
+
2062
@property
def descendants(self):
    """Iterate over every PageElement below this one, in document order.

    :yield: A sequence of PageElements.
    """
    if not self.contents:
        return
    # Follow next_element links until we step past this element's
    # last descendant.
    sentinel = self._last_descendant().next_element
    node = self.contents[0]
    while node is not sentinel:
        yield node
        node = node.next_element
2076
+
2077
# CSS selector code
def select_one(self, selector, namespaces=None, **kwargs):
    """Perform a CSS selection operation on the current element and
    return the first match.

    :param selector: A CSS selector.

    :param namespaces: A dictionary mapping namespace prefixes
        used in the CSS selector to namespace URIs. By default,
        Beautiful Soup will use the prefixes it encountered while
        parsing the document.

    :param kwargs: Keyword arguments to be passed into Soup Sieve's
        soupsieve.select() method.

    :return: A Tag.
    :rtype: bs4.element.Tag
    """
    css_api = self.css
    return css_api.select_one(selector, namespaces, **kwargs)
2095
+
2096
def select(self, selector, namespaces=None, limit=None, **kwargs):
    """Perform a CSS selection operation on the current element,
    using the SoupSieve library.

    :param selector: A string containing a CSS selector.

    :param namespaces: A dictionary mapping namespace prefixes
        used in the CSS selector to namespace URIs. By default,
        Beautiful Soup will use the prefixes it encountered while
        parsing the document.

    :param limit: After finding this number of results, stop looking.

    :param kwargs: Keyword arguments to be passed into SoupSieve's
        soupsieve.select() method.

    :return: A ResultSet of Tags.
    :rtype: bs4.element.ResultSet
    """
    css_api = self.css
    return css_api.select(selector, namespaces, limit, **kwargs)
2117
+
2118
@property
def css(self):
    """An interface to the CSS selector API, bound to this element."""
    return CSS(self)
2122
+
2123
# Old names for backwards compatibility
def childGenerator(self):
    """Deprecated generator; use the `children` property instead."""
    return self.children

def recursiveChildGenerator(self):
    """Deprecated generator; use the `descendants` property instead."""
    return self.descendants

def has_key(self, key):
    """Deprecated method; use has_attr(key) instead.

    has_key() was misleading because it checked attributes, while
    __in__ checked contents -- and it's gone from Python 3 anyway.
    """
    warnings.warn(
        'has_key is deprecated. Use has_attr(key) instead.',
        DeprecationWarning, stacklevel=2
    )
    return self.has_attr(key)
2143
+
2144
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
    """Encapsulates a number of ways of matching a markup element (tag or
    string).

    This is primarily used to underpin the find_* methods, but you can
    create one yourself and pass it in as `parse_only` to the
    `BeautifulSoup` constructor, to parse a subset of a large
    document.
    """

    def __init__(self, name=None, attrs={}, string=None, **kwargs):
        """Constructor.

        The SoupStrainer constructor takes the same arguments passed
        into the find_* methods. See the online documentation for
        detailed explanations.

        :param name: A filter on tag name.
        :param attrs: A dictionary of filters on attribute values.
        :param string: A filter for a NavigableString with specific text.
        :kwargs: A dictionary of filters on attribute values.
        """
        if string is None and 'text' in kwargs:
            string = kwargs.pop('text')
            warnings.warn(
                "The 'text' argument to the SoupStrainer constructor is deprecated. Use 'string' instead.",
                DeprecationWarning, stacklevel=2
            )

        self.name = self._normalize_search_value(name)
        if not isinstance(attrs, dict):
            # A non-dict value for attrs is shorthand for a filter on
            # the 'class' attribute.
            kwargs['class'] = attrs
            attrs = None

        if 'class_' in kwargs:
            # class_="foo" also filters on the 'class' attribute,
            # overriding any non-dict value given for attrs.
            kwargs['class'] = kwargs.pop('class_')

        if kwargs:
            if attrs:
                attrs = dict(attrs)
                attrs.update(kwargs)
            else:
                attrs = kwargs
        normalized_attrs = {
            key: self._normalize_search_value(value)
            for key, value in list(attrs.items())
        }

        self.attrs = normalized_attrs
        self.string = self._normalize_search_value(string)

        # DEPRECATED but just in case someone is checking this.
        self.text = self.string

    def _normalize_search_value(self, value):
        """Convert a search criterion to canonical form: a Unicode
        string, a list of normalized values, or an object (callable,
        regex, bool, None) used as-is.
        """
        # Unicode strings, callables, regexes, booleans and None pass
        # through untouched.
        if (value is None
                or isinstance(value, (str, bool))
                or isinstance(value, Callable)
                or hasattr(value, 'match')):
            return value

        # Bytestrings are assumed to be UTF-8.
        if isinstance(value, bytes):
            return value.decode("utf8")

        # Anything list-like becomes a list of normalized values.
        if hasattr(value, '__iter__'):
            normalized = []
            for item in value:
                if (hasattr(item, '__iter__')
                        and not isinstance(item, (bytes, str))):
                    # Almost certainly the user's mistake. In the
                    # interests of avoiding infinite loops, let it
                    # through as-is rather than recursing.
                    normalized.append(item)
                else:
                    normalized.append(self._normalize_search_value(item))
            return normalized

        # Everything else is stringified.
        return str(str(value))

    def __str__(self):
        """A human-readable representation of this SoupStrainer."""
        if self.string:
            return self.string
        return "%s|%s" % (self.name, self.attrs)

    def search_tag(self, markup_name=None, markup_attrs={}):
        """Check whether a Tag with the given name and attributes would
        match this SoupStrainer.

        Used prospectively to decide whether to even bother creating a Tag
        object.

        :param markup_name: A tag name as found in some markup.
        :param markup_attrs: A dictionary of attributes as found in some markup.

        :return: True if the prospective tag would match this SoupStrainer;
            False otherwise.
        """
        found = None
        markup = None
        if isinstance(markup_name, Tag):
            markup = markup_name
            markup_attrs = markup

        if isinstance(self.name, str):
            # Fast path: we want one specific tag name, and this
            # (unprefixed) tag has a different one.
            if markup and not markup.prefix and self.name != markup.name:
                return False

        call_function_with_tag_data = (
            isinstance(self.name, Callable)
            and not isinstance(markup_name, Tag))

        if ((not self.name)
            or call_function_with_tag_data
            or (markup and self._matches(markup, self.name))
            or (not markup and self._matches(markup_name, self.name))):
            if call_function_with_tag_data:
                match = self.name(markup_name, markup_attrs)
            else:
                match = True
                attr_map = None
                for attr, expected in list(self.attrs.items()):
                    # Build the attribute lookup map lazily, only if
                    # there are attribute filters to check.
                    if not attr_map:
                        if hasattr(markup_attrs, 'get'):
                            attr_map = markup_attrs
                        else:
                            attr_map = dict(markup_attrs)
                    if not self._matches(attr_map.get(attr), expected):
                        match = False
                        break
            if match:
                found = markup if markup else markup_name
        if found and self.string and not self._matches(found.string, self.string):
            found = None
        return found

    # For BS3 compatibility.
    searchTag = search_tag

    def search(self, markup):
        """Find all items in `markup` that match this SoupStrainer.

        Used by the core _find_all() method, which is ultimately
        called by all find_* methods.

        :param markup: A PageElement or a list of them.
        """
        found = None
        if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, str)):
            # Given a list of items, scan it for a text element that
            # matches.
            for element in markup:
                if (isinstance(element, NavigableString)
                        and self.search(element)):
                    found = element
                    break
        elif isinstance(markup, Tag):
            # Tags match on name/attributes -- but not when we're
            # searching purely for text.
            if not self.string or self.name or self.attrs:
                found = self.search_tag(markup)
        elif isinstance(markup, NavigableString) or isinstance(markup, str):
            # Text matches only when no tag-oriented criteria are set.
            if (not self.name and not self.attrs
                    and self._matches(markup, self.string)):
                found = markup
        else:
            raise Exception(
                "I don't know how to match against a %s" % markup.__class__)
        return found

    def _matches(self, markup, match_against, already_tried=None):
        """Test one piece of markup against one criterion."""
        result = False
        if isinstance(markup, (list, tuple)):
            # A multi-valued attribute such as 'class': match if any
            # single value matches.
            for item in markup:
                if self._matches(item, match_against):
                    return True
            # No single value matched, but maybe the whole attribute
            # matches when its values are joined into one string.
            if self._matches(' '.join(markup), match_against):
                return True
            return False

        if match_against is True:
            # True matches any non-None value.
            return markup is not None

        if isinstance(match_against, Callable):
            return match_against(markup)

        # Custom callables take the tag as an argument, but all
        # other ways of matching match the tag name as a string.
        original_markup = markup
        if isinstance(markup, Tag):
            markup = markup.name

        # Ensure that `markup` is either a Unicode string, or None.
        markup = self._normalize_search_value(markup)

        if markup is None:
            # None matches None, False, an empty string, an empty
            # list, and so on.
            return not match_against

        if (hasattr(match_against, '__iter__')
                and not isinstance(match_against, str)):
            # The markup must match at least one item in the iterable.
            # Track what we've already tried, to avoid infinite
            # recursion.
            if not already_tried:
                already_tried = set()
            for candidate in match_against:
                key = candidate if candidate.__hash__ else id(candidate)
                if key in already_tried:
                    continue
                already_tried.add(key)
                if self._matches(original_markup, candidate, already_tried):
                    return True
            return False

        # Beyond this point we might need to run the test twice: once
        # against the tag's name and once against its prefixed name.
        if isinstance(match_against, str):
            # Exact string match.
            result = markup == match_against

        if not result and hasattr(match_against, 'search'):
            # Regexp match.
            return match_against.search(markup)

        if (not result
                and isinstance(original_markup, Tag)
                and original_markup.prefix):
            # Try the whole thing again with the prefixed tag name.
            return self._matches(
                original_markup.prefix + ':' + original_markup.name,
                match_against
            )

        return result
2417
+
2418
+
2419
class ResultSet(list):
    """A list subclass that remembers the SoupStrainer that built it."""

    def __init__(self, source, result=()):
        """Constructor.

        :param source: A SoupStrainer.
        :param result: A list of PageElements.
        """
        super(ResultSet, self).__init__(result)
        self.source = source

    def __getattr__(self, key):
        """Raise a helpful exception to explain a common code fix."""
        # Only reached when normal attribute lookup fails, i.e. when
        # someone treats this list like a single element.
        raise AttributeError(
            "ResultSet object has no attribute '%s'. You're probably treating a list of elements like a single element. Did you call find_all() when you meant to call find()?" % key
        )
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/formatter.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from bs4.dammit import EntitySubstitution
2
+
3
class Formatter(EntitySubstitution):
    """Describes a strategy to use when outputting a parse tree to a string.

    Some parts of this strategy come from the distinction between
    HTML4, HTML5, and XML. Others are configurable by the user.

    Formatters are passed in as the `formatter` argument to methods
    like `PageElement.encode`. Most people won't need to think about
    formatters, and most people who need to think about them can pass
    in one of these predefined strings as `formatter` rather than
    making a new Formatter object:

    For HTML documents:
     * 'html' - HTML entity substitution for generic HTML documents. (default)
     * 'html5' - HTML entity substitution for HTML5 documents, as
       well as some optimizations in the way tags are rendered.
     * 'minimal' - Only make the substitutions necessary to guarantee
       valid HTML.
     * None - Do not perform any substitution. This will be faster
       but may result in invalid markup.

    For XML documents:
     * 'html' - Entity substitution for XHTML documents.
     * 'minimal' - Only make the substitutions necessary to guarantee
       valid XML. (default)
     * None - Do not perform any substitution. This will be faster
       but may result in invalid markup.
    """
    # Registries of XML and HTML formatters.
    XML_FORMATTERS = {}
    HTML_FORMATTERS = {}

    HTML = 'html'
    XML = 'xml'

    HTML_DEFAULTS = dict(
        cdata_containing_tags=set(["script", "style"]),
    )

    def _default(self, language, value, kwarg):
        # Resolve a constructor argument left as None to the default
        # for the given markup language: empty for XML, the value from
        # HTML_DEFAULTS otherwise.
        if value is not None:
            return value
        if language == self.XML:
            return set()
        return self.HTML_DEFAULTS[kwarg]

    def __init__(
            self, language=None, entity_substitution=None,
            void_element_close_prefix='/', cdata_containing_tags=None,
            empty_attributes_are_booleans=False, indent=1,
    ):
        r"""Constructor.

        :param language: This should be Formatter.XML if you are formatting
           XML markup and Formatter.HTML if you are formatting HTML markup.

        :param entity_substitution: A function to call to replace special
           characters with XML/HTML entities. For examples, see
           bs4.dammit.EntitySubstitution.substitute_html and substitute_xml.
        :param void_element_close_prefix: By default, void elements
           are represented as <tag/> (XML rules) rather than <tag>
           (HTML rules). To get <tag>, pass in the empty string.
        :param cdata_containing_tags: The list of tags that are defined
           as containing CDATA in this dialect. For example, in HTML,
           <script> and <style> tags are defined as containing CDATA,
           and their contents should not be formatted.
        :param empty_attributes_are_booleans: Render attributes whose value
           is the empty string as HTML-style boolean attributes.
           (Attributes whose value is None are always rendered this way.)

        :param indent: If indent is a non-negative integer or string,
           then the contents of elements will be indented
           appropriately when pretty-printing. An indent level of 0,
           negative, or "" will only insert newlines. Using a
           positive integer indent indents that many spaces per
           level. If indent is a string (such as "\t"), that string
           is used to indent each level. The default behavior is to
           indent one space per level.
        """
        self.language = language
        self.entity_substitution = entity_substitution
        self.void_element_close_prefix = void_element_close_prefix
        self.cdata_containing_tags = self._default(
            language, cdata_containing_tags, 'cdata_containing_tags'
        )
        self.empty_attributes_are_booleans = empty_attributes_are_booleans
        # Normalize `indent` to the literal whitespace string emitted
        # per indentation level.
        if indent is None:
            indent = 0
        if isinstance(indent, int):
            if indent < 0:
                indent = 0
            indent = ' ' * indent
        elif not isinstance(indent, str):
            # Unrecognized type: fall back to one space per level.
            indent = ' '
        self.indent = indent

    def substitute(self, ns):
        """Process a string that needs to undergo entity substitution.
        This may be a string encountered in an attribute value or as
        text.

        :param ns: A string.
        :return: A string with certain characters replaced by named
           or numeric entities.
        """
        if not self.entity_substitution:
            return ns
        from .element import NavigableString
        if (isinstance(ns, NavigableString)
                and ns.parent is not None
                and ns.parent.name in self.cdata_containing_tags):
            # Text inside a CDATA-containing tag (e.g. <script>) must
            # not be altered.
            return ns
        # Substitute.
        return self.entity_substitution(ns)

    def attribute_value(self, value):
        """Process the value of an attribute.

        :param value: A string.
        :return: A string with certain characters replaced by named
           or numeric entities.
        """
        return self.substitute(value)

    def attributes(self, tag):
        """Reorder a tag's attributes however you want.

        By default, attributes are sorted alphabetically. This makes
        behavior consistent between Python 2 and Python 3, and preserves
        backwards compatibility with older versions of Beautiful Soup.

        If `empty_attributes_are_booleans` is True, then attributes whose
        values are set to the empty string will be treated as boolean
        attributes (their value becomes None).
        """
        if tag.attrs is None:
            return []
        return sorted(
            (k, (None if self.empty_attributes_are_booleans and v == '' else v))
            for k, v in list(tag.attrs.items())
        )
147
+
148
class HTMLFormatter(Formatter):
    """A generic Formatter for HTML markup."""
    REGISTRY = {}

    def __init__(self, *args, **kwargs):
        # Pin the language to HTML; all other options pass through.
        super(HTMLFormatter, self).__init__(self.HTML, *args, **kwargs)
153
+
154
+
155
class XMLFormatter(Formatter):
    """A generic Formatter for XML markup."""
    REGISTRY = {}

    def __init__(self, *args, **kwargs):
        # Pin the language to XML; all other options pass through.
        super(XMLFormatter, self).__init__(self.XML, *args, **kwargs)
160
+
161
+
162
# Set up aliases for the default formatters.
HTMLFormatter.REGISTRY['html'] = HTMLFormatter(
    entity_substitution=EntitySubstitution.substitute_html
)
HTMLFormatter.REGISTRY["html5"] = HTMLFormatter(
    entity_substitution=EntitySubstitution.substitute_html,
    void_element_close_prefix=None,
    empty_attributes_are_booleans=True,
)
HTMLFormatter.REGISTRY["minimal"] = HTMLFormatter(
    entity_substitution=EntitySubstitution.substitute_xml
)
HTMLFormatter.REGISTRY[None] = HTMLFormatter(
    entity_substitution=None
)
XMLFormatter.REGISTRY["html"] = XMLFormatter(
    entity_substitution=EntitySubstitution.substitute_html
)
XMLFormatter.REGISTRY["minimal"] = XMLFormatter(
    entity_substitution=EntitySubstitution.substitute_xml
)
# BUG FIX: the original registered
#   Formatter(Formatter(Formatter.XML, entity_substitution=None))
# which passes a Formatter instance as the `language` argument, so the
# None entry was not actually configured for XML (it picked up the HTML
# defaults, e.g. cdata_containing_tags). Register a real XML formatter
# with no entity substitution instead.
XMLFormatter.REGISTRY[None] = XMLFormatter(
    entity_substitution=None
)
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/tests/__init__.py ADDED
@@ -0,0 +1,1177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # encoding: utf-8
2
+ """Helper classes for tests."""
3
+
4
+ # Use of this source code is governed by the MIT license.
5
+ __license__ = "MIT"
6
+
7
+ import pickle
8
+ import copy
9
+ import functools
10
+ import warnings
11
+ import pytest
12
+ from bs4 import BeautifulSoup
13
+ from bs4.element import (
14
+ CharsetMetaAttributeValue,
15
+ Comment,
16
+ ContentMetaAttributeValue,
17
+ Doctype,
18
+ PYTHON_SPECIFIC_ENCODINGS,
19
+ SoupStrainer,
20
+ Script,
21
+ Stylesheet,
22
+ Tag
23
+ )
24
+
25
+ from bs4.builder import (
26
+ DetectsXMLParsedAsHTML,
27
+ HTMLParserTreeBuilder,
28
+ XMLParsedAsHTMLWarning,
29
+ )
30
+ default_builder = HTMLParserTreeBuilder
31
+
32
+ # Some tests depend on specific third-party libraries. We use
33
+ # @pytest.mark.skipIf on the following conditionals to skip them
34
+ # if the libraries are not installed.
35
+ try:
36
+ from soupsieve import SelectorSyntaxError
37
+ SOUP_SIEVE_PRESENT = True
38
+ except ImportError:
39
+ SOUP_SIEVE_PRESENT = False
40
+
41
+ try:
42
+ import html5lib
43
+ HTML5LIB_PRESENT = True
44
+ except ImportError:
45
+ HTML5LIB_PRESENT = False
46
+
47
+ try:
48
+ import lxml.etree
49
+ LXML_PRESENT = True
50
+ LXML_VERSION = lxml.etree.LXML_VERSION
51
+ except ImportError:
52
+ LXML_PRESENT = False
53
+ LXML_VERSION = (0,)
54
+
55
+ BAD_DOCUMENT = """A bare string
56
+ <!DOCTYPE xsl:stylesheet SYSTEM "htmlent.dtd">
57
+ <!DOCTYPE xsl:stylesheet PUBLIC "htmlent.dtd">
58
+ <div><![CDATA[A CDATA section where it doesn't belong]]></div>
59
+ <div><svg><![CDATA[HTML5 does allow CDATA sections in SVG]]></svg></div>
60
+ <div>A <meta> tag</div>
61
+ <div>A <br> tag that supposedly has contents.</br></div>
62
+ <div>AT&T</div>
63
+ <div><textarea>Within a textarea, markup like <b> tags and <&<&amp; should be treated as literal</textarea></div>
64
+ <div><script>if (i < 2) { alert("<b>Markup within script tags should be treated as literal.</b>"); }</script></div>
65
+ <div>This numeric entity is missing the final semicolon: <x t="pi&#241ata"></div>
66
+ <div><a href="http://example.com/</a> that attribute value never got closed</div>
67
+ <div><a href="foo</a>, </a><a href="bar">that attribute value was closed by the subsequent tag</a></div>
68
+ <! This document starts with a bogus declaration ><div>a</div>
69
+ <div>This document contains <!an incomplete declaration <div>(do you see it?)</div>
70
+ <div>This document ends with <!an incomplete declaration
71
+ <div><a style={height:21px;}>That attribute value was bogus</a></div>
72
+ <! DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">The doctype is invalid because it contains extra whitespace
73
+ <div><table><td nowrap>That boolean attribute had no value</td></table></div>
74
+ <div>Here's a nonexistent entity: &#foo; (do you see it?)</div>
75
+ <div>This document ends before the entity finishes: &gt
76
+ <div><p>Paragraphs shouldn't contain block display elements, but this one does: <dl><dt>you see?</dt></p>
77
+ <b b="20" a="1" b="10" a="2" a="3" a="4">Multiple values for the same attribute.</b>
78
+ <div><table><tr><td>Here's a table</td></tr></table></div>
79
+ <div><table id="1"><tr><td>Here's a nested table:<table id="2"><tr><td>foo</td></tr></table></td></div>
80
+ <div>This tag contains nothing but whitespace: <b> </b></div>
81
+ <div><blockquote><p><b>This p tag is cut off by</blockquote></p>the end of the blockquote tag</div>
82
+ <div><table><div>This table contains bare markup</div></table></div>
83
+ <div><div id="1">\n <a href="link1">This link is never closed.\n</div>\n<div id="2">\n <div id="3">\n <a href="link2">This link is closed.</a>\n </div>\n</div></div>
84
+ <div>This document contains a <!DOCTYPE surprise>surprise doctype</div>
85
+ <div><a><B><Cd><EFG>Mixed case tags are folded to lowercase</efg></CD></b></A></div>
86
+ <div><our\u2603>Tag name contains Unicode characters</our\u2603></div>
87
+ <div><a \u2603="snowman">Attribute name contains Unicode characters</a></div>
88
+ <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
89
+ """
90
+
91
+
92
+ class SoupTest(object):
93
+
94
+ @property
95
+ def default_builder(self):
96
+ return default_builder
97
+
98
+ def soup(self, markup, **kwargs):
99
+ """Build a Beautiful Soup object from markup."""
100
+ builder = kwargs.pop('builder', self.default_builder)
101
+ return BeautifulSoup(markup, builder=builder, **kwargs)
102
+
103
+ def document_for(self, markup, **kwargs):
104
+ """Turn an HTML fragment into a document.
105
+
106
+ The details depend on the builder.
107
+ """
108
+ return self.default_builder(**kwargs).test_fragment_to_document(markup)
109
+
110
+ def assert_soup(self, to_parse, compare_parsed_to=None):
111
+ """Parse some markup using Beautiful Soup and verify that
112
+ the output markup is as expected.
113
+ """
114
+ builder = self.default_builder
115
+ obj = BeautifulSoup(to_parse, builder=builder)
116
+ if compare_parsed_to is None:
117
+ compare_parsed_to = to_parse
118
+
119
+ # Verify that the documents come out the same.
120
+ assert obj.decode() == self.document_for(compare_parsed_to)
121
+
122
+ # Also run some checks on the BeautifulSoup object itself:
123
+
124
+ # Verify that every tag that was opened was eventually closed.
125
+
126
+ # There are no tags in the open tag counter.
127
+ assert all(v==0 for v in list(obj.open_tag_counter.values()))
128
+
129
+ # The only tag in the tag stack is the one for the root
130
+ # document.
131
+ assert [obj.ROOT_TAG_NAME] == [x.name for x in obj.tagStack]
132
+
133
+ assertSoupEquals = assert_soup
134
+
135
+ def assertConnectedness(self, element):
136
+ """Ensure that next_element and previous_element are properly
137
+ set for all descendants of the given element.
138
+ """
139
+ earlier = None
140
+ for e in element.descendants:
141
+ if earlier:
142
+ assert e == earlier.next_element
143
+ assert earlier == e.previous_element
144
+ earlier = e
145
+
146
+ def linkage_validator(self, el, _recursive_call=False):
147
+ """Ensure proper linkage throughout the document."""
148
+ descendant = None
149
+ # Document element should have no previous element or previous sibling.
150
+ # It also shouldn't have a next sibling.
151
+ if el.parent is None:
152
+ assert el.previous_element is None,\
153
+ "Bad previous_element\nNODE: {}\nPREV: {}\nEXPECTED: {}".format(
154
+ el, el.previous_element, None
155
+ )
156
+ assert el.previous_sibling is None,\
157
+ "Bad previous_sibling\nNODE: {}\nPREV: {}\nEXPECTED: {}".format(
158
+ el, el.previous_sibling, None
159
+ )
160
+ assert el.next_sibling is None,\
161
+ "Bad next_sibling\nNODE: {}\nNEXT: {}\nEXPECTED: {}".format(
162
+ el, el.next_sibling, None
163
+ )
164
+
165
+ idx = 0
166
+ child = None
167
+ last_child = None
168
+ last_idx = len(el.contents) - 1
169
+ for child in el.contents:
170
+ descendant = None
171
+
172
+ # Parent should link next element to their first child
173
+ # That child should have no previous sibling
174
+ if idx == 0:
175
+ if el.parent is not None:
176
+ assert el.next_element is child,\
177
+ "Bad next_element\nNODE: {}\nNEXT: {}\nEXPECTED: {}".format(
178
+ el, el.next_element, child
179
+ )
180
+ assert child.previous_element is el,\
181
+ "Bad previous_element\nNODE: {}\nPREV: {}\nEXPECTED: {}".format(
182
+ child, child.previous_element, el
183
+ )
184
+ assert child.previous_sibling is None,\
185
+ "Bad previous_sibling\nNODE: {}\nPREV {}\nEXPECTED: {}".format(
186
+ child, child.previous_sibling, None
187
+ )
188
+
189
+ # If not the first child, previous index should link as sibling to this index
190
+ # Previous element should match the last index or the last bubbled up descendant
191
+ else:
192
+ assert child.previous_sibling is el.contents[idx - 1],\
193
+ "Bad previous_sibling\nNODE: {}\nPREV {}\nEXPECTED {}".format(
194
+ child, child.previous_sibling, el.contents[idx - 1]
195
+ )
196
+ assert el.contents[idx - 1].next_sibling is child,\
197
+ "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format(
198
+ el.contents[idx - 1], el.contents[idx - 1].next_sibling, child
199
+ )
200
+
201
+ if last_child is not None:
202
+ assert child.previous_element is last_child,\
203
+ "Bad previous_element\nNODE: {}\nPREV {}\nEXPECTED {}\nCONTENTS {}".format(
204
+ child, child.previous_element, last_child, child.parent.contents
205
+ )
206
+ assert last_child.next_element is child,\
207
+ "Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format(
208
+ last_child, last_child.next_element, child
209
+ )
210
+
211
+ if isinstance(child, Tag) and child.contents:
212
+ descendant = self.linkage_validator(child, True)
213
+ # A bubbled up descendant should have no next siblings
214
+ assert descendant.next_sibling is None,\
215
+ "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format(
216
+ descendant, descendant.next_sibling, None
217
+ )
218
+
219
+ # Mark last child as either the bubbled up descendant or the current child
220
+ if descendant is not None:
221
+ last_child = descendant
222
+ else:
223
+ last_child = child
224
+
225
+ # If last child, there are non next siblings
226
+ if idx == last_idx:
227
+ assert child.next_sibling is None,\
228
+ "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format(
229
+ child, child.next_sibling, None
230
+ )
231
+ idx += 1
232
+
233
+ child = descendant if descendant is not None else child
234
+ if child is None:
235
+ child = el
236
+
237
+ if not _recursive_call and child is not None:
238
+ target = el
239
+ while True:
240
+ if target is None:
241
+ assert child.next_element is None, \
242
+ "Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format(
243
+ child, child.next_element, None
244
+ )
245
+ break
246
+ elif target.next_sibling is not None:
247
+ assert child.next_element is target.next_sibling, \
248
+ "Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format(
249
+ child, child.next_element, target.next_sibling
250
+ )
251
+ break
252
+ target = target.parent
253
+
254
+ # We are done, so nothing to return
255
+ return None
256
+ else:
257
+ # Return the child to the recursive caller
258
+ return child
259
+
260
+ def assert_selects(self, tags, should_match):
261
+ """Make sure that the given tags have the correct text.
262
+
263
+ This is used in tests that define a bunch of tags, each
264
+ containing a single string, and then select certain strings by
265
+ some mechanism.
266
+ """
267
+ assert [tag.string for tag in tags] == should_match
268
+
269
+ def assert_selects_ids(self, tags, should_match):
270
+ """Make sure that the given tags have the correct IDs.
271
+
272
+ This is used in tests that define a bunch of tags, each
273
+ containing a single string, and then select certain strings by
274
+ some mechanism.
275
+ """
276
+ assert [tag['id'] for tag in tags] == should_match
277
+
278
+
279
+ class TreeBuilderSmokeTest(object):
280
+ # Tests that are common to HTML and XML tree builders.
281
+
282
+ @pytest.mark.parametrize(
283
+ "multi_valued_attributes",
284
+ [None, {}, dict(b=['class']), {'*': ['notclass']}]
285
+ )
286
+ def test_attribute_not_multi_valued(self, multi_valued_attributes):
287
+ markup = '<html xmlns="http://www.w3.org/1999/xhtml"><a class="a b c"></html>'
288
+ soup = self.soup(markup, multi_valued_attributes=multi_valued_attributes)
289
+ assert soup.a['class'] == 'a b c'
290
+
291
+ @pytest.mark.parametrize(
292
+ "multi_valued_attributes", [dict(a=['class']), {'*': ['class']}]
293
+ )
294
+ def test_attribute_multi_valued(self, multi_valued_attributes):
295
+ markup = '<a class="a b c">'
296
+ soup = self.soup(
297
+ markup, multi_valued_attributes=multi_valued_attributes
298
+ )
299
+ assert soup.a['class'] == ['a', 'b', 'c']
300
+
301
+ def test_invalid_doctype(self):
302
+ markup = '<![if word]>content<![endif]>'
303
+ markup = '<!DOCTYPE html]ff>'
304
+ soup = self.soup(markup)
305
+
306
+ class HTMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
307
+
308
+ """A basic test of a treebuilder's competence.
309
+
310
+ Any HTML treebuilder, present or future, should be able to pass
311
+ these tests. With invalid markup, there's room for interpretation,
312
+ and different parsers can handle it differently. But with the
313
+ markup in these tests, there's not much room for interpretation.
314
+ """
315
+
316
+ def test_empty_element_tags(self):
317
+ """Verify that all HTML4 and HTML5 empty element (aka void element) tags
318
+ are handled correctly.
319
+ """
320
+ for name in [
321
+ 'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr',
322
+ 'spacer', 'frame'
323
+ ]:
324
+ soup = self.soup("")
325
+ new_tag = soup.new_tag(name)
326
+ assert new_tag.is_empty_element == True
327
+
328
+ def test_special_string_containers(self):
329
+ soup = self.soup(
330
+ "<style>Some CSS</style><script>Some Javascript</script>"
331
+ )
332
+ assert isinstance(soup.style.string, Stylesheet)
333
+ assert isinstance(soup.script.string, Script)
334
+
335
+ soup = self.soup(
336
+ "<style><!--Some CSS--></style>"
337
+ )
338
+ assert isinstance(soup.style.string, Stylesheet)
339
+ # The contents of the style tag resemble an HTML comment, but
340
+ # it's not treated as a comment.
341
+ assert soup.style.string == "<!--Some CSS-->"
342
+ assert isinstance(soup.style.string, Stylesheet)
343
+
344
+ def test_pickle_and_unpickle_identity(self):
345
+ # Pickling a tree, then unpickling it, yields a tree identical
346
+ # to the original.
347
+ tree = self.soup("<a><b>foo</a>")
348
+ dumped = pickle.dumps(tree, 2)
349
+ loaded = pickle.loads(dumped)
350
+ assert loaded.__class__ == BeautifulSoup
351
+ assert loaded.decode() == tree.decode()
352
+
353
+ def assertDoctypeHandled(self, doctype_fragment):
354
+ """Assert that a given doctype string is handled correctly."""
355
+ doctype_str, soup = self._document_with_doctype(doctype_fragment)
356
+
357
+ # Make sure a Doctype object was created.
358
+ doctype = soup.contents[0]
359
+ assert doctype.__class__ == Doctype
360
+ assert doctype == doctype_fragment
361
+ assert soup.encode("utf8")[:len(doctype_str)] == doctype_str
362
+
363
+ # Make sure that the doctype was correctly associated with the
364
+ # parse tree and that the rest of the document parsed.
365
+ assert soup.p.contents[0] == 'foo'
366
+
367
+ def _document_with_doctype(self, doctype_fragment, doctype_string="DOCTYPE"):
368
+ """Generate and parse a document with the given doctype."""
369
+ doctype = '<!%s %s>' % (doctype_string, doctype_fragment)
370
+ markup = doctype + '\n<p>foo</p>'
371
+ soup = self.soup(markup)
372
+ return doctype.encode("utf8"), soup
373
+
374
+ def test_normal_doctypes(self):
375
+ """Make sure normal, everyday HTML doctypes are handled correctly."""
376
+ self.assertDoctypeHandled("html")
377
+ self.assertDoctypeHandled(
378
+ 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"')
379
+
380
+ def test_empty_doctype(self):
381
+ soup = self.soup("<!DOCTYPE>")
382
+ doctype = soup.contents[0]
383
+ assert "" == doctype.strip()
384
+
385
+ def test_mixed_case_doctype(self):
386
+ # A lowercase or mixed-case doctype becomes a Doctype.
387
+ for doctype_fragment in ("doctype", "DocType"):
388
+ doctype_str, soup = self._document_with_doctype(
389
+ "html", doctype_fragment
390
+ )
391
+
392
+ # Make sure a Doctype object was created and that the DOCTYPE
393
+ # is uppercase.
394
+ doctype = soup.contents[0]
395
+ assert doctype.__class__ == Doctype
396
+ assert doctype == "html"
397
+ assert soup.encode("utf8")[:len(doctype_str)] == b"<!DOCTYPE html>"
398
+
399
+ # Make sure that the doctype was correctly associated with the
400
+ # parse tree and that the rest of the document parsed.
401
+ assert soup.p.contents[0] == 'foo'
402
+
403
+ def test_public_doctype_with_url(self):
404
+ doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"'
405
+ self.assertDoctypeHandled(doctype)
406
+
407
+ def test_system_doctype(self):
408
+ self.assertDoctypeHandled('foo SYSTEM "http://www.example.com/"')
409
+
410
+ def test_namespaced_system_doctype(self):
411
+ # We can handle a namespaced doctype with a system ID.
412
+ self.assertDoctypeHandled('xsl:stylesheet SYSTEM "htmlent.dtd"')
413
+
414
+ def test_namespaced_public_doctype(self):
415
+ # Test a namespaced doctype with a public id.
416
+ self.assertDoctypeHandled('xsl:stylesheet PUBLIC "htmlent.dtd"')
417
+
418
+ def test_real_xhtml_document(self):
419
+ """A real XHTML document should come out more or less the same as it went in."""
420
+ markup = b"""<?xml version="1.0" encoding="utf-8"?>
421
+ <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
422
+ <html xmlns="http://www.w3.org/1999/xhtml">
423
+ <head><title>Hello.</title></head>
424
+ <body>Goodbye.</body>
425
+ </html>"""
426
+ with warnings.catch_warnings(record=True) as w:
427
+ soup = self.soup(markup)
428
+ assert soup.encode("utf-8").replace(b"\n", b"") == markup.replace(b"\n", b"")
429
+
430
+ # No warning was issued about parsing an XML document as HTML,
431
+ # because XHTML is both.
432
+ assert w == []
433
+
434
+
435
+ def test_namespaced_html(self):
436
+ # When a namespaced XML document is parsed as HTML it should
437
+ # be treated as HTML with weird tag names.
438
+ markup = b"""<ns1:foo>content</ns1:foo><ns1:foo/><ns2:foo/>"""
439
+ with warnings.catch_warnings(record=True) as w:
440
+ soup = self.soup(markup)
441
+
442
+ assert 2 == len(soup.find_all("ns1:foo"))
443
+
444
+ # n.b. no "you're parsing XML as HTML" warning was given
445
+ # because there was no XML declaration.
446
+ assert [] == w
447
+
448
+ def test_detect_xml_parsed_as_html(self):
449
+ # A warning is issued when parsing an XML document as HTML,
450
+ # but basic stuff should still work.
451
+ markup = b"""<?xml version="1.0" encoding="utf-8"?><tag>string</tag>"""
452
+ with warnings.catch_warnings(record=True) as w:
453
+ soup = self.soup(markup)
454
+ assert soup.tag.string == 'string'
455
+ [warning] = w
456
+ assert isinstance(warning.message, XMLParsedAsHTMLWarning)
457
+ assert str(warning.message) == XMLParsedAsHTMLWarning.MESSAGE
458
+
459
+ # NOTE: the warning is not issued if the document appears to
460
+ # be XHTML (tested with test_real_xhtml_document in the
461
+ # superclass) or if there is no XML declaration (tested with
462
+ # test_namespaced_html in the superclass).
463
+
464
+ def test_processing_instruction(self):
465
+ # We test both Unicode and bytestring to verify that
466
+ # process_markup correctly sets processing_instruction_class
467
+ # even when the markup is already Unicode and there is no
468
+ # need to process anything.
469
+ markup = """<?PITarget PIContent?>"""
470
+ soup = self.soup(markup)
471
+ assert markup == soup.decode()
472
+
473
+ markup = b"""<?PITarget PIContent?>"""
474
+ soup = self.soup(markup)
475
+ assert markup == soup.encode("utf8")
476
+
477
+ def test_deepcopy(self):
478
+ """Make sure you can copy the tree builder.
479
+
480
+ This is important because the builder is part of a
481
+ BeautifulSoup object, and we want to be able to copy that.
482
+ """
483
+ copy.deepcopy(self.default_builder)
484
+
485
+ def test_p_tag_is_never_empty_element(self):
486
+ """A <p> tag is never designated as an empty-element tag.
487
+
488
+ Even if the markup shows it as an empty-element tag, it
489
+ shouldn't be presented that way.
490
+ """
491
+ soup = self.soup("<p/>")
492
+ assert not soup.p.is_empty_element
493
+ assert str(soup.p) == "<p></p>"
494
+
495
+ def test_unclosed_tags_get_closed(self):
496
+ """A tag that's not closed by the end of the document should be closed.
497
+
498
+ This applies to all tags except empty-element tags.
499
+ """
500
+ self.assert_soup("<p>", "<p></p>")
501
+ self.assert_soup("<b>", "<b></b>")
502
+
503
+ self.assert_soup("<br>", "<br/>")
504
+
505
+ def test_br_is_always_empty_element_tag(self):
506
+ """A <br> tag is designated as an empty-element tag.
507
+
508
+ Some parsers treat <br></br> as one <br/> tag, some parsers as
509
+ two tags, but it should always be an empty-element tag.
510
+ """
511
+ soup = self.soup("<br></br>")
512
+ assert soup.br.is_empty_element
513
+ assert str(soup.br) == "<br/>"
514
+
515
+ def test_nested_formatting_elements(self):
516
+ self.assert_soup("<em><em></em></em>")
517
+
518
+ def test_double_head(self):
519
+ html = '''<!DOCTYPE html>
520
+ <html>
521
+ <head>
522
+ <title>Ordinary HEAD element test</title>
523
+ </head>
524
+ <script type="text/javascript">
525
+ alert("Help!");
526
+ </script>
527
+ <body>
528
+ Hello, world!
529
+ </body>
530
+ </html>
531
+ '''
532
+ soup = self.soup(html)
533
+ assert "text/javascript" == soup.find('script')['type']
534
+
535
+ def test_comment(self):
536
+ # Comments are represented as Comment objects.
537
+ markup = "<p>foo<!--foobar-->baz</p>"
538
+ self.assert_soup(markup)
539
+
540
+ soup = self.soup(markup)
541
+ comment = soup.find(string="foobar")
542
+ assert comment.__class__ == Comment
543
+
544
+ # The comment is properly integrated into the tree.
545
+ foo = soup.find(string="foo")
546
+ assert comment == foo.next_element
547
+ baz = soup.find(string="baz")
548
+ assert comment == baz.previous_element
549
+
550
+ def test_preserved_whitespace_in_pre_and_textarea(self):
551
+ """Whitespace must be preserved in <pre> and <textarea> tags,
552
+ even if that would mean not prettifying the markup.
553
+ """
554
+ pre_markup = "<pre>a z</pre>\n"
555
+ textarea_markup = "<textarea> woo\nwoo </textarea>\n"
556
+ self.assert_soup(pre_markup)
557
+ self.assert_soup(textarea_markup)
558
+
559
+ soup = self.soup(pre_markup)
560
+ assert soup.pre.prettify() == pre_markup
561
+
562
+ soup = self.soup(textarea_markup)
563
+ assert soup.textarea.prettify() == textarea_markup
564
+
565
+ soup = self.soup("<textarea></textarea>")
566
+ assert soup.textarea.prettify() == "<textarea></textarea>\n"
567
+
568
+ def test_nested_inline_elements(self):
569
+ """Inline elements can be nested indefinitely."""
570
+ b_tag = "<b>Inside a B tag</b>"
571
+ self.assert_soup(b_tag)
572
+
573
+ nested_b_tag = "<p>A <i>nested <b>tag</b></i></p>"
574
+ self.assert_soup(nested_b_tag)
575
+
576
+ double_nested_b_tag = "<p>A <a>doubly <i>nested <b>tag</b></i></a></p>"
577
+ self.assert_soup(nested_b_tag)
578
+
579
+ def test_nested_block_level_elements(self):
580
+ """Block elements can be nested."""
581
+ soup = self.soup('<blockquote><p><b>Foo</b></p></blockquote>')
582
+ blockquote = soup.blockquote
583
+ assert blockquote.p.b.string == 'Foo'
584
+ assert blockquote.b.string == 'Foo'
585
+
586
+ def test_correctly_nested_tables(self):
587
+ """One table can go inside another one."""
588
+ markup = ('<table id="1">'
589
+ '<tr>'
590
+ "<td>Here's another table:"
591
+ '<table id="2">'
592
+ '<tr><td>foo</td></tr>'
593
+ '</table></td>')
594
+
595
+ self.assert_soup(
596
+ markup,
597
+ '<table id="1"><tr><td>Here\'s another table:'
598
+ '<table id="2"><tr><td>foo</td></tr></table>'
599
+ '</td></tr></table>')
600
+
601
+ self.assert_soup(
602
+ "<table><thead><tr><td>Foo</td></tr></thead>"
603
+ "<tbody><tr><td>Bar</td></tr></tbody>"
604
+ "<tfoot><tr><td>Baz</td></tr></tfoot></table>")
605
+
606
+ def test_multivalued_attribute_with_whitespace(self):
607
+ # Whitespace separating the values of a multi-valued attribute
608
+ # should be ignored.
609
+
610
+ markup = '<div class=" foo bar "></a>'
611
+ soup = self.soup(markup)
612
+ assert ['foo', 'bar'] == soup.div['class']
613
+
614
+ # If you search by the literal name of the class it's like the whitespace
615
+ # wasn't there.
616
+ assert soup.div == soup.find('div', class_="foo bar")
617
+
618
+ def test_deeply_nested_multivalued_attribute(self):
619
+ # html5lib can set the attributes of the same tag many times
620
+ # as it rearranges the tree. This has caused problems with
621
+ # multivalued attributes.
622
+ markup = '<table><div><div class="css"></div></div></table>'
623
+ soup = self.soup(markup)
624
+ assert ["css"] == soup.div.div['class']
625
+
626
+ def test_multivalued_attribute_on_html(self):
627
+ # html5lib uses a different API to set the attributes ot the
628
+ # <html> tag. This has caused problems with multivalued
629
+ # attributes.
630
+ markup = '<html class="a b"></html>'
631
+ soup = self.soup(markup)
632
+ assert ["a", "b"] == soup.html['class']
633
+
634
+ def test_angle_brackets_in_attribute_values_are_escaped(self):
635
+ self.assert_soup('<a b="<a>"></a>', '<a b="&lt;a&gt;"></a>')
636
+
637
+ def test_strings_resembling_character_entity_references(self):
638
+ # "&T" and "&p" look like incomplete character entities, but they are
639
+ # not.
640
+ self.assert_soup(
641
+ "<p>&bull; AT&T is in the s&p 500</p>",
642
+ "<p>\u2022 AT&amp;T is in the s&amp;p 500</p>"
643
+ )
644
+
645
+ def test_apos_entity(self):
646
+ self.assert_soup(
647
+ "<p>Bob&apos;s Bar</p>",
648
+ "<p>Bob's Bar</p>",
649
+ )
650
+
651
+ def test_entities_in_foreign_document_encoding(self):
652
+ # &#147; and &#148; are invalid numeric entities referencing
653
+ # Windows-1252 characters. &#45; references a character common
654
+ # to Windows-1252 and Unicode, and &#9731; references a
655
+ # character only found in Unicode.
656
+ #
657
+ # All of these entities should be converted to Unicode
658
+ # characters.
659
+ markup = "<p>&#147;Hello&#148; &#45;&#9731;</p>"
660
+ soup = self.soup(markup)
661
+ assert "“Hello” -☃" == soup.p.string
662
+
663
+ def test_entities_in_attributes_converted_to_unicode(self):
664
+ expect = '<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>'
665
+ self.assert_soup('<p id="pi&#241;ata"></p>', expect)
666
+ self.assert_soup('<p id="pi&#xf1;ata"></p>', expect)
667
+ self.assert_soup('<p id="pi&#Xf1;ata"></p>', expect)
668
+ self.assert_soup('<p id="pi&ntilde;ata"></p>', expect)
669
+
670
+ def test_entities_in_text_converted_to_unicode(self):
671
+ expect = '<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>'
672
+ self.assert_soup("<p>pi&#241;ata</p>", expect)
673
+ self.assert_soup("<p>pi&#xf1;ata</p>", expect)
674
+ self.assert_soup("<p>pi&#Xf1;ata</p>", expect)
675
+ self.assert_soup("<p>pi&ntilde;ata</p>", expect)
676
+
677
+ def test_quot_entity_converted_to_quotation_mark(self):
678
+ self.assert_soup("<p>I said &quot;good day!&quot;</p>",
679
+ '<p>I said "good day!"</p>')
680
+
681
+ def test_out_of_range_entity(self):
682
+ expect = "\N{REPLACEMENT CHARACTER}"
683
+ self.assert_soup("&#10000000000000;", expect)
684
+ self.assert_soup("&#x10000000000000;", expect)
685
+ self.assert_soup("&#1000000000;", expect)
686
+
687
+ def test_multipart_strings(self):
688
+ "Mostly to prevent a recurrence of a bug in the html5lib treebuilder."
689
+ soup = self.soup("<html><h2>\nfoo</h2><p></p></html>")
690
+ assert "p" == soup.h2.string.next_element.name
691
+ assert "p" == soup.p.name
692
+ self.assertConnectedness(soup)
693
+
694
+ def test_empty_element_tags(self):
695
+ """Verify consistent handling of empty-element tags,
696
+ no matter how they come in through the markup.
697
+ """
698
+ self.assert_soup('<br/><br/><br/>', "<br/><br/><br/>")
699
+ self.assert_soup('<br /><br /><br />', "<br/><br/><br/>")
700
+
701
+ def test_head_tag_between_head_and_body(self):
702
+ "Prevent recurrence of a bug in the html5lib treebuilder."
703
+ content = """<html><head></head>
704
+ <link></link>
705
+ <body>foo</body>
706
+ </html>
707
+ """
708
+ soup = self.soup(content)
709
+ assert soup.html.body is not None
710
+ self.assertConnectedness(soup)
711
+
712
+ def test_multiple_copies_of_a_tag(self):
713
+ "Prevent recurrence of a bug in the html5lib treebuilder."
714
+ content = """<!DOCTYPE html>
715
+ <html>
716
+ <body>
717
+ <article id="a" >
718
+ <div><a href="1"></div>
719
+ <footer>
720
+ <a href="2"></a>
721
+ </footer>
722
+ </article>
723
+ </body>
724
+ </html>
725
+ """
726
+ soup = self.soup(content)
727
+ self.assertConnectedness(soup.article)
728
+
729
+ def test_basic_namespaces(self):
730
+ """Parsers don't need to *understand* namespaces, but at the
731
+ very least they should not choke on namespaces or lose
732
+ data."""
733
+
734
+ markup = b'<html xmlns="http://www.w3.org/1999/xhtml" xmlns:mathml="http://www.w3.org/1998/Math/MathML" xmlns:svg="http://www.w3.org/2000/svg"><head></head><body><mathml:msqrt>4</mathml:msqrt><b svg:fill="red"></b></body></html>'
735
+ soup = self.soup(markup)
736
+ assert markup == soup.encode()
737
+ html = soup.html
738
+ assert 'http://www.w3.org/1999/xhtml' == soup.html['xmlns']
739
+ assert 'http://www.w3.org/1998/Math/MathML' == soup.html['xmlns:mathml']
740
+ assert 'http://www.w3.org/2000/svg' == soup.html['xmlns:svg']
741
+
742
+ def test_multivalued_attribute_value_becomes_list(self):
743
+ markup = b'<a class="foo bar">'
744
+ soup = self.soup(markup)
745
+ assert ['foo', 'bar'] == soup.a['class']
746
+
747
+ #
748
+ # Generally speaking, tests below this point are more tests of
749
+ # Beautiful Soup than tests of the tree builders. But parsers are
750
+ # weird, so we run these tests separately for every tree builder
751
+ # to detect any differences between them.
752
+ #
753
+
754
+ def test_can_parse_unicode_document(self):
755
+ # A seemingly innocuous document... but it's in Unicode! And
756
+ # it contains characters that can't be represented in the
757
+ # encoding found in the declaration! The horror!
758
+ markup = '<html><head><meta encoding="euc-jp"></head><body>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</body>'
759
+ soup = self.soup(markup)
760
+ assert 'Sacr\xe9 bleu!' == soup.body.string
761
+
762
+ def test_soupstrainer(self):
763
+ """Parsers should be able to work with SoupStrainers."""
764
+ strainer = SoupStrainer("b")
765
+ soup = self.soup("A <b>bold</b> <meta/> <i>statement</i>",
766
+ parse_only=strainer)
767
+ assert soup.decode() == "<b>bold</b>"
768
+
769
+ def test_single_quote_attribute_values_become_double_quotes(self):
770
+ self.assert_soup("<foo attr='bar'></foo>",
771
+ '<foo attr="bar"></foo>')
772
+
773
+ def test_attribute_values_with_nested_quotes_are_left_alone(self):
774
+ text = """<foo attr='bar "brawls" happen'>a</foo>"""
775
+ self.assert_soup(text)
776
+
777
+ def test_attribute_values_with_double_nested_quotes_get_quoted(self):
778
+ text = """<foo attr='bar "brawls" happen'>a</foo>"""
779
+ soup = self.soup(text)
780
+ soup.foo['attr'] = 'Brawls happen at "Bob\'s Bar"'
781
+ self.assert_soup(
782
+ soup.foo.decode(),
783
+ """<foo attr="Brawls happen at &quot;Bob\'s Bar&quot;">a</foo>""")
784
+
785
+ def test_ampersand_in_attribute_value_gets_escaped(self):
786
+ self.assert_soup('<this is="really messed up & stuff"></this>',
787
+ '<this is="really messed up &amp; stuff"></this>')
788
+
789
+ self.assert_soup(
790
+ '<a href="http://example.org?a=1&b=2;3">foo</a>',
791
+ '<a href="http://example.org?a=1&amp;b=2;3">foo</a>')
792
+
793
+ def test_escaped_ampersand_in_attribute_value_is_left_alone(self):
794
+ self.assert_soup('<a href="http://example.org?a=1&amp;b=2;3"></a>')
795
+
796
+ def test_entities_in_strings_converted_during_parsing(self):
797
+ # Both XML and HTML entities are converted to Unicode characters
798
+ # during parsing.
799
+ text = "<p>&lt;&lt;sacr&eacute;&#32;bleu!&gt;&gt;</p>"
800
+ expected = "<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>"
801
+ self.assert_soup(text, expected)
802
+
803
+ def test_smart_quotes_converted_on_the_way_in(self):
804
+ # Microsoft smart quotes are converted to Unicode characters during
805
+ # parsing.
806
+ quote = b"<p>\x91Foo\x92</p>"
807
+ soup = self.soup(quote)
808
+ assert soup.p.string == "\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}"
809
+
810
+ def test_non_breaking_spaces_converted_on_the_way_in(self):
811
+ soup = self.soup("<a>&nbsp;&nbsp;</a>")
812
+ assert soup.a.string == "\N{NO-BREAK SPACE}" * 2
813
+
814
+ def test_entities_converted_on_the_way_out(self):
815
+ text = "<p>&lt;&lt;sacr&eacute;&#32;bleu!&gt;&gt;</p>"
816
+ expected = "<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>".encode("utf-8")
817
+ soup = self.soup(text)
818
+ assert soup.p.encode("utf-8") == expected
819
+
820
+ def test_real_iso_8859_document(self):
821
+ # Smoke test of interrelated functionality, using an
822
+ # easy-to-understand document.
823
+
824
+ # Here it is in Unicode. Note that it claims to be in ISO-8859-1.
825
+ unicode_html = '<html><head><meta content="text/html; charset=ISO-8859-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>'
826
+
827
+ # That's because we're going to encode it into ISO-8859-1,
828
+ # and use that to test.
829
+ iso_latin_html = unicode_html.encode("iso-8859-1")
830
+
831
+ # Parse the ISO-8859-1 HTML.
832
+ soup = self.soup(iso_latin_html)
833
+
834
+ # Encode it to UTF-8.
835
+ result = soup.encode("utf-8")
836
+
837
+ # What do we expect the result to look like? Well, it would
838
+ # look like unicode_html, except that the META tag would say
839
+ # UTF-8 instead of ISO-8859-1.
840
+ expected = unicode_html.replace("ISO-8859-1", "utf-8")
841
+
842
+ # And, of course, it would be in UTF-8, not Unicode.
843
+ expected = expected.encode("utf-8")
844
+
845
+ # Ta-da!
846
+ assert result == expected
847
+
848
+ def test_real_shift_jis_document(self):
849
+ # Smoke test to make sure the parser can handle a document in
850
+ # Shift-JIS encoding, without choking.
851
+ shift_jis_html = (
852
+ b'<html><head></head><body><pre>'
853
+ b'\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f'
854
+ b'\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c'
855
+ b'\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B'
856
+ b'</pre></body></html>')
857
+ unicode_html = shift_jis_html.decode("shift-jis")
858
+ soup = self.soup(unicode_html)
859
+
860
+ # Make sure the parse tree is correctly encoded to various
861
+ # encodings.
862
+ assert soup.encode("utf-8") == unicode_html.encode("utf-8")
863
+ assert soup.encode("euc_jp") == unicode_html.encode("euc_jp")
864
+
865
+ def test_real_hebrew_document(self):
866
+ # A real-world test to make sure we can convert ISO-8859-9 (a
867
+ # Hebrew encoding) to UTF-8.
868
+ hebrew_document = b'<html><head><title>Hebrew (ISO 8859-8) in Visual Directionality</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\xed\xe5\xec\xf9</body></html>'
869
+ soup = self.soup(
870
+ hebrew_document, from_encoding="iso8859-8")
871
+ # Some tree builders call it iso8859-8, others call it iso-8859-9.
872
+ # That's not a difference we really care about.
873
+ assert soup.original_encoding in ('iso8859-8', 'iso-8859-8')
874
+ assert soup.encode('utf-8') == (
875
+ hebrew_document.decode("iso8859-8").encode("utf-8")
876
+ )
877
+
878
+ def test_meta_tag_reflects_current_encoding(self):
879
+ # Here's the <meta> tag saying that a document is
880
+ # encoded in Shift-JIS.
881
+ meta_tag = ('<meta content="text/html; charset=x-sjis" '
882
+ 'http-equiv="Content-type"/>')
883
+
884
+ # Here's a document incorporating that meta tag.
885
+ shift_jis_html = (
886
+ '<html><head>\n%s\n'
887
+ '<meta http-equiv="Content-language" content="ja"/>'
888
+ '</head><body>Shift-JIS markup goes here.') % meta_tag
889
+ soup = self.soup(shift_jis_html)
890
+
891
+ # Parse the document, and the charset is seemingly unaffected.
892
+ parsed_meta = soup.find('meta', {'http-equiv': 'Content-type'})
893
+ content = parsed_meta['content']
894
+ assert 'text/html; charset=x-sjis' == content
895
+
896
+ # But that value is actually a ContentMetaAttributeValue object.
897
+ assert isinstance(content, ContentMetaAttributeValue)
898
+
899
+ # And it will take on a value that reflects its current
900
+ # encoding.
901
+ assert 'text/html; charset=utf8' == content.encode("utf8")
902
+
903
+ # For the rest of the story, see TestSubstitutions in
904
+ # test_tree.py.
905
+
906
+ def test_html5_style_meta_tag_reflects_current_encoding(self):
907
+ # Here's the <meta> tag saying that a document is
908
+ # encoded in Shift-JIS.
909
+ meta_tag = ('<meta id="encoding" charset="x-sjis" />')
910
+
911
+ # Here's a document incorporating that meta tag.
912
+ shift_jis_html = (
913
+ '<html><head>\n%s\n'
914
+ '<meta http-equiv="Content-language" content="ja"/>'
915
+ '</head><body>Shift-JIS markup goes here.') % meta_tag
916
+ soup = self.soup(shift_jis_html)
917
+
918
+ # Parse the document, and the charset is seemingly unaffected.
919
+ parsed_meta = soup.find('meta', id="encoding")
920
+ charset = parsed_meta['charset']
921
+ assert 'x-sjis' == charset
922
+
923
+ # But that value is actually a CharsetMetaAttributeValue object.
924
+ assert isinstance(charset, CharsetMetaAttributeValue)
925
+
926
+ # And it will take on a value that reflects its current
927
+ # encoding.
928
+ assert 'utf8' == charset.encode("utf8")
929
+
930
+ def test_python_specific_encodings_not_used_in_charset(self):
931
+ # You can encode an HTML document using a Python-specific
932
+ # encoding, but that encoding won't be mentioned _inside_ the
933
+ # resulting document. Instead, the document will appear to
934
+ # have no encoding.
935
+ for markup in [
936
+ b'<meta charset="utf8"></head>'
937
+ b'<meta id="encoding" charset="utf-8" />'
938
+ ]:
939
+ soup = self.soup(markup)
940
+ for encoding in PYTHON_SPECIFIC_ENCODINGS:
941
+ if encoding in (
942
+ 'idna', 'mbcs', 'oem', 'undefined',
943
+ 'string_escape', 'string-escape'
944
+ ):
945
+ # For one reason or another, these will raise an
946
+ # exception if we actually try to use them, so don't
947
+ # bother.
948
+ continue
949
+ encoded = soup.encode(encoding)
950
+ assert b'meta charset=""' in encoded
951
+ assert encoding.encode("ascii") not in encoded
952
+
953
+ def test_tag_with_no_attributes_can_have_attributes_added(self):
954
+ data = self.soup("<a>text</a>")
955
+ data.a['foo'] = 'bar'
956
+ assert '<a foo="bar">text</a>' == data.a.decode()
957
+
958
+ def test_closing_tag_with_no_opening_tag(self):
959
+ # Without BeautifulSoup.open_tag_counter, the </span> tag will
960
+ # cause _popToTag to be called over and over again as we look
961
+ # for a <span> tag that wasn't there. The result is that 'text2'
962
+ # will show up outside the body of the document.
963
+ soup = self.soup("<body><div><p>text1</p></span>text2</div></body>")
964
+ assert "<body><div><p>text1</p>text2</div></body>" == soup.body.decode()
965
+
966
+ def test_worst_case(self):
967
+ """Test the worst case (currently) for linking issues."""
968
+
969
+ soup = self.soup(BAD_DOCUMENT)
970
+ self.linkage_validator(soup)
971
+
972
+
973
+ class XMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
974
+
975
+ def test_pickle_and_unpickle_identity(self):
976
+ # Pickling a tree, then unpickling it, yields a tree identical
977
+ # to the original.
978
+ tree = self.soup("<a><b>foo</a>")
979
+ dumped = pickle.dumps(tree, 2)
980
+ loaded = pickle.loads(dumped)
981
+ assert loaded.__class__ == BeautifulSoup
982
+ assert loaded.decode() == tree.decode()
983
+
984
+ def test_docstring_generated(self):
985
+ soup = self.soup("<root/>")
986
+ assert soup.encode() == b'<?xml version="1.0" encoding="utf-8"?>\n<root/>'
987
+
988
+ def test_xml_declaration(self):
989
+ markup = b"""<?xml version="1.0" encoding="utf8"?>\n<foo/>"""
990
+ soup = self.soup(markup)
991
+ assert markup == soup.encode("utf8")
992
+
993
+ def test_python_specific_encodings_not_used_in_xml_declaration(self):
994
+ # You can encode an XML document using a Python-specific
995
+ # encoding, but that encoding won't be mentioned _inside_ the
996
+ # resulting document.
997
+ markup = b"""<?xml version="1.0"?>\n<foo/>"""
998
+ soup = self.soup(markup)
999
+ for encoding in PYTHON_SPECIFIC_ENCODINGS:
1000
+ if encoding in (
1001
+ 'idna', 'mbcs', 'oem', 'undefined',
1002
+ 'string_escape', 'string-escape'
1003
+ ):
1004
+ # For one reason or another, these will raise an
1005
+ # exception if we actually try to use them, so don't
1006
+ # bother.
1007
+ continue
1008
+ encoded = soup.encode(encoding)
1009
+ assert b'<?xml version="1.0"?>' in encoded
1010
+ assert encoding.encode("ascii") not in encoded
1011
+
1012
+ def test_processing_instruction(self):
1013
+ markup = b"""<?xml version="1.0" encoding="utf8"?>\n<?PITarget PIContent?>"""
1014
+ soup = self.soup(markup)
1015
+ assert markup == soup.encode("utf8")
1016
+
1017
+ def test_real_xhtml_document(self):
1018
+ """A real XHTML document should come out *exactly* the same as it went in."""
1019
+ markup = b"""<?xml version="1.0" encoding="utf-8"?>
1020
+ <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
1021
+ <html xmlns="http://www.w3.org/1999/xhtml">
1022
+ <head><title>Hello.</title></head>
1023
+ <body>Goodbye.</body>
1024
+ </html>"""
1025
+ soup = self.soup(markup)
1026
+ assert soup.encode("utf-8") == markup
1027
+
1028
+ def test_nested_namespaces(self):
1029
+ doc = b"""<?xml version="1.0" encoding="utf-8"?>
1030
+ <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
1031
+ <parent xmlns="http://ns1/">
1032
+ <child xmlns="http://ns2/" xmlns:ns3="http://ns3/">
1033
+ <grandchild ns3:attr="value" xmlns="http://ns4/"/>
1034
+ </child>
1035
+ </parent>"""
1036
+ soup = self.soup(doc)
1037
+ assert doc == soup.encode()
1038
+
1039
+ def test_formatter_processes_script_tag_for_xml_documents(self):
1040
+ doc = """
1041
+ <script type="text/javascript">
1042
+ </script>
1043
+ """
1044
+ soup = BeautifulSoup(doc, "lxml-xml")
1045
+ # lxml would have stripped this while parsing, but we can add
1046
+ # it later.
1047
+ soup.script.string = 'console.log("< < hey > > ");'
1048
+ encoded = soup.encode()
1049
+ assert b"&lt; &lt; hey &gt; &gt;" in encoded
1050
+
1051
+ def test_can_parse_unicode_document(self):
1052
+ markup = '<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>'
1053
+ soup = self.soup(markup)
1054
+ assert 'Sacr\xe9 bleu!' == soup.root.string
1055
+
1056
+ def test_can_parse_unicode_document_begining_with_bom(self):
1057
+ markup = '\N{BYTE ORDER MARK}<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>'
1058
+ soup = self.soup(markup)
1059
+ assert 'Sacr\xe9 bleu!' == soup.root.string
1060
+
1061
+ def test_popping_namespaced_tag(self):
1062
+ markup = '<rss xmlns:dc="foo"><dc:creator>b</dc:creator><dc:date>2012-07-02T20:33:42Z</dc:date><dc:rights>c</dc:rights><image>d</image></rss>'
1063
+ soup = self.soup(markup)
1064
+ assert str(soup.rss) == markup
1065
+
1066
+ def test_docstring_includes_correct_encoding(self):
1067
+ soup = self.soup("<root/>")
1068
+ assert soup.encode("latin1") == b'<?xml version="1.0" encoding="latin1"?>\n<root/>'
1069
+
1070
+ def test_large_xml_document(self):
1071
+ """A large XML document should come out the same as it went in."""
1072
+ markup = (b'<?xml version="1.0" encoding="utf-8"?>\n<root>'
1073
+ + b'0' * (2**12)
1074
+ + b'</root>')
1075
+ soup = self.soup(markup)
1076
+ assert soup.encode("utf-8") == markup
1077
+
1078
+ def test_tags_are_empty_element_if_and_only_if_they_are_empty(self):
1079
+ self.assert_soup("<p>", "<p/>")
1080
+ self.assert_soup("<p>foo</p>")
1081
+
1082
+ def test_namespaces_are_preserved(self):
1083
+ markup = '<root xmlns:a="http://example.com/" xmlns:b="http://example.net/"><a:foo>This tag is in the a namespace</a:foo><b:foo>This tag is in the b namespace</b:foo></root>'
1084
+ soup = self.soup(markup)
1085
+ root = soup.root
1086
+ assert "http://example.com/" == root['xmlns:a']
1087
+ assert "http://example.net/" == root['xmlns:b']
1088
+
1089
+ def test_closing_namespaced_tag(self):
1090
+ markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>'
1091
+ soup = self.soup(markup)
1092
+ assert str(soup.p) == markup
1093
+
1094
+ def test_namespaced_attributes(self):
1095
+ markup = '<foo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><bar xsi:schemaLocation="http://www.example.com"/></foo>'
1096
+ soup = self.soup(markup)
1097
+ assert str(soup.foo) == markup
1098
+
1099
+ def test_namespaced_attributes_xml_namespace(self):
1100
+ markup = '<foo xml:lang="fr">bar</foo>'
1101
+ soup = self.soup(markup)
1102
+ assert str(soup.foo) == markup
1103
+
1104
+ def test_find_by_prefixed_name(self):
1105
+ doc = """<?xml version="1.0" encoding="utf-8"?>
1106
+ <Document xmlns="http://example.com/ns0"
1107
+ xmlns:ns1="http://example.com/ns1"
1108
+ xmlns:ns2="http://example.com/ns2">
1109
+ <ns1:tag>foo</ns1:tag>
1110
+ <ns1:tag>bar</ns1:tag>
1111
+ <ns2:tag key="value">baz</ns2:tag>
1112
+ </Document>
1113
+ """
1114
+ soup = self.soup(doc)
1115
+
1116
+ # There are three <tag> tags.
1117
+ assert 3 == len(soup.find_all('tag'))
1118
+
1119
+ # But two of them are ns1:tag and one of them is ns2:tag.
1120
+ assert 2 == len(soup.find_all('ns1:tag'))
1121
+ assert 1 == len(soup.find_all('ns2:tag'))
1122
+
1123
+ assert 1, len(soup.find_all('ns2:tag', key='value'))
1124
+ assert 3, len(soup.find_all(['ns1:tag', 'ns2:tag']))
1125
+
1126
+ def test_copy_tag_preserves_namespace(self):
1127
+ xml = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
1128
+ <w:document xmlns:w="http://example.com/ns0"/>"""
1129
+
1130
+ soup = self.soup(xml)
1131
+ tag = soup.document
1132
+ duplicate = copy.copy(tag)
1133
+
1134
+ # The two tags have the same namespace prefix.
1135
+ assert tag.prefix == duplicate.prefix
1136
+
1137
+ def test_worst_case(self):
1138
+ """Test the worst case (currently) for linking issues."""
1139
+
1140
+ soup = self.soup(BAD_DOCUMENT)
1141
+ self.linkage_validator(soup)
1142
+
1143
+
1144
+ class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
1145
+ """Smoke test for a tree builder that supports HTML5."""
1146
+
1147
+ def test_real_xhtml_document(self):
1148
+ # Since XHTML is not HTML5, HTML5 parsers are not tested to handle
1149
+ # XHTML documents in any particular way.
1150
+ pass
1151
+
1152
+ def test_html_tags_have_namespace(self):
1153
+ markup = "<a>"
1154
+ soup = self.soup(markup)
1155
+ assert "http://www.w3.org/1999/xhtml" == soup.a.namespace
1156
+
1157
+ def test_svg_tags_have_namespace(self):
1158
+ markup = '<svg><circle/></svg>'
1159
+ soup = self.soup(markup)
1160
+ namespace = "http://www.w3.org/2000/svg"
1161
+ assert namespace == soup.svg.namespace
1162
+ assert namespace == soup.circle.namespace
1163
+
1164
+
1165
+ def test_mathml_tags_have_namespace(self):
1166
+ markup = '<math><msqrt>5</msqrt></math>'
1167
+ soup = self.soup(markup)
1168
+ namespace = 'http://www.w3.org/1998/Math/MathML'
1169
+ assert namespace == soup.math.namespace
1170
+ assert namespace == soup.msqrt.namespace
1171
+
1172
+ def test_xml_declaration_becomes_comment(self):
1173
+ markup = '<?xml version="1.0" encoding="utf-8"?><html></html>'
1174
+ soup = self.soup(markup)
1175
+ assert isinstance(soup.contents[0], Comment)
1176
+ assert soup.contents[0] == '?xml version="1.0" encoding="utf-8"?'
1177
+ assert "html" == soup.contents[0].next_element.name
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/tests/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (67 kB). View file
 
cmrithackathon-master/.venv/lib/python3.11/site-packages/bs4/tests/__pycache__/test_builder.cpython-311.pyc ADDED
Binary file (1.88 kB). View file