| inputs (string, 312–52k chars) | targets (string, 1–3.1k chars, nullable ⌀) | block_type (11 classes) | scenario (7 classes) |
---|---|---|---|
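Each row that follows pairs a fill-in-the-middle prompt (`inputs`, delimited by `<filename>`, `<fim_prefix>`, `<fim_suffix>`, and `<fim_middle>` markers) with the expected completion (`targets`), a `block_type` label such as `FOR`, and a `scenario` name. As a rough, hypothetical illustration of that layout only — the helper name and the assumption that a row is already loaded as a Python dict are editorial, not part of the dataset — one row's prompt could be unpacked like this:

```python
# Hypothetical helper: split one row's FIM prompt into its parts.
# Assumes the row has already been loaded as a dict with 'inputs'
# and 'targets' keys (e.g. from a local JSONL export of this table).

def split_fim_prompt(inputs: str) -> dict:
    # Layout: <filename>PATH<fim_prefix>PREFIX<fim_suffix>SUFFIX<fim_middle>
    header, _, rest = inputs.partition('<fim_prefix>')
    prefix, _, rest = rest.partition('<fim_suffix>')
    suffix, _, middle = rest.partition('<fim_middle>')
    return {
        'filename': header.replace('<filename>', '').strip(),
        'prefix': prefix,
        'suffix': suffix,
        'middle': middle,  # ground-truth continuation, when present
    }


row = {
    'inputs': '<filename>pkg/mod.py<fim_prefix>def f():<fim_suffix>\n<fim_middle>',
    'targets': '    return 1',
}
parts = split_fim_prompt(row['inputs'])
print(parts['filename'], repr(row['targets']))
```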
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# ollama-python/ollama/_types.py
def __init__(self, error: str, status_code: int = -1):
try:
# try to parse content as JSON and extract 'error'
# fallback to raw content if JSON parsing fails
error = json.loads(error).get('error', error)
except json.JSONDecodeError:
...
super().__init__(error)
self.error = error
'Reason for the error.'
self.status_code = status_code
'HTTP status code of the response.'
# ollama-python/examples/async-chat-stream/main.py
async def speak(speaker, content):
if speaker:
p = await asyncio.create_subprocess_exec(speaker, content)
await p.communicate()
# ollama-python/ollama/_types.py
def __init__(self, error: str):
super().__init__(error)
self.error = error
'Reason for the error.'
"""
import os
import io
import json
import httpx
import binascii
import platform
import urllib.parse
from os import PathLike
from pathlib import Path
from hashlib import sha256
from base64 import b64encode, b64decode
from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal
import sys
if sys.version_info < (3, 9):
from typing import Iterator, AsyncIterator
else:
from collections.abc import Iterator, AsyncIterator
from importlib import metadata
try:
__version__ = metadata.version('ollama')
except metadata.PackageNotFoundError:
__version__ = '0.0.0'
from ollama._types import Message, Options, RequestError, ResponseError
class BaseClient:
def __init__(
self,
client,
host: Optional[str] = None,
follow_redirects: bool = True,
timeout: Any = None,
**kwargs,
) -> None:
"""
Creates a httpx client. Default parameters are the same as those defined in httpx
except for the following:
- `follow_redirects`: True
- `timeout`: None
`kwargs` are passed to the httpx client.
"""
headers = kwargs.pop('headers', {})
headers['Content-Type'] = 'application/json'
headers['Accept'] = 'application/json'
headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}'
self._client = client(
base_url=_parse_host(host or os.getenv('OLLAMA_HOST')),
follow_redirects=follow_redirects,
timeout=timeout,
headers=headers,
**kwargs,
)
class Client(BaseClient):
def __init__(self, host: Optional[str] = None, **kwargs) -> None:
super().__init__(httpx.Client, host, **kwargs)
def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
response = self._client.request(method, url, **kwargs)
try:
response.raise_for_status()
except httpx.HTTPStatusError as e:
raise ResponseError(e.response.text, e.response.status_code) from None
return response
def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]:
with self._client.stream(method, url, **kwargs) as r:
try:
r.raise_for_status()
except httpx.HTTPStatusError as e:
e.response.read()
raise ResponseError(e.response.text, e.response.status_code) from None
for line in r.iter_lines():
partial = json.loads(line)
if e := partial.get('error'):
raise ResponseError(e)
yield partial
def _request_stream(
self,
*args,
stream: bool = False,
**kwargs,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json()
def generate(
self,
model: str = '',
prompt: str = '',
system: str = '',
template: str = '',
context: Optional[Sequence[int]] = None,
stream: bool = False,
raw: bool = False,
format: Literal['', 'json'] = '',
images: Optional[Sequence[AnyStr]] = None,
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Create a response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
return self._request_stream(
'POST',
'/api/generate',
json={
'model': model,
'prompt': prompt,
'system': system,
'template': template,
'context': context or [],
'stream': stream,
'raw': raw,
'images': [_encode_image(image) for image in images or []],
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
def chat(
self,
model: str = '',
messages: Optional[Sequence[Message]] = None,
stream: bool = False,
format: Literal['', 'json'] = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Create a chat response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
<fim_suffix>
return self._request_stream(
'POST',
'/api/chat',
json={
'model': model,
'messages': messages,
'stream': stream,
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
def embeddings(
self,
model: str = '',
prompt: str = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Sequence[float]:
return self._request(
'POST',
'/api/embeddings',
json={
'model': model,
'prompt': prompt,
'options': options or {},
'keep_alive': keep_alive,
},
).json()
def pull(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return self._request_stream(
'POST',
'/api/pull',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
def push(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return self._request_stream(
'POST',
'/api/push',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
def create(
self,
model: str,
path: Optional[Union[str, PathLike]] = None,
modelfile: Optional[str] = None,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
if (realpath := _as_path(path)) and realpath.exists():
modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent)
elif modelfile:
modelfile = self._parse_modelfile(modelfile)
else:
raise RequestError('must provide either path or modelfile')
return self._request_stream(
'POST',
'/api/create',
json={
'name': model,
'modelfile': modelfile,
'stream': stream,
},
stream=stream,
)
def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
base = Path.cwd() if base is None else base
out = io.StringIO()
for line in io.StringIO(modelfile):
command, _, args = line.partition(' ')
if command.upper() not in ['FROM', 'ADAPTER']:
print(line, end='', file=out)
continue
path = Path(args.strip()).expanduser()
path = path if path.is_absolute() else base / path
if path.exists():
args = f'@{self._create_blob(path)}\n'
print(command, args, end='', file=out)
return out.getvalue()
def _create_blob(self, path: Union[str, Path]) -> str:
sha256sum = sha256()
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
sha256sum.update(chunk)
digest = f'sha256:{sha256sum.hexdigest()}'
try:
self._request('HEAD', f'/api/blobs/{digest}')
except ResponseError as e:
if e.status_code != 404:
raise
with open(path, 'rb') as r:
self._request('POST', f'/api/blobs/{digest}', content=r)
return digest
def delete(self, model: str) -> Mapping[str, Any]:
response = self._request('DELETE', '/api/delete', json={'name': model})
return {'status': 'success' if response.status_code == 200 else 'error'}
def list(self) -> Mapping[str, Any]:
return self._request('GET', '/api/tags').json()
def copy(self, source: str, destination: str) -> Mapping[str, Any]:
response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
return {'status': 'success' if response.status_code == 200 else 'error'}
def show(self, model: str) -> Mapping[str, Any]:
return self._request('POST', '/api/show', json={'name': model}).json()
class AsyncClient(BaseClient):
def __init__(self, host: Optional[str] = None, **kwargs) -> None:
super().__init__(httpx.AsyncClient, host, **kwargs)
async def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
response = await self._client.request(method, url, **kwargs)
try:
response.raise_for_status()
except httpx.HTTPStatusError as e:
raise ResponseError(e.response.text, e.response.status_code) from None
return response
async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]:
async def inner():
async with self._client.stream(method, url, **kwargs) as r:
try:
r.raise_for_status()
except httpx.HTTPStatusError as e:
e.response.read()
raise ResponseError(e.response.text, e.response.status_code) from None
async for line in r.aiter_lines():
partial = json.loads(line)
if e := partial.get('error'):
raise ResponseError(e)
yield partial
return inner()
async def _request_stream(
self,
*args,
stream: bool = False,
**kwargs,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
if stream:
return await self._stream(*args, **kwargs)
response = await self._request(*args, **kwargs)
return response.json()
async def generate(
self,
model: str = '',
prompt: str = '',
system: str = '',
template: str = '',
context: Optional[Sequence[int]] = None,
stream: bool = False,
raw: bool = False,
format: Literal['', 'json'] = '',
images: Optional[Sequence[AnyStr]] = None,
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Create a response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
return await self._request_stream(
'POST',
'/api/generate',
json={
'model': model,
'prompt': prompt,
'system': system,
'template': template,
'context': context or [],
'stream': stream,
'raw': raw,
'images': [_encode_image(image) for image in images or []],
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
async def chat(
self,
model: str = '',
messages: Optional[Sequence[Message]] = None,
stream: bool = False,
format: Literal['', 'json'] = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Create a chat response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
for message in messages or []:
if not isinstance(message, dict):
raise TypeError('messages must be a list of strings')
if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
if not message.get('content'):
raise RequestError('messages must contain content')
if images := message.get('images'):
message['images'] = [_encode_image(image) for image in images]
return await self._request_stream(
'POST',
'/api/chat',
json={
'model': model,
'messages': messages,
'stream': stream,
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
async def embeddings(
self,
model: str = '',
prompt: str = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Sequence[float]:
response = await self._request(
'POST',
'/api/embeddings',
json={
'model': model,
'prompt': prompt,
'options': options or {},
'keep_alive': keep_alive,
},
)
return response.json()
async def pull(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return await self._request_stream(
'POST',
'/api/pull',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
async def push(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return await self._request_stream(
'POST',
'/api/push',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
async def create(
self,
model: str,
path: Optional[Union[str, PathLike]] = None,
modelfile: Optional[str] = None,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
if (realpath := _as_path(path)) and realpath.exists():
modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent)
elif modelfile:
modelfile = await self._parse_modelfile(modelfile)
else:
raise RequestError('must provide either path or modelfile')
return await self._request_stream(
'POST',
'/api/create',
json={
'name': model,
'modelfile': modelfile,
'stream': stream,
},
stream=stream,
)
async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
base = Path.cwd() if base is None else base
out = io.StringIO()
for line in io.StringIO(modelfile):
command, _, args = line.partition(' ')
if command.upper() not in ['FROM', 'ADAPTER']:
print(line, end='', file=out)
continue
path = Path(args.strip()).expanduser()
path = path if path.is_absolute() else base / path
if path.exists():
args = f'@{await self._create_blob(path)}\n'
print(command, args, end='', file=out)
return out.getvalue()
async def _create_blob(self, path: Union[str, Path]) -> str:
sha256sum = sha256()
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
sha256sum.update(chunk)
digest = f'sha256:{sha256sum.hexdigest()}'
try:
await self._request('HEAD', f'/api/blobs/{digest}')
except ResponseError as e:
if e.status_code != 404:
raise
async def upload_bytes():
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
yield chunk
await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes())
return digest
async def delete(self, model: str) -> Mapping[str, Any]:
response = await self._request('DELETE', '/api/delete', json={'name': model})
return {'status': 'success' if response.status_code == 200 else 'error'}
async def list(self) -> Mapping[str, Any]:
response = await self._request('GET', '/api/tags')
return response.json()
async def copy(self, source: str, destination: str) -> Mapping[str, Any]:
response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
return {'status': 'success' if response.status_code == 200 else 'error'}
async def show(self, model: str) -> Mapping[str, Any]:
response = await self._request('POST', '/api/show', json={'name': model})
return response.json()
def _encode_image(image) -> str:
"""
>>> _encode_image(b'ollama')
'b2xsYW1h'
>>> _encode_image(io.BytesIO(b'ollama'))
'b2xsYW1h'
>>> _encode_image('LICENSE')
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo='
>>> _encode_image(Path('LICENSE'))
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo='
>>> _encode_image('YWJj')
'YWJj'
>>> _encode_image(b'YWJj')
'YWJj'
"""
if p := _as_path(image):
return b64encode(p.read_bytes()).decode('utf-8')
try:
b64decode(image, validate=True)
return image if isinstance(image, str) else image.decode('utf-8')
except (binascii.Error, TypeError):
...
if b := _as_bytesio(image):
return b64encode(b.read()).decode('utf-8')
raise RequestError('image must be bytes, path-like object, or file-like object')
def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]:
if isinstance(s, str) or isinstance(s, Path):
try:
if (p := Path(s)).exists():
return p
except Exception:
...
return None
def _as_bytesio(s: Any) -> Union[io.BytesIO, None]:
if isinstance(s, io.BytesIO):
return s
elif isinstance(s, bytes):
return io.BytesIO(s)
return None
def _parse_host(host: Optional[str]) -> str:
"""
>>> _parse_host(None)
'http://127.0.0.1:11434'
>>> _parse_host('')
'http://127.0.0.1:11434'
>>> _parse_host('1.2.3.4')
'http://1.2.3.4:11434'
>>> _parse_host(':56789')
'http://127.0.0.1:56789'
>>> _parse_host('1.2.3.4:56789')
'http://1.2.3.4:56789'
>>> _parse_host('http://1.2.3.4')
'http://1.2.3.4:80'
>>> _parse_host('https://1.2.3.4')
'https://1.2.3.4:443'
>>> _parse_host('https://1.2.3.4:56789')
'https://1.2.3.4:56789'
>>> _parse_host('example.com')
'http://example.com:11434'
>>> _parse_host('example.com:56789')
'http://example.com:56789'
>>> _parse_host('http://example.com')
'http://example.com:80'
>>> _parse_host('https://example.com')
'https://example.com:443'
>>> _parse_host('https://example.com:56789')
'https://example.com:56789'
>>> _parse_host('example.com/')
'http://example.com:11434'
>>> _parse_host('example.com:56789/')
'http://example.com:56789'
"""
host, port = host or '', 11434
scheme, _, hostport = host.partition('://')
if not hostport:
scheme, hostport = 'http', host
elif scheme == 'http':
port = 80
elif scheme == 'https':
port = 443
split = urllib.parse.urlsplit('://'.join([scheme, hostport]))
host = split.hostname or '127.0.0.1'
port = split.port or port
return f'{scheme}://{host}:{port}'
<fim_middle>for message in messages or []:
if not isinstance(message, dict):
raise TypeError('messages must be a list of Message or dict-like objects')
if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
if not message.get('content'):
raise RequestError('messages must contain content')
if images := message.get('images'):
message['images'] = [_encode_image(image) for image in images] | for message in messages or []:
if not isinstance(message, dict):
raise TypeError('messages must be a list of Message or dict-like objects')
if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
if not message.get('content'):
raise RequestError('messages must contain content')
if images := message.get('images'):
message['images'] = [_encode_image(image) for image in images] | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
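The row above (block_type `FOR`) completes the message-validation loop inside the synchronous `Client.chat`. For orientation, a minimal sketch of calling that API is shown below; the model name is a placeholder and a locally running Ollama server is assumed, so treat it as illustrative rather than as part of the dataset.

```python
# Minimal sketch of using the synchronous client whose chat() method
# the row above completes. Assumes a running Ollama server and that
# the chosen model ('mistral' here, as a placeholder) has been pulled.
from ollama import Client

client = Client()  # host defaults to OLLAMA_HOST or http://127.0.0.1:11434
response = client.chat(
    model='mistral',
    messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
)
print(response['message']['content'])
```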
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# ollama-python/ollama/_types.py
def __init__(self, error: str, status_code: int = -1):
try:
# try to parse content as JSON and extract 'error'
# fallback to raw content if JSON parsing fails
error = json.loads(error).get('error', error)
except json.JSONDecodeError:
...
super().__init__(error)
self.error = error
'Reason for the error.'
self.status_code = status_code
'HTTP status code of the response.'
# ollama-python/examples/async-chat-stream/main.py
async def main():
parser = argparse.ArgumentParser()
parser.add_argument('--speak', default=False, action='store_true')
args = parser.parse_args()
speaker = None
if not args.speak:
...
elif say := shutil.which('say'):
speaker = say
elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')):
speaker = espeak
client = ollama.AsyncClient()
messages = []
while True:
if content_in := input('>>> '):
messages.append({'role': 'user', 'content': content_in})
content_out = ''
message = {'role': 'assistant', 'content': ''}
async for response in await client.chat(model='mistral', messages=messages, stream=True):
if response['done']:
messages.append(message)
content = response['message']['content']
print(content, end='', flush=True)
content_out += content
if content in ['.', '!', '?', '\n']:
await speak(speaker, content_out)
content_out = ''
message['content'] += content
if content_out:
await speak(speaker, content_out)
print()
# ollama-python/ollama/_types.py
def __init__(self, error: str):
super().__init__(error)
self.error = error
'Reason for the error.'
"""
import os
import io
import json
import httpx
import binascii
import platform
import urllib.parse
from os import PathLike
from pathlib import Path
from hashlib import sha256
from base64 import b64encode, b64decode
from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal
import sys
if sys.version_info < (3, 9):
from typing import Iterator, AsyncIterator
else:
from collections.abc import Iterator, AsyncIterator
from importlib import metadata
try:
__version__ = metadata.version('ollama')
except metadata.PackageNotFoundError:
__version__ = '0.0.0'
from ollama._types import Message, Options, RequestError, ResponseError
class BaseClient:
def __init__(
self,
client,
host: Optional[str] = None,
follow_redirects: bool = True,
timeout: Any = None,
**kwargs,
) -> None:
"""
Creates a httpx client. Default parameters are the same as those defined in httpx
except for the following:
- `follow_redirects`: True
- `timeout`: None
`kwargs` are passed to the httpx client.
"""
headers = kwargs.pop('headers', {})
headers['Content-Type'] = 'application/json'
headers['Accept'] = 'application/json'
headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}'
self._client = client(
base_url=_parse_host(host or os.getenv('OLLAMA_HOST')),
follow_redirects=follow_redirects,
timeout=timeout,
headers=headers,
**kwargs,
)
class Client(BaseClient):
def __init__(self, host: Optional[str] = None, **kwargs) -> None:
super().__init__(httpx.Client, host, **kwargs)
def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
response = self._client.request(method, url, **kwargs)
try:
response.raise_for_status()
except httpx.HTTPStatusError as e:
raise ResponseError(e.response.text, e.response.status_code) from None
return response
def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]:
with self._client.stream(method, url, **kwargs) as r:
try:
r.raise_for_status()
except httpx.HTTPStatusError as e:
e.response.read()
raise ResponseError(e.response.text, e.response.status_code) from None
for line in r.iter_lines():
partial = json.loads(line)
if e := partial.get('error'):
raise ResponseError(e)
yield partial
def _request_stream(
self,
*args,
stream: bool = False,
**kwargs,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json()
def generate(
self,
model: str = '',
prompt: str = '',
system: str = '',
template: str = '',
context: Optional[Sequence[int]] = None,
stream: bool = False,
raw: bool = False,
format: Literal['', 'json'] = '',
images: Optional[Sequence[AnyStr]] = None,
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Create a response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
return self._request_stream(
'POST',
'/api/generate',
json={
'model': model,
'prompt': prompt,
'system': system,
'template': template,
'context': context or [],
'stream': stream,
'raw': raw,
'images': [_encode_image(image) for image in images or []],
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
def chat(
self,
model: str = '',
messages: Optional[Sequence[Message]] = None,
stream: bool = False,
format: Literal['', 'json'] = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Create a chat response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
for message in messages or []:
if not isinstance(message, dict):
raise TypeError('messages must be a list of Message or dict-like objects')
if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
if not message.get('content'):
raise RequestError('messages must contain content')
if images := message.get('images'):
message['images'] = [_encode_image(image) for image in images]
return self._request_stream(
'POST',
'/api/chat',
json={
'model': model,
'messages': messages,
'stream': stream,
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
def embeddings(
self,
model: str = '',
prompt: str = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Sequence[float]:
return self._request(
'POST',
'/api/embeddings',
json={
'model': model,
'prompt': prompt,
'options': options or {},
'keep_alive': keep_alive,
},
).json()
def pull(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return self._request_stream(
'POST',
'/api/pull',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
def push(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return self._request_stream(
'POST',
'/api/push',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
def create(
self,
model: str,
path: Optional[Union[str, PathLike]] = None,
modelfile: Optional[str] = None,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
if (realpath := _as_path(path)) and realpath.exists():
modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent)
elif modelfile:
modelfile = self._parse_modelfile(modelfile)
else:
raise RequestError('must provide either path or modelfile')
return self._request_stream(
'POST',
'/api/create',
json={
'name': model,
'modelfile': modelfile,
'stream': stream,
},
stream=stream,
)
def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
base = Path.cwd() if base is None else base
out = io.StringIO()
for line in io.StringIO(modelfile):
command, _, args = line.partition(' ')
if command.upper() not in ['FROM', 'ADAPTER']:
print(line, end='', file=out)
continue
path = Path(args.strip()).expanduser()
path = path if path.is_absolute() else base / path
if path.exists():
args = f'@{self._create_blob(path)}\n'
print(command, args, end='', file=out)
return out.getvalue()
def _create_blob(self, path: Union[str, Path]) -> str:
sha256sum = sha256()
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
sha256sum.update(chunk)
digest = f'sha256:{sha256sum.hexdigest()}'
try:
self._request('HEAD', f'/api/blobs/{digest}')
except ResponseError as e:
if e.status_code != 404:
raise
with open(path, 'rb') as r:
self._request('POST', f'/api/blobs/{digest}', content=r)
return digest
def delete(self, model: str) -> Mapping[str, Any]:
response = self._request('DELETE', '/api/delete', json={'name': model})
return {'status': 'success' if response.status_code == 200 else 'error'}
def list(self) -> Mapping[str, Any]:
return self._request('GET', '/api/tags').json()
def copy(self, source: str, destination: str) -> Mapping[str, Any]:
response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
return {'status': 'success' if response.status_code == 200 else 'error'}
def show(self, model: str) -> Mapping[str, Any]:
return self._request('POST', '/api/show', json={'name': model}).json()
class AsyncClient(BaseClient):
def __init__(self, host: Optional[str] = None, **kwargs) -> None:
super().__init__(httpx.AsyncClient, host, **kwargs)
async def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
response = await self._client.request(method, url, **kwargs)
try:
response.raise_for_status()
except httpx.HTTPStatusError as e:
raise ResponseError(e.response.text, e.response.status_code) from None
return response
async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]:
async def inner():
async with self._client.stream(method, url, **kwargs) as r:
try:
r.raise_for_status()
except httpx.HTTPStatusError as e:
e.response.read()
raise ResponseError(e.response.text, e.response.status_code) from None
<fim_suffix>
return inner()
async def _request_stream(
self,
*args,
stream: bool = False,
**kwargs,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
if stream:
return await self._stream(*args, **kwargs)
response = await self._request(*args, **kwargs)
return response.json()
async def generate(
self,
model: str = '',
prompt: str = '',
system: str = '',
template: str = '',
context: Optional[Sequence[int]] = None,
stream: bool = False,
raw: bool = False,
format: Literal['', 'json'] = '',
images: Optional[Sequence[AnyStr]] = None,
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Create a response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
return await self._request_stream(
'POST',
'/api/generate',
json={
'model': model,
'prompt': prompt,
'system': system,
'template': template,
'context': context or [],
'stream': stream,
'raw': raw,
'images': [_encode_image(image) for image in images or []],
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
async def chat(
self,
model: str = '',
messages: Optional[Sequence[Message]] = None,
stream: bool = False,
format: Literal['', 'json'] = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Create a chat response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
for message in messages or []:
if not isinstance(message, dict):
raise TypeError('messages must be a list of strings')
if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
if not message.get('content'):
raise RequestError('messages must contain content')
if images := message.get('images'):
message['images'] = [_encode_image(image) for image in images]
return await self._request_stream(
'POST',
'/api/chat',
json={
'model': model,
'messages': messages,
'stream': stream,
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
async def embeddings(
self,
model: str = '',
prompt: str = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Sequence[float]:
response = await self._request(
'POST',
'/api/embeddings',
json={
'model': model,
'prompt': prompt,
'options': options or {},
'keep_alive': keep_alive,
},
)
return response.json()
async def pull(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return await self._request_stream(
'POST',
'/api/pull',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
async def push(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return await self._request_stream(
'POST',
'/api/push',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
async def create(
self,
model: str,
path: Optional[Union[str, PathLike]] = None,
modelfile: Optional[str] = None,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
if (realpath := _as_path(path)) and realpath.exists():
modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent)
elif modelfile:
modelfile = await self._parse_modelfile(modelfile)
else:
raise RequestError('must provide either path or modelfile')
return await self._request_stream(
'POST',
'/api/create',
json={
'name': model,
'modelfile': modelfile,
'stream': stream,
},
stream=stream,
)
async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
base = Path.cwd() if base is None else base
out = io.StringIO()
for line in io.StringIO(modelfile):
command, _, args = line.partition(' ')
if command.upper() not in ['FROM', 'ADAPTER']:
print(line, end='', file=out)
continue
path = Path(args.strip()).expanduser()
path = path if path.is_absolute() else base / path
if path.exists():
args = f'@{await self._create_blob(path)}\n'
print(command, args, end='', file=out)
return out.getvalue()
async def _create_blob(self, path: Union[str, Path]) -> str:
sha256sum = sha256()
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
sha256sum.update(chunk)
digest = f'sha256:{sha256sum.hexdigest()}'
try:
await self._request('HEAD', f'/api/blobs/{digest}')
except ResponseError as e:
if e.status_code != 404:
raise
async def upload_bytes():
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
yield chunk
await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes())
return digest
async def delete(self, model: str) -> Mapping[str, Any]:
response = await self._request('DELETE', '/api/delete', json={'name': model})
return {'status': 'success' if response.status_code == 200 else 'error'}
async def list(self) -> Mapping[str, Any]:
response = await self._request('GET', '/api/tags')
return response.json()
async def copy(self, source: str, destination: str) -> Mapping[str, Any]:
response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
return {'status': 'success' if response.status_code == 200 else 'error'}
async def show(self, model: str) -> Mapping[str, Any]:
response = await self._request('POST', '/api/show', json={'name': model})
return response.json()
def _encode_image(image) -> str:
"""
>>> _encode_image(b'ollama')
'b2xsYW1h'
>>> _encode_image(io.BytesIO(b'ollama'))
'b2xsYW1h'
>>> _encode_image('LICENSE')
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo='
>>> _encode_image(Path('LICENSE'))
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo='
>>> _encode_image('YWJj')
'YWJj'
>>> _encode_image(b'YWJj')
'YWJj'
"""
if p := _as_path(image):
return b64encode(p.read_bytes()).decode('utf-8')
try:
b64decode(image, validate=True)
return image if isinstance(image, str) else image.decode('utf-8')
except (binascii.Error, TypeError):
...
if b := _as_bytesio(image):
return b64encode(b.read()).decode('utf-8')
raise RequestError('image must be bytes, path-like object, or file-like object')
def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]:
if isinstance(s, str) or isinstance(s, Path):
try:
if (p := Path(s)).exists():
return p
except Exception:
...
return None
def _as_bytesio(s: Any) -> Union[io.BytesIO, None]:
if isinstance(s, io.BytesIO):
return s
elif isinstance(s, bytes):
return io.BytesIO(s)
return None
def _parse_host(host: Optional[str]) -> str:
"""
>>> _parse_host(None)
'http://127.0.0.1:11434'
>>> _parse_host('')
'http://127.0.0.1:11434'
>>> _parse_host('1.2.3.4')
'http://1.2.3.4:11434'
>>> _parse_host(':56789')
'http://127.0.0.1:56789'
>>> _parse_host('1.2.3.4:56789')
'http://1.2.3.4:56789'
>>> _parse_host('http://1.2.3.4')
'http://1.2.3.4:80'
>>> _parse_host('https://1.2.3.4')
'https://1.2.3.4:443'
>>> _parse_host('https://1.2.3.4:56789')
'https://1.2.3.4:56789'
>>> _parse_host('example.com')
'http://example.com:11434'
>>> _parse_host('example.com:56789')
'http://example.com:56789'
>>> _parse_host('http://example.com')
'http://example.com:80'
>>> _parse_host('https://example.com')
'https://example.com:443'
>>> _parse_host('https://example.com:56789')
'https://example.com:56789'
>>> _parse_host('example.com/')
'http://example.com:11434'
>>> _parse_host('example.com:56789/')
'http://example.com:56789'
"""
host, port = host or '', 11434
scheme, _, hostport = host.partition('://')
if not hostport:
scheme, hostport = 'http', host
elif scheme == 'http':
port = 80
elif scheme == 'https':
port = 443
split = urllib.parse.urlsplit('://'.join([scheme, hostport]))
host = split.hostname or '127.0.0.1'
port = split.port or port
return f'{scheme}://{host}:{port}'
<fim_middle>async for line in r.aiter_lines():
partial = json.loads(line)
if e := partial.get('error'):
raise ResponseError(e)
yield partial | async for line in r.aiter_lines():
partial = json.loads(line)
if e := partial.get('error'):
raise ResponseError(e)
yield partial | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
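This second row (also block_type `FOR`) fills in the line-iteration loop inside `AsyncClient._stream`, which backs the streaming path of `AsyncClient.chat`. Mirroring the `examples/async-chat-stream/main.py` snippet quoted in the prompt, a stripped-down streaming consumer might look like the sketch below; again, the model name is a placeholder and a reachable Ollama server is assumed.

```python
# Sketch of consuming the async streaming API completed in the row above.
# chat(stream=True) resolves to the async generator built by _stream(),
# i.e. the aiter_lines() loop this row fills in.
import asyncio
from ollama import AsyncClient

async def main():
    client = AsyncClient()
    async for part in await client.chat(
        model='mistral',  # placeholder model name
        messages=[{'role': 'user', 'content': 'Tell me a short joke.'}],
        stream=True,
    ):
        print(part['message']['content'], end='', flush=True)
    print()

asyncio.run(main())
```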
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# ollama-python/ollama/_types.py
def __init__(self, error: str, status_code: int = -1):
try:
# try to parse content as JSON and extract 'error'
# fallback to raw content if JSON parsing fails
error = json.loads(error).get('error', error)
except json.JSONDecodeError:
...
super().__init__(error)
self.error = error
'Reason for the error.'
self.status_code = status_code
'HTTP status code of the response.'
# ollama-python/examples/async-chat-stream/main.py
async def main():
parser = argparse.ArgumentParser()
parser.add_argument('--speak', default=False, action='store_true')
args = parser.parse_args()
speaker = None
if not args.speak:
...
elif say := shutil.which('say'):
speaker = say
elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')):
speaker = espeak
client = ollama.AsyncClient()
messages = []
while True:
if content_in := input('>>> '):
messages.append({'role': 'user', 'content': content_in})
content_out = ''
message = {'role': 'assistant', 'content': ''}
async for response in await client.chat(model='mistral', messages=messages, stream=True):
if response['done']:
messages.append(message)
content = response['message']['content']
print(content, end='', flush=True)
content_out += content
if content in ['.', '!', '?', '\n']:
await speak(speaker, content_out)
content_out = ''
message['content'] += content
if content_out:
await speak(speaker, content_out)
print()
# ollama-python/ollama/_types.py
def __init__(self, error: str):
super().__init__(error)
self.error = error
'Reason for the error.'
"""
import os
import io
import json
import httpx
import binascii
import platform
import urllib.parse
from os import PathLike
from pathlib import Path
from hashlib import sha256
from base64 import b64encode, b64decode
from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal
import sys
if sys.version_info < (3, 9):
from typing import Iterator, AsyncIterator
else:
from collections.abc import Iterator, AsyncIterator
from importlib import metadata
try:
__version__ = metadata.version('ollama')
except metadata.PackageNotFoundError:
__version__ = '0.0.0'
from ollama._types import Message, Options, RequestError, ResponseError
class BaseClient:
def __init__(
self,
client,
host: Optional[str] = None,
follow_redirects: bool = True,
timeout: Any = None,
**kwargs,
) -> None:
"""
Creates a httpx client. Default parameters are the same as those defined in httpx
except for the following:
- `follow_redirects`: True
- `timeout`: None
`kwargs` are passed to the httpx client.
"""
headers = kwargs.pop('headers', {})
headers['Content-Type'] = 'application/json'
headers['Accept'] = 'application/json'
headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}'
self._client = client(
base_url=_parse_host(host or os.getenv('OLLAMA_HOST')),
follow_redirects=follow_redirects,
timeout=timeout,
headers=headers,
**kwargs,
)
class Client(BaseClient):
def __init__(self, host: Optional[str] = None, **kwargs) -> None:
super().__init__(httpx.Client, host, **kwargs)
def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
response = self._client.request(method, url, **kwargs)
try:
response.raise_for_status()
except httpx.HTTPStatusError as e:
raise ResponseError(e.response.text, e.response.status_code) from None
return response
def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]:
with self._client.stream(method, url, **kwargs) as r:
try:
r.raise_for_status()
except httpx.HTTPStatusError as e:
e.response.read()
raise ResponseError(e.response.text, e.response.status_code) from None
for line in r.iter_lines():
partial = json.loads(line)
if e := partial.get('error'):
raise ResponseError(e)
yield partial
def _request_stream(
self,
*args,
stream: bool = False,
**kwargs,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json()
def generate(
self,
model: str = '',
prompt: str = '',
system: str = '',
template: str = '',
context: Optional[Sequence[int]] = None,
stream: bool = False,
raw: bool = False,
format: Literal['', 'json'] = '',
images: Optional[Sequence[AnyStr]] = None,
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Create a response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
return self._request_stream(
'POST',
'/api/generate',
json={
'model': model,
'prompt': prompt,
'system': system,
'template': template,
'context': context or [],
'stream': stream,
'raw': raw,
'images': [_encode_image(image) for image in images or []],
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
def chat(
self,
model: str = '',
messages: Optional[Sequence[Message]] = None,
stream: bool = False,
format: Literal['', 'json'] = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Create a chat response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
for message in messages or []:
if not isinstance(message, dict):
raise TypeError('messages must be a list of Message or dict-like objects')
if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
if not message.get('content'):
raise RequestError('messages must contain content')
if images := message.get('images'):
message['images'] = [_encode_image(image) for image in images]
return self._request_stream(
'POST',
'/api/chat',
json={
'model': model,
'messages': messages,
'stream': stream,
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
def embeddings(
self,
model: str = '',
prompt: str = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Sequence[float]:
return self._request(
'POST',
'/api/embeddings',
json={
'model': model,
'prompt': prompt,
'options': options or {},
'keep_alive': keep_alive,
},
).json()
def pull(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return self._request_stream(
'POST',
'/api/pull',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
def push(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return self._request_stream(
'POST',
'/api/push',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
def create(
self,
model: str,
path: Optional[Union[str, PathLike]] = None,
modelfile: Optional[str] = None,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
if (realpath := _as_path(path)) and realpath.exists():
modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent)
elif modelfile:
modelfile = self._parse_modelfile(modelfile)
else:
raise RequestError('must provide either path or modelfile')
return self._request_stream(
'POST',
'/api/create',
json={
'name': model,
'modelfile': modelfile,
'stream': stream,
},
stream=stream,
)
def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
base = Path.cwd() if base is None else base
out = io.StringIO()
for line in io.StringIO(modelfile):
command, _, args = line.partition(' ')
if command.upper() not in ['FROM', 'ADAPTER']:
print(line, end='', file=out)
continue
path = Path(args.strip()).expanduser()
path = path if path.is_absolute() else base / path
if path.exists():
args = f'@{self._create_blob(path)}\n'
print(command, args, end='', file=out)
return out.getvalue()
def _create_blob(self, path: Union[str, Path]) -> str:
sha256sum = sha256()
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
sha256sum.update(chunk)
digest = f'sha256:{sha256sum.hexdigest()}'
try:
self._request('HEAD', f'/api/blobs/{digest}')
except ResponseError as e:
if e.status_code != 404:
raise
with open(path, 'rb') as r:
self._request('POST', f'/api/blobs/{digest}', content=r)
return digest
def delete(self, model: str) -> Mapping[str, Any]:
response = self._request('DELETE', '/api/delete', json={'name': model})
return {'status': 'success' if response.status_code == 200 else 'error'}
def list(self) -> Mapping[str, Any]:
return self._request('GET', '/api/tags').json()
def copy(self, source: str, destination: str) -> Mapping[str, Any]:
response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
return {'status': 'success' if response.status_code == 200 else 'error'}
def show(self, model: str) -> Mapping[str, Any]:
return self._request('POST', '/api/show', json={'name': model}).json()
class AsyncClient(BaseClient):
def __init__(self, host: Optional[str] = None, **kwargs) -> None:
super().__init__(httpx.AsyncClient, host, **kwargs)
async def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
response = await self._client.request(method, url, **kwargs)
try:
response.raise_for_status()
except httpx.HTTPStatusError as e:
raise ResponseError(e.response.text, e.response.status_code) from None
return response
async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]:
async def inner():
async with self._client.stream(method, url, **kwargs) as r:
try:
r.raise_for_status()
except httpx.HTTPStatusError as e:
e.response.read()
raise ResponseError(e.response.text, e.response.status_code) from None
<fim_suffix>
return inner()
async def _request_stream(
self,
*args,
stream: bool = False,
**kwargs,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
if stream:
return await self._stream(*args, **kwargs)
response = await self._request(*args, **kwargs)
return response.json()
async def generate(
self,
model: str = '',
prompt: str = '',
system: str = '',
template: str = '',
context: Optional[Sequence[int]] = None,
stream: bool = False,
raw: bool = False,
format: Literal['', 'json'] = '',
images: Optional[Sequence[AnyStr]] = None,
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Create a response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
return await self._request_stream(
'POST',
'/api/generate',
json={
'model': model,
'prompt': prompt,
'system': system,
'template': template,
'context': context or [],
'stream': stream,
'raw': raw,
'images': [_encode_image(image) for image in images or []],
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
async def chat(
self,
model: str = '',
messages: Optional[Sequence[Message]] = None,
stream: bool = False,
format: Literal['', 'json'] = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Create a chat response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
for message in messages or []:
if not isinstance(message, dict):
        raise TypeError('messages must be a list of Message or dict-like objects')
if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
if not message.get('content'):
raise RequestError('messages must contain content')
if images := message.get('images'):
message['images'] = [_encode_image(image) for image in images]
return await self._request_stream(
'POST',
'/api/chat',
json={
'model': model,
'messages': messages,
'stream': stream,
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
async def embeddings(
self,
model: str = '',
prompt: str = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Sequence[float]:
response = await self._request(
'POST',
'/api/embeddings',
json={
'model': model,
'prompt': prompt,
'options': options or {},
'keep_alive': keep_alive,
},
)
return response.json()
async def pull(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
    Returns `ProgressResponse` if `stream` is `False`, otherwise returns an asynchronous `ProgressResponse` generator.
"""
return await self._request_stream(
'POST',
'/api/pull',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
async def push(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
    Returns `ProgressResponse` if `stream` is `False`, otherwise returns an asynchronous `ProgressResponse` generator.
"""
return await self._request_stream(
'POST',
'/api/push',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
async def create(
self,
model: str,
path: Optional[Union[str, PathLike]] = None,
modelfile: Optional[str] = None,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
    Returns `ProgressResponse` if `stream` is `False`, otherwise returns an asynchronous `ProgressResponse` generator.
"""
if (realpath := _as_path(path)) and realpath.exists():
modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent)
elif modelfile:
modelfile = await self._parse_modelfile(modelfile)
else:
raise RequestError('must provide either path or modelfile')
return await self._request_stream(
'POST',
'/api/create',
json={
'name': model,
'modelfile': modelfile,
'stream': stream,
},
stream=stream,
)
async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
base = Path.cwd() if base is None else base
out = io.StringIO()
for line in io.StringIO(modelfile):
command, _, args = line.partition(' ')
if command.upper() not in ['FROM', 'ADAPTER']:
print(line, end='', file=out)
continue
path = Path(args.strip()).expanduser()
path = path if path.is_absolute() else base / path
if path.exists():
args = f'@{await self._create_blob(path)}\n'
print(command, args, end='', file=out)
return out.getvalue()
async def _create_blob(self, path: Union[str, Path]) -> str:
sha256sum = sha256()
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
sha256sum.update(chunk)
digest = f'sha256:{sha256sum.hexdigest()}'
try:
await self._request('HEAD', f'/api/blobs/{digest}')
except ResponseError as e:
if e.status_code != 404:
raise
async def upload_bytes():
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
yield chunk
await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes())
return digest
async def delete(self, model: str) -> Mapping[str, Any]:
response = await self._request('DELETE', '/api/delete', json={'name': model})
return {'status': 'success' if response.status_code == 200 else 'error'}
async def list(self) -> Mapping[str, Any]:
response = await self._request('GET', '/api/tags')
return response.json()
async def copy(self, source: str, destination: str) -> Mapping[str, Any]:
response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
return {'status': 'success' if response.status_code == 200 else 'error'}
async def show(self, model: str) -> Mapping[str, Any]:
response = await self._request('POST', '/api/show', json={'name': model})
return response.json()
def _encode_image(image) -> str:
"""
>>> _encode_image(b'ollama')
'b2xsYW1h'
>>> _encode_image(io.BytesIO(b'ollama'))
'b2xsYW1h'
>>> _encode_image('LICENSE')
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo='
>>> _encode_image(Path('LICENSE'))
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo='
>>> _encode_image('YWJj')
'YWJj'
>>> _encode_image(b'YWJj')
'YWJj'
"""
if p := _as_path(image):
return b64encode(p.read_bytes()).decode('utf-8')
try:
b64decode(image, validate=True)
return image if isinstance(image, str) else image.decode('utf-8')
except (binascii.Error, TypeError):
...
if b := _as_bytesio(image):
return b64encode(b.read()).decode('utf-8')
raise RequestError('image must be bytes, path-like object, or file-like object')
def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]:
if isinstance(s, str) or isinstance(s, Path):
try:
if (p := Path(s)).exists():
return p
except Exception:
...
return None
def _as_bytesio(s: Any) -> Union[io.BytesIO, None]:
if isinstance(s, io.BytesIO):
return s
elif isinstance(s, bytes):
return io.BytesIO(s)
return None
def _parse_host(host: Optional[str]) -> str:
"""
>>> _parse_host(None)
'http://127.0.0.1:11434'
>>> _parse_host('')
'http://127.0.0.1:11434'
>>> _parse_host('1.2.3.4')
'http://1.2.3.4:11434'
>>> _parse_host(':56789')
'http://127.0.0.1:56789'
>>> _parse_host('1.2.3.4:56789')
'http://1.2.3.4:56789'
>>> _parse_host('http://1.2.3.4')
'http://1.2.3.4:80'
>>> _parse_host('https://1.2.3.4')
'https://1.2.3.4:443'
>>> _parse_host('https://1.2.3.4:56789')
'https://1.2.3.4:56789'
>>> _parse_host('example.com')
'http://example.com:11434'
>>> _parse_host('example.com:56789')
'http://example.com:56789'
>>> _parse_host('http://example.com')
'http://example.com:80'
>>> _parse_host('https://example.com')
'https://example.com:443'
>>> _parse_host('https://example.com:56789')
'https://example.com:56789'
>>> _parse_host('example.com/')
'http://example.com:11434'
>>> _parse_host('example.com:56789/')
'http://example.com:56789'
"""
host, port = host or '', 11434
scheme, _, hostport = host.partition('://')
if not hostport:
scheme, hostport = 'http', host
elif scheme == 'http':
port = 80
elif scheme == 'https':
port = 443
split = urllib.parse.urlsplit('://'.join([scheme, hostport]))
host = split.hostname or '127.0.0.1'
port = split.port or port
return f'{scheme}://{host}:{port}'
<fim_middle>async for line in r.aiter_lines():
partial = json.loads(line)
if e := partial.get('error'):
raise ResponseError(e)
yield partial | async for line in r.aiter_lines():
partial = json.loads(line)
if e := partial.get('error'):
raise ResponseError(e)
yield partial | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
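The `_stream` and `_request_stream` helpers above are what back the `stream=True` code paths of the public methods. The following is a minimal, hypothetical sketch of calling them through the synchronous client; the model name 'llama2' and the prompts are placeholders, and `Client` is assumed to be importable from the `ollama` package as in the quoted examples.
import ollama
client = ollama.Client()
# Non-streaming: chat() returns a single mapping parsed from the response body.
reply = client.chat(model='llama2', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
print(reply['message']['content'])
# Streaming: chat() returns the generator produced by Client._stream(),
# which yields one parsed JSON line at a time.
for part in client.chat(model='llama2', messages=[{'role': 'user', 'content': 'Tell me a joke.'}], stream=True):
  print(part['message']['content'], end='', flush=True)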
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# ollama-python/ollama/_types.py
def __init__(self, error: str, status_code: int = -1):
try:
# try to parse content as JSON and extract 'error'
# fallback to raw content if JSON parsing fails
error = json.loads(error).get('error', error)
except json.JSONDecodeError:
...
super().__init__(error)
self.error = error
'Reason for the error.'
self.status_code = status_code
'HTTP status code of the response.'
# ollama-python/examples/async-chat-stream/main.py
async def main():
parser = argparse.ArgumentParser()
parser.add_argument('--speak', default=False, action='store_true')
args = parser.parse_args()
speaker = None
if not args.speak:
...
elif say := shutil.which('say'):
speaker = say
elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')):
speaker = espeak
client = ollama.AsyncClient()
messages = []
while True:
if content_in := input('>>> '):
messages.append({'role': 'user', 'content': content_in})
content_out = ''
message = {'role': 'assistant', 'content': ''}
async for response in await client.chat(model='mistral', messages=messages, stream=True):
if response['done']:
messages.append(message)
content = response['message']['content']
print(content, end='', flush=True)
content_out += content
if content in ['.', '!', '?', '\n']:
await speak(speaker, content_out)
content_out = ''
message['content'] += content
if content_out:
await speak(speaker, content_out)
print()
# ollama-python/ollama/_types.py
def __init__(self, error: str):
super().__init__(error)
self.error = error
'Reason for the error.'
"""
import os
import io
import json
import httpx
import binascii
import platform
import urllib.parse
from os import PathLike
from pathlib import Path
from hashlib import sha256
from base64 import b64encode, b64decode
from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal
import sys
if sys.version_info < (3, 9):
from typing import Iterator, AsyncIterator
else:
from collections.abc import Iterator, AsyncIterator
from importlib import metadata
try:
__version__ = metadata.version('ollama')
except metadata.PackageNotFoundError:
__version__ = '0.0.0'
from ollama._types import Message, Options, RequestError, ResponseError
class BaseClient:
def __init__(
self,
client,
host: Optional[str] = None,
follow_redirects: bool = True,
timeout: Any = None,
**kwargs,
) -> None:
"""
    Creates an httpx client. Default parameters are the same as those defined in httpx
except for the following:
- `follow_redirects`: True
- `timeout`: None
`kwargs` are passed to the httpx client.
"""
headers = kwargs.pop('headers', {})
headers['Content-Type'] = 'application/json'
headers['Accept'] = 'application/json'
headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}'
self._client = client(
base_url=_parse_host(host or os.getenv('OLLAMA_HOST')),
follow_redirects=follow_redirects,
timeout=timeout,
headers=headers,
**kwargs,
)
class Client(BaseClient):
def __init__(self, host: Optional[str] = None, **kwargs) -> None:
super().__init__(httpx.Client, host, **kwargs)
def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
response = self._client.request(method, url, **kwargs)
try:
response.raise_for_status()
except httpx.HTTPStatusError as e:
raise ResponseError(e.response.text, e.response.status_code) from None
return response
def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]:
with self._client.stream(method, url, **kwargs) as r:
try:
r.raise_for_status()
except httpx.HTTPStatusError as e:
e.response.read()
raise ResponseError(e.response.text, e.response.status_code) from None
for line in r.iter_lines():
partial = json.loads(line)
if e := partial.get('error'):
raise ResponseError(e)
yield partial
def _request_stream(
self,
*args,
stream: bool = False,
**kwargs,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json()
def generate(
self,
model: str = '',
prompt: str = '',
system: str = '',
template: str = '',
context: Optional[Sequence[int]] = None,
stream: bool = False,
raw: bool = False,
format: Literal['', 'json'] = '',
images: Optional[Sequence[AnyStr]] = None,
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Create a response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
return self._request_stream(
'POST',
'/api/generate',
json={
'model': model,
'prompt': prompt,
'system': system,
'template': template,
'context': context or [],
'stream': stream,
'raw': raw,
'images': [_encode_image(image) for image in images or []],
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
def chat(
self,
model: str = '',
messages: Optional[Sequence[Message]] = None,
stream: bool = False,
format: Literal['', 'json'] = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Create a chat response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
for message in messages or []:
if not isinstance(message, dict):
raise TypeError('messages must be a list of Message or dict-like objects')
if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
if not message.get('content'):
raise RequestError('messages must contain content')
if images := message.get('images'):
message['images'] = [_encode_image(image) for image in images]
return self._request_stream(
'POST',
'/api/chat',
json={
'model': model,
'messages': messages,
'stream': stream,
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
def embeddings(
self,
model: str = '',
prompt: str = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Sequence[float]:
return self._request(
'POST',
'/api/embeddings',
json={
'model': model,
'prompt': prompt,
'options': options or {},
'keep_alive': keep_alive,
},
).json()
def pull(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return self._request_stream(
'POST',
'/api/pull',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
def push(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return self._request_stream(
'POST',
'/api/push',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
def create(
self,
model: str,
path: Optional[Union[str, PathLike]] = None,
modelfile: Optional[str] = None,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
if (realpath := _as_path(path)) and realpath.exists():
modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent)
elif modelfile:
modelfile = self._parse_modelfile(modelfile)
else:
raise RequestError('must provide either path or modelfile')
return self._request_stream(
'POST',
'/api/create',
json={
'name': model,
'modelfile': modelfile,
'stream': stream,
},
stream=stream,
)
def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
base = Path.cwd() if base is None else base
out = io.StringIO()
for line in io.StringIO(modelfile):
command, _, args = line.partition(' ')
if command.upper() not in ['FROM', 'ADAPTER']:
print(line, end='', file=out)
continue
path = Path(args.strip()).expanduser()
path = path if path.is_absolute() else base / path
if path.exists():
args = f'@{self._create_blob(path)}\n'
print(command, args, end='', file=out)
return out.getvalue()
def _create_blob(self, path: Union[str, Path]) -> str:
sha256sum = sha256()
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
sha256sum.update(chunk)
digest = f'sha256:{sha256sum.hexdigest()}'
try:
self._request('HEAD', f'/api/blobs/{digest}')
except ResponseError as e:
if e.status_code != 404:
raise
with open(path, 'rb') as r:
self._request('POST', f'/api/blobs/{digest}', content=r)
return digest
def delete(self, model: str) -> Mapping[str, Any]:
response = self._request('DELETE', '/api/delete', json={'name': model})
return {'status': 'success' if response.status_code == 200 else 'error'}
def list(self) -> Mapping[str, Any]:
return self._request('GET', '/api/tags').json()
def copy(self, source: str, destination: str) -> Mapping[str, Any]:
response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
return {'status': 'success' if response.status_code == 200 else 'error'}
def show(self, model: str) -> Mapping[str, Any]:
return self._request('POST', '/api/show', json={'name': model}).json()
class AsyncClient(BaseClient):
def __init__(self, host: Optional[str] = None, **kwargs) -> None:
super().__init__(httpx.AsyncClient, host, **kwargs)
async def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
response = await self._client.request(method, url, **kwargs)
try:
response.raise_for_status()
except httpx.HTTPStatusError as e:
raise ResponseError(e.response.text, e.response.status_code) from None
return response
async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]:
<fim_suffix>
return inner()
async def _request_stream(
self,
*args,
stream: bool = False,
**kwargs,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
if stream:
return await self._stream(*args, **kwargs)
response = await self._request(*args, **kwargs)
return response.json()
async def generate(
self,
model: str = '',
prompt: str = '',
system: str = '',
template: str = '',
context: Optional[Sequence[int]] = None,
stream: bool = False,
raw: bool = False,
format: Literal['', 'json'] = '',
images: Optional[Sequence[AnyStr]] = None,
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Create a response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
return await self._request_stream(
'POST',
'/api/generate',
json={
'model': model,
'prompt': prompt,
'system': system,
'template': template,
'context': context or [],
'stream': stream,
'raw': raw,
'images': [_encode_image(image) for image in images or []],
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
async def chat(
self,
model: str = '',
messages: Optional[Sequence[Message]] = None,
stream: bool = False,
format: Literal['', 'json'] = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Create a chat response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
for message in messages or []:
if not isinstance(message, dict):
        raise TypeError('messages must be a list of Message or dict-like objects')
if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
if not message.get('content'):
raise RequestError('messages must contain content')
if images := message.get('images'):
message['images'] = [_encode_image(image) for image in images]
return await self._request_stream(
'POST',
'/api/chat',
json={
'model': model,
'messages': messages,
'stream': stream,
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
async def embeddings(
self,
model: str = '',
prompt: str = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Sequence[float]:
response = await self._request(
'POST',
'/api/embeddings',
json={
'model': model,
'prompt': prompt,
'options': options or {},
'keep_alive': keep_alive,
},
)
return response.json()
async def pull(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
    Returns `ProgressResponse` if `stream` is `False`, otherwise returns an asynchronous `ProgressResponse` generator.
"""
return await self._request_stream(
'POST',
'/api/pull',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
async def push(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
    Returns `ProgressResponse` if `stream` is `False`, otherwise returns an asynchronous `ProgressResponse` generator.
"""
return await self._request_stream(
'POST',
'/api/push',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
async def create(
self,
model: str,
path: Optional[Union[str, PathLike]] = None,
modelfile: Optional[str] = None,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
    Returns `ProgressResponse` if `stream` is `False`, otherwise returns an asynchronous `ProgressResponse` generator.
"""
if (realpath := _as_path(path)) and realpath.exists():
modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent)
elif modelfile:
modelfile = await self._parse_modelfile(modelfile)
else:
raise RequestError('must provide either path or modelfile')
return await self._request_stream(
'POST',
'/api/create',
json={
'name': model,
'modelfile': modelfile,
'stream': stream,
},
stream=stream,
)
async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
base = Path.cwd() if base is None else base
out = io.StringIO()
for line in io.StringIO(modelfile):
command, _, args = line.partition(' ')
if command.upper() not in ['FROM', 'ADAPTER']:
print(line, end='', file=out)
continue
path = Path(args.strip()).expanduser()
path = path if path.is_absolute() else base / path
if path.exists():
args = f'@{await self._create_blob(path)}\n'
print(command, args, end='', file=out)
return out.getvalue()
async def _create_blob(self, path: Union[str, Path]) -> str:
sha256sum = sha256()
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
sha256sum.update(chunk)
digest = f'sha256:{sha256sum.hexdigest()}'
try:
await self._request('HEAD', f'/api/blobs/{digest}')
except ResponseError as e:
if e.status_code != 404:
raise
async def upload_bytes():
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
yield chunk
await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes())
return digest
async def delete(self, model: str) -> Mapping[str, Any]:
response = await self._request('DELETE', '/api/delete', json={'name': model})
return {'status': 'success' if response.status_code == 200 else 'error'}
async def list(self) -> Mapping[str, Any]:
response = await self._request('GET', '/api/tags')
return response.json()
async def copy(self, source: str, destination: str) -> Mapping[str, Any]:
response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
return {'status': 'success' if response.status_code == 200 else 'error'}
async def show(self, model: str) -> Mapping[str, Any]:
response = await self._request('POST', '/api/show', json={'name': model})
return response.json()
def _encode_image(image) -> str:
"""
>>> _encode_image(b'ollama')
'b2xsYW1h'
>>> _encode_image(io.BytesIO(b'ollama'))
'b2xsYW1h'
>>> _encode_image('LICENSE')
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo='
>>> _encode_image(Path('LICENSE'))
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo='
>>> _encode_image('YWJj')
'YWJj'
>>> _encode_image(b'YWJj')
'YWJj'
"""
if p := _as_path(image):
return b64encode(p.read_bytes()).decode('utf-8')
try:
b64decode(image, validate=True)
return image if isinstance(image, str) else image.decode('utf-8')
except (binascii.Error, TypeError):
...
if b := _as_bytesio(image):
return b64encode(b.read()).decode('utf-8')
raise RequestError('image must be bytes, path-like object, or file-like object')
def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]:
if isinstance(s, str) or isinstance(s, Path):
try:
if (p := Path(s)).exists():
return p
except Exception:
...
return None
def _as_bytesio(s: Any) -> Union[io.BytesIO, None]:
if isinstance(s, io.BytesIO):
return s
elif isinstance(s, bytes):
return io.BytesIO(s)
return None
def _parse_host(host: Optional[str]) -> str:
"""
>>> _parse_host(None)
'http://127.0.0.1:11434'
>>> _parse_host('')
'http://127.0.0.1:11434'
>>> _parse_host('1.2.3.4')
'http://1.2.3.4:11434'
>>> _parse_host(':56789')
'http://127.0.0.1:56789'
>>> _parse_host('1.2.3.4:56789')
'http://1.2.3.4:56789'
>>> _parse_host('http://1.2.3.4')
'http://1.2.3.4:80'
>>> _parse_host('https://1.2.3.4')
'https://1.2.3.4:443'
>>> _parse_host('https://1.2.3.4:56789')
'https://1.2.3.4:56789'
>>> _parse_host('example.com')
'http://example.com:11434'
>>> _parse_host('example.com:56789')
'http://example.com:56789'
>>> _parse_host('http://example.com')
'http://example.com:80'
>>> _parse_host('https://example.com')
'https://example.com:443'
>>> _parse_host('https://example.com:56789')
'https://example.com:56789'
>>> _parse_host('example.com/')
'http://example.com:11434'
>>> _parse_host('example.com:56789/')
'http://example.com:56789'
"""
host, port = host or '', 11434
scheme, _, hostport = host.partition('://')
if not hostport:
scheme, hostport = 'http', host
elif scheme == 'http':
port = 80
elif scheme == 'https':
port = 443
split = urllib.parse.urlsplit('://'.join([scheme, hostport]))
host = split.hostname or '127.0.0.1'
port = split.port or port
return f'{scheme}://{host}:{port}'
<fim_middle>async def inner():
async with self._client.stream(method, url, **kwargs) as r:
try:
r.raise_for_status()
except httpx.HTTPStatusError as e:
          await e.response.aread()
raise ResponseError(e.response.text, e.response.status_code) from None
async for line in r.aiter_lines():
partial = json.loads(line)
if e := partial.get('error'):
raise ResponseError(e)
yield partial | async def inner():
async with self._client.stream(method, url, **kwargs) as r:
try:
r.raise_for_status()
except httpx.HTTPStatusError as e:
          await e.response.aread()
raise ResponseError(e.response.text, e.response.status_code) from None
async for line in r.aiter_lines():
partial = json.loads(line)
if e := partial.get('error'):
raise ResponseError(e)
yield partial | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
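The completed `inner()` block above is the async generator that `AsyncClient.chat(..., stream=True)` hands back to callers. Below is a minimal driving sketch modeled on the async-chat-stream example quoted in the snippets; the model name 'mistral' comes from that example, while the prompt text is a placeholder.
import asyncio
import ollama
async def main():
  client = ollama.AsyncClient()
  # chat() itself must be awaited; with stream=True the awaited value is the
  # async generator returned by AsyncClient._stream(), so it is consumed with
  # `async for` rather than awaited a second time.
  async for part in await client.chat(
    model='mistral',
    messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
    stream=True,
  ):
    print(part['message']['content'], end='', flush=True)
  print()
asyncio.run(main())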
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# ollama-python/examples/async-chat-stream/main.py
async def speak(speaker, content):
if speaker:
p = await asyncio.create_subprocess_exec(speaker, content)
await p.communicate()
# ollama-python/ollama/_types.py
def __init__(self, error: str, status_code: int = -1):
try:
# try to parse content as JSON and extract 'error'
# fallback to raw content if JSON parsing fails
error = json.loads(error).get('error', error)
except json.JSONDecodeError:
...
super().__init__(error)
self.error = error
'Reason for the error.'
self.status_code = status_code
'HTTP status code of the response.'
# ollama-python/examples/async-chat-stream/main.py
async def main():
parser = argparse.ArgumentParser()
parser.add_argument('--speak', default=False, action='store_true')
args = parser.parse_args()
speaker = None
if not args.speak:
...
elif say := shutil.which('say'):
speaker = say
elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')):
speaker = espeak
client = ollama.AsyncClient()
messages = []
while True:
if content_in := input('>>> '):
messages.append({'role': 'user', 'content': content_in})
content_out = ''
message = {'role': 'assistant', 'content': ''}
async for response in await client.chat(model='mistral', messages=messages, stream=True):
if response['done']:
messages.append(message)
content = response['message']['content']
print(content, end='', flush=True)
content_out += content
if content in ['.', '!', '?', '\n']:
await speak(speaker, content_out)
content_out = ''
message['content'] += content
if content_out:
await speak(speaker, content_out)
print()
"""
import os
import io
import json
import httpx
import binascii
import platform
import urllib.parse
from os import PathLike
from pathlib import Path
from hashlib import sha256
from base64 import b64encode, b64decode
from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal
import sys
if sys.version_info < (3, 9):
from typing import Iterator, AsyncIterator
else:
from collections.abc import Iterator, AsyncIterator
from importlib import metadata
try:
__version__ = metadata.version('ollama')
except metadata.PackageNotFoundError:
__version__ = '0.0.0'
from ollama._types import Message, Options, RequestError, ResponseError
class BaseClient:
def __init__(
self,
client,
host: Optional[str] = None,
follow_redirects: bool = True,
timeout: Any = None,
**kwargs,
) -> None:
"""
    Creates an httpx client. Default parameters are the same as those defined in httpx
except for the following:
- `follow_redirects`: True
- `timeout`: None
`kwargs` are passed to the httpx client.
"""
headers = kwargs.pop('headers', {})
headers['Content-Type'] = 'application/json'
headers['Accept'] = 'application/json'
headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}'
self._client = client(
base_url=_parse_host(host or os.getenv('OLLAMA_HOST')),
follow_redirects=follow_redirects,
timeout=timeout,
headers=headers,
**kwargs,
)
class Client(BaseClient):
def __init__(self, host: Optional[str] = None, **kwargs) -> None:
super().__init__(httpx.Client, host, **kwargs)
def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
response = self._client.request(method, url, **kwargs)
try:
response.raise_for_status()
except httpx.HTTPStatusError as e:
raise ResponseError(e.response.text, e.response.status_code) from None
return response
def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]:
with self._client.stream(method, url, **kwargs) as r:
try:
r.raise_for_status()
except httpx.HTTPStatusError as e:
e.response.read()
raise ResponseError(e.response.text, e.response.status_code) from None
for line in r.iter_lines():
partial = json.loads(line)
if e := partial.get('error'):
raise ResponseError(e)
yield partial
def _request_stream(
self,
*args,
stream: bool = False,
**kwargs,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json()
def generate(
self,
model: str = '',
prompt: str = '',
system: str = '',
template: str = '',
context: Optional[Sequence[int]] = None,
stream: bool = False,
raw: bool = False,
format: Literal['', 'json'] = '',
images: Optional[Sequence[AnyStr]] = None,
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Create a response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
return self._request_stream(
'POST',
'/api/generate',
json={
'model': model,
'prompt': prompt,
'system': system,
'template': template,
'context': context or [],
'stream': stream,
'raw': raw,
'images': [_encode_image(image) for image in images or []],
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
def chat(
self,
model: str = '',
messages: Optional[Sequence[Message]] = None,
stream: bool = False,
format: Literal['', 'json'] = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Create a chat response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
for message in messages or []:
if not isinstance(message, dict):
raise TypeError('messages must be a list of Message or dict-like objects')
if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
if not message.get('content'):
raise RequestError('messages must contain content')
if images := message.get('images'):
message['images'] = [_encode_image(image) for image in images]
return self._request_stream(
'POST',
'/api/chat',
json={
'model': model,
'messages': messages,
'stream': stream,
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
def embeddings(
self,
model: str = '',
prompt: str = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Sequence[float]:
return self._request(
'POST',
'/api/embeddings',
json={
'model': model,
'prompt': prompt,
'options': options or {},
'keep_alive': keep_alive,
},
).json()
def pull(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return self._request_stream(
'POST',
'/api/pull',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
def push(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return self._request_stream(
'POST',
'/api/push',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
def create(
self,
model: str,
path: Optional[Union[str, PathLike]] = None,
modelfile: Optional[str] = None,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
if (realpath := _as_path(path)) and realpath.exists():
modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent)
elif modelfile:
modelfile = self._parse_modelfile(modelfile)
else:
raise RequestError('must provide either path or modelfile')
return self._request_stream(
'POST',
'/api/create',
json={
'name': model,
'modelfile': modelfile,
'stream': stream,
},
stream=stream,
)
def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
base = Path.cwd() if base is None else base
out = io.StringIO()
for line in io.StringIO(modelfile):
command, _, args = line.partition(' ')
if command.upper() not in ['FROM', 'ADAPTER']:
print(line, end='', file=out)
continue
path = Path(args.strip()).expanduser()
path = path if path.is_absolute() else base / path
if path.exists():
args = f'@{self._create_blob(path)}\n'
print(command, args, end='', file=out)
return out.getvalue()
def _create_blob(self, path: Union[str, Path]) -> str:
sha256sum = sha256()
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
sha256sum.update(chunk)
digest = f'sha256:{sha256sum.hexdigest()}'
try:
self._request('HEAD', f'/api/blobs/{digest}')
except ResponseError as e:
if e.status_code != 404:
raise
with open(path, 'rb') as r:
self._request('POST', f'/api/blobs/{digest}', content=r)
return digest
def delete(self, model: str) -> Mapping[str, Any]:
response = self._request('DELETE', '/api/delete', json={'name': model})
return {'status': 'success' if response.status_code == 200 else 'error'}
def list(self) -> Mapping[str, Any]:
return self._request('GET', '/api/tags').json()
def copy(self, source: str, destination: str) -> Mapping[str, Any]:
response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
return {'status': 'success' if response.status_code == 200 else 'error'}
def show(self, model: str) -> Mapping[str, Any]:
return self._request('POST', '/api/show', json={'name': model}).json()
class AsyncClient(BaseClient):
def __init__(self, host: Optional[str] = None, **kwargs) -> None:
super().__init__(httpx.AsyncClient, host, **kwargs)
async def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
response = await self._client.request(method, url, **kwargs)
try:
response.raise_for_status()
except httpx.HTTPStatusError as e:
raise ResponseError(e.response.text, e.response.status_code) from None
return response
async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]:
async def inner():
async with self._client.stream(method, url, **kwargs) as r:
try:
r.raise_for_status()
except httpx.HTTPStatusError as e:
          await e.response.aread()
raise ResponseError(e.response.text, e.response.status_code) from None
async for line in r.aiter_lines():
partial = json.loads(line)
if e := partial.get('error'):
raise ResponseError(e)
yield partial
return inner()
async def _request_stream(
self,
*args,
stream: bool = False,
**kwargs,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
if stream:
return await self._stream(*args, **kwargs)
response = await self._request(*args, **kwargs)
return response.json()
async def generate(
self,
model: str = '',
prompt: str = '',
system: str = '',
template: str = '',
context: Optional[Sequence[int]] = None,
stream: bool = False,
raw: bool = False,
format: Literal['', 'json'] = '',
images: Optional[Sequence[AnyStr]] = None,
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Create a response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
return await self._request_stream(
'POST',
'/api/generate',
json={
'model': model,
'prompt': prompt,
'system': system,
'template': template,
'context': context or [],
'stream': stream,
'raw': raw,
'images': [_encode_image(image) for image in images or []],
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
async def chat(
self,
model: str = '',
messages: Optional[Sequence[Message]] = None,
stream: bool = False,
format: Literal['', 'json'] = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Create a chat response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
for message in messages or []:
if not isinstance(message, dict):
        raise TypeError('messages must be a list of Message or dict-like objects')
if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
if not message.get('content'):
raise RequestError('messages must contain content')
if images := message.get('images'):
message['images'] = [_encode_image(image) for image in images]
return await self._request_stream(
'POST',
'/api/chat',
json={
'model': model,
'messages': messages,
'stream': stream,
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
async def embeddings(
self,
model: str = '',
prompt: str = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Sequence[float]:
response = await self._request(
'POST',
'/api/embeddings',
json={
'model': model,
'prompt': prompt,
'options': options or {},
'keep_alive': keep_alive,
},
)
return response.json()
async def pull(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
    Returns `ProgressResponse` if `stream` is `False`, otherwise returns an asynchronous `ProgressResponse` generator.
"""
return await self._request_stream(
'POST',
'/api/pull',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
async def push(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
    Returns `ProgressResponse` if `stream` is `False`, otherwise returns an asynchronous `ProgressResponse` generator.
"""
return await self._request_stream(
'POST',
'/api/push',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
async def create(
self,
model: str,
path: Optional[Union[str, PathLike]] = None,
modelfile: Optional[str] = None,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
    Returns `ProgressResponse` if `stream` is `False`, otherwise returns an asynchronous `ProgressResponse` generator.
"""
if (realpath := _as_path(path)) and realpath.exists():
modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent)
elif modelfile:
modelfile = await self._parse_modelfile(modelfile)
else:
raise RequestError('must provide either path or modelfile')
return await self._request_stream(
'POST',
'/api/create',
json={
'name': model,
'modelfile': modelfile,
'stream': stream,
},
stream=stream,
)
async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
base = Path.cwd() if base is None else base
out = io.StringIO()
for line in io.StringIO(modelfile):
command, _, args = line.partition(' ')
if command.upper() not in ['FROM', 'ADAPTER']:
print(line, end='', file=out)
continue
path = Path(args.strip()).expanduser()
path = path if path.is_absolute() else base / path
if path.exists():
args = f'@{await self._create_blob(path)}\n'
print(command, args, end='', file=out)
return out.getvalue()
async def _create_blob(self, path: Union[str, Path]) -> str:
sha256sum = sha256()
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
sha256sum.update(chunk)
digest = f'sha256:{sha256sum.hexdigest()}'
try:
await self._request('HEAD', f'/api/blobs/{digest}')
except ResponseError as e:
if e.status_code != 404:
raise
<fim_suffix>
await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes())
return digest
async def delete(self, model: str) -> Mapping[str, Any]:
response = await self._request('DELETE', '/api/delete', json={'name': model})
return {'status': 'success' if response.status_code == 200 else 'error'}
async def list(self) -> Mapping[str, Any]:
response = await self._request('GET', '/api/tags')
return response.json()
async def copy(self, source: str, destination: str) -> Mapping[str, Any]:
response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
return {'status': 'success' if response.status_code == 200 else 'error'}
async def show(self, model: str) -> Mapping[str, Any]:
response = await self._request('POST', '/api/show', json={'name': model})
return response.json()
def _encode_image(image) -> str:
"""
>>> _encode_image(b'ollama')
'b2xsYW1h'
>>> _encode_image(io.BytesIO(b'ollama'))
'b2xsYW1h'
>>> _encode_image('LICENSE')
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo='
>>> _encode_image(Path('LICENSE'))
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo='
>>> _encode_image('YWJj')
'YWJj'
>>> _encode_image(b'YWJj')
'YWJj'
"""
if p := _as_path(image):
return b64encode(p.read_bytes()).decode('utf-8')
try:
b64decode(image, validate=True)
return image if isinstance(image, str) else image.decode('utf-8')
except (binascii.Error, TypeError):
...
if b := _as_bytesio(image):
return b64encode(b.read()).decode('utf-8')
raise RequestError('image must be bytes, path-like object, or file-like object')
def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]:
if isinstance(s, str) or isinstance(s, Path):
try:
if (p := Path(s)).exists():
return p
except Exception:
...
return None
def _as_bytesio(s: Any) -> Union[io.BytesIO, None]:
if isinstance(s, io.BytesIO):
return s
elif isinstance(s, bytes):
return io.BytesIO(s)
return None
def _parse_host(host: Optional[str]) -> str:
"""
>>> _parse_host(None)
'http://127.0.0.1:11434'
>>> _parse_host('')
'http://127.0.0.1:11434'
>>> _parse_host('1.2.3.4')
'http://1.2.3.4:11434'
>>> _parse_host(':56789')
'http://127.0.0.1:56789'
>>> _parse_host('1.2.3.4:56789')
'http://1.2.3.4:56789'
>>> _parse_host('http://1.2.3.4')
'http://1.2.3.4:80'
>>> _parse_host('https://1.2.3.4')
'https://1.2.3.4:443'
>>> _parse_host('https://1.2.3.4:56789')
'https://1.2.3.4:56789'
>>> _parse_host('example.com')
'http://example.com:11434'
>>> _parse_host('example.com:56789')
'http://example.com:56789'
>>> _parse_host('http://example.com')
'http://example.com:80'
>>> _parse_host('https://example.com')
'https://example.com:443'
>>> _parse_host('https://example.com:56789')
'https://example.com:56789'
>>> _parse_host('example.com/')
'http://example.com:11434'
>>> _parse_host('example.com:56789/')
'http://example.com:56789'
"""
host, port = host or '', 11434
scheme, _, hostport = host.partition('://')
if not hostport:
scheme, hostport = 'http', host
elif scheme == 'http':
port = 80
elif scheme == 'https':
port = 443
split = urllib.parse.urlsplit('://'.join([scheme, hostport]))
host = split.hostname or '127.0.0.1'
port = split.port or port
return f'{scheme}://{host}:{port}'
<fim_middle>async def upload_bytes():
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
yield chunk | async def upload_bytes():
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
yield chunk | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
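A minimal editorial sketch of the chunked streaming-upload pattern that the completion above exercises, assuming only httpx's documented support for async byte iterators as request content; the function name, URL, path, and chunk size are placeholders.

import httpx

async def stream_file(path: str, url: str, chunk_size: int = 32 * 1024) -> None:
    # Read the file in fixed-size chunks and hand httpx an async generator,
    # so the whole payload never has to sit in memory at once.
    async def chunks():
        with open(path, 'rb') as f:  # plain blocking reads; fine for a sketch
            while chunk := f.read(chunk_size):
                yield chunk
    async with httpx.AsyncClient() as client:
        response = await client.post(url, content=chunks())
        response.raise_for_status()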
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# ollama-python/examples/async-chat-stream/main.py
async def speak(speaker, content):
if speaker:
p = await asyncio.create_subprocess_exec(speaker, content)
await p.communicate()
# ollama-python/ollama/_types.py
def __init__(self, error: str, status_code: int = -1):
try:
# try to parse content as JSON and extract 'error'
# fallback to raw content if JSON parsing fails
error = json.loads(error).get('error', error)
except json.JSONDecodeError:
...
super().__init__(error)
self.error = error
'Reason for the error.'
self.status_code = status_code
'HTTP status code of the response.'
# ollama-python/examples/async-chat-stream/main.py
async def main():
parser = argparse.ArgumentParser()
parser.add_argument('--speak', default=False, action='store_true')
args = parser.parse_args()
speaker = None
if not args.speak:
...
elif say := shutil.which('say'):
speaker = say
elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')):
speaker = espeak
client = ollama.AsyncClient()
messages = []
while True:
if content_in := input('>>> '):
messages.append({'role': 'user', 'content': content_in})
content_out = ''
message = {'role': 'assistant', 'content': ''}
async for response in await client.chat(model='mistral', messages=messages, stream=True):
if response['done']:
messages.append(message)
content = response['message']['content']
print(content, end='', flush=True)
content_out += content
if content in ['.', '!', '?', '\n']:
await speak(speaker, content_out)
content_out = ''
message['content'] += content
if content_out:
await speak(speaker, content_out)
print()
"""
import os
import io
import json
import httpx
import binascii
import platform
import urllib.parse
from os import PathLike
from pathlib import Path
from hashlib import sha256
from base64 import b64encode, b64decode
from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal
import sys
if sys.version_info < (3, 9):
from typing import Iterator, AsyncIterator
else:
from collections.abc import Iterator, AsyncIterator
from importlib import metadata
try:
__version__ = metadata.version('ollama')
except metadata.PackageNotFoundError:
__version__ = '0.0.0'
from ollama._types import Message, Options, RequestError, ResponseError
class BaseClient:
def __init__(
self,
client,
host: Optional[str] = None,
follow_redirects: bool = True,
timeout: Any = None,
**kwargs,
) -> None:
"""
Creates an httpx client. Default parameters are the same as those defined in httpx
except for the following:
- `follow_redirects`: True
- `timeout`: None
`kwargs` are passed to the httpx client.
"""
headers = kwargs.pop('headers', {})
headers['Content-Type'] = 'application/json'
headers['Accept'] = 'application/json'
headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}'
self._client = client(
base_url=_parse_host(host or os.getenv('OLLAMA_HOST')),
follow_redirects=follow_redirects,
timeout=timeout,
headers=headers,
**kwargs,
)
class Client(BaseClient):
def __init__(self, host: Optional[str] = None, **kwargs) -> None:
super().__init__(httpx.Client, host, **kwargs)
def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
response = self._client.request(method, url, **kwargs)
try:
response.raise_for_status()
except httpx.HTTPStatusError as e:
raise ResponseError(e.response.text, e.response.status_code) from None
return response
def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]:
with self._client.stream(method, url, **kwargs) as r:
try:
r.raise_for_status()
except httpx.HTTPStatusError as e:
e.response.read()
raise ResponseError(e.response.text, e.response.status_code) from None
for line in r.iter_lines():
partial = json.loads(line)
if e := partial.get('error'):
raise ResponseError(e)
yield partial
def _request_stream(
self,
*args,
stream: bool = False,
**kwargs,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json()
def generate(
self,
model: str = '',
prompt: str = '',
system: str = '',
template: str = '',
context: Optional[Sequence[int]] = None,
stream: bool = False,
raw: bool = False,
format: Literal['', 'json'] = '',
images: Optional[Sequence[AnyStr]] = None,
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Create a response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
return self._request_stream(
'POST',
'/api/generate',
json={
'model': model,
'prompt': prompt,
'system': system,
'template': template,
'context': context or [],
'stream': stream,
'raw': raw,
'images': [_encode_image(image) for image in images or []],
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
def chat(
self,
model: str = '',
messages: Optional[Sequence[Message]] = None,
stream: bool = False,
format: Literal['', 'json'] = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Create a chat response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
for message in messages or []:
if not isinstance(message, dict):
raise TypeError('messages must be a list of Message or dict-like objects')
if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
if not message.get('content'):
raise RequestError('messages must contain content')
if images := message.get('images'):
message['images'] = [_encode_image(image) for image in images]
return self._request_stream(
'POST',
'/api/chat',
json={
'model': model,
'messages': messages,
'stream': stream,
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
def embeddings(
self,
model: str = '',
prompt: str = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Sequence[float]:
return self._request(
'POST',
'/api/embeddings',
json={
'model': model,
'prompt': prompt,
'options': options or {},
'keep_alive': keep_alive,
},
).json()
def pull(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return self._request_stream(
'POST',
'/api/pull',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
def push(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return self._request_stream(
'POST',
'/api/push',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
def create(
self,
model: str,
path: Optional[Union[str, PathLike]] = None,
modelfile: Optional[str] = None,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
if (realpath := _as_path(path)) and realpath.exists():
modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent)
elif modelfile:
modelfile = self._parse_modelfile(modelfile)
else:
raise RequestError('must provide either path or modelfile')
return self._request_stream(
'POST',
'/api/create',
json={
'name': model,
'modelfile': modelfile,
'stream': stream,
},
stream=stream,
)
def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
base = Path.cwd() if base is None else base
out = io.StringIO()
for line in io.StringIO(modelfile):
command, _, args = line.partition(' ')
if command.upper() not in ['FROM', 'ADAPTER']:
print(line, end='', file=out)
continue
path = Path(args.strip()).expanduser()
path = path if path.is_absolute() else base / path
if path.exists():
args = f'@{self._create_blob(path)}\n'
print(command, args, end='', file=out)
return out.getvalue()
def _create_blob(self, path: Union[str, Path]) -> str:
sha256sum = sha256()
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
sha256sum.update(chunk)
digest = f'sha256:{sha256sum.hexdigest()}'
try:
self._request('HEAD', f'/api/blobs/{digest}')
except ResponseError as e:
if e.status_code != 404:
raise
with open(path, 'rb') as r:
self._request('POST', f'/api/blobs/{digest}', content=r)
return digest
def delete(self, model: str) -> Mapping[str, Any]:
response = self._request('DELETE', '/api/delete', json={'name': model})
return {'status': 'success' if response.status_code == 200 else 'error'}
def list(self) -> Mapping[str, Any]:
return self._request('GET', '/api/tags').json()
def copy(self, source: str, destination: str) -> Mapping[str, Any]:
response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
return {'status': 'success' if response.status_code == 200 else 'error'}
def show(self, model: str) -> Mapping[str, Any]:
return self._request('POST', '/api/show', json={'name': model}).json()
class AsyncClient(BaseClient):
def __init__(self, host: Optional[str] = None, **kwargs) -> None:
super().__init__(httpx.AsyncClient, host, **kwargs)
async def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
response = await self._client.request(method, url, **kwargs)
try:
response.raise_for_status()
except httpx.HTTPStatusError as e:
raise ResponseError(e.response.text, e.response.status_code) from None
return response
async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]:
async def inner():
async with self._client.stream(method, url, **kwargs) as r:
try:
r.raise_for_status()
except httpx.HTTPStatusError as e:
await e.response.aread()  # async response body must be loaded with aread() before accessing .text
raise ResponseError(e.response.text, e.response.status_code) from None
async for line in r.aiter_lines():
partial = json.loads(line)
if e := partial.get('error'):
raise ResponseError(e)
yield partial
return inner()
async def _request_stream(
self,
*args,
stream: bool = False,
**kwargs,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
if stream:
return await self._stream(*args, **kwargs)
response = await self._request(*args, **kwargs)
return response.json()
async def generate(
self,
model: str = '',
prompt: str = '',
system: str = '',
template: str = '',
context: Optional[Sequence[int]] = None,
stream: bool = False,
raw: bool = False,
format: Literal['', 'json'] = '',
images: Optional[Sequence[AnyStr]] = None,
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Create a response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
return await self._request_stream(
'POST',
'/api/generate',
json={
'model': model,
'prompt': prompt,
'system': system,
'template': template,
'context': context or [],
'stream': stream,
'raw': raw,
'images': [_encode_image(image) for image in images or []],
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
async def chat(
self,
model: str = '',
messages: Optional[Sequence[Message]] = None,
stream: bool = False,
format: Literal['', 'json'] = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Create a chat response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
for message in messages or []:
if not isinstance(message, dict):
raise TypeError('messages must be a list of Message or dict-like objects')
if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
if not message.get('content'):
raise RequestError('messages must contain content')
if images := message.get('images'):
message['images'] = [_encode_image(image) for image in images]
return await self._request_stream(
'POST',
'/api/chat',
json={
'model': model,
'messages': messages,
'stream': stream,
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
async def embeddings(
self,
model: str = '',
prompt: str = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Sequence[float]:
response = await self._request(
'POST',
'/api/embeddings',
json={
'model': model,
'prompt': prompt,
'options': options or {},
'keep_alive': keep_alive,
},
)
return response.json()
async def pull(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return await self._request_stream(
'POST',
'/api/pull',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
async def push(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return await self._request_stream(
'POST',
'/api/push',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
async def create(
self,
model: str,
path: Optional[Union[str, PathLike]] = None,
modelfile: Optional[str] = None,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
if (realpath := _as_path(path)) and realpath.exists():
modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent)
elif modelfile:
modelfile = await self._parse_modelfile(modelfile)
else:
raise RequestError('must provide either path or modelfile')
return await self._request_stream(
'POST',
'/api/create',
json={
'name': model,
'modelfile': modelfile,
'stream': stream,
},
stream=stream,
)
async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
base = Path.cwd() if base is None else base
out = io.StringIO()
for line in io.StringIO(modelfile):
command, _, args = line.partition(' ')
if command.upper() not in ['FROM', 'ADAPTER']:
print(line, end='', file=out)
continue
path = Path(args.strip()).expanduser()
path = path if path.is_absolute() else base / path
if path.exists():
args = f'@{await self._create_blob(path)}\n'
print(command, args, end='', file=out)
return out.getvalue()
async def _create_blob(self, path: Union[str, Path]) -> str:
sha256sum = sha256()
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
sha256sum.update(chunk)
digest = f'sha256:{sha256sum.hexdigest()}'
try:
await self._request('HEAD', f'/api/blobs/{digest}')
except ResponseError as e:
if e.status_code != 404:
raise
async def upload_bytes():
with open(path, 'rb') as r:
<fim_suffix>
await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes())
return digest
async def delete(self, model: str) -> Mapping[str, Any]:
response = await self._request('DELETE', '/api/delete', json={'name': model})
return {'status': 'success' if response.status_code == 200 else 'error'}
async def list(self) -> Mapping[str, Any]:
response = await self._request('GET', '/api/tags')
return response.json()
async def copy(self, source: str, destination: str) -> Mapping[str, Any]:
response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
return {'status': 'success' if response.status_code == 200 else 'error'}
async def show(self, model: str) -> Mapping[str, Any]:
response = await self._request('POST', '/api/show', json={'name': model})
return response.json()
def _encode_image(image) -> str:
"""
>>> _encode_image(b'ollama')
'b2xsYW1h'
>>> _encode_image(io.BytesIO(b'ollama'))
'b2xsYW1h'
>>> _encode_image('LICENSE')
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo='
>>> _encode_image(Path('LICENSE'))
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo='
>>> _encode_image('YWJj')
'YWJj'
>>> _encode_image(b'YWJj')
'YWJj'
"""
if p := _as_path(image):
return b64encode(p.read_bytes()).decode('utf-8')
try:
b64decode(image, validate=True)
return image if isinstance(image, str) else image.decode('utf-8')
except (binascii.Error, TypeError):
...
if b := _as_bytesio(image):
return b64encode(b.read()).decode('utf-8')
raise RequestError('image must be bytes, path-like object, or file-like object')
def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]:
if isinstance(s, str) or isinstance(s, Path):
try:
if (p := Path(s)).exists():
return p
except Exception:
...
return None
def _as_bytesio(s: Any) -> Union[io.BytesIO, None]:
if isinstance(s, io.BytesIO):
return s
elif isinstance(s, bytes):
return io.BytesIO(s)
return None
def _parse_host(host: Optional[str]) -> str:
"""
>>> _parse_host(None)
'http://127.0.0.1:11434'
>>> _parse_host('')
'http://127.0.0.1:11434'
>>> _parse_host('1.2.3.4')
'http://1.2.3.4:11434'
>>> _parse_host(':56789')
'http://127.0.0.1:56789'
>>> _parse_host('1.2.3.4:56789')
'http://1.2.3.4:56789'
>>> _parse_host('http://1.2.3.4')
'http://1.2.3.4:80'
>>> _parse_host('https://1.2.3.4')
'https://1.2.3.4:443'
>>> _parse_host('https://1.2.3.4:56789')
'https://1.2.3.4:56789'
>>> _parse_host('example.com')
'http://example.com:11434'
>>> _parse_host('example.com:56789')
'http://example.com:56789'
>>> _parse_host('http://example.com')
'http://example.com:80'
>>> _parse_host('https://example.com')
'https://example.com:443'
>>> _parse_host('https://example.com:56789')
'https://example.com:56789'
>>> _parse_host('example.com/')
'http://example.com:11434'
>>> _parse_host('example.com:56789/')
'http://example.com:56789'
"""
host, port = host or '', 11434
scheme, _, hostport = host.partition('://')
if not hostport:
scheme, hostport = 'http', host
elif scheme == 'http':
port = 80
elif scheme == 'https':
port = 443
split = urllib.parse.urlsplit('://'.join([scheme, hostport]))
host = split.hostname or '127.0.0.1'
port = split.port or port
return f'{scheme}://{host}:{port}'
<fim_middle>while True:
chunk = r.read(32 * 1024)
if not chunk:
break
yield chunk | while True:
chunk = r.read(32 * 1024)
if not chunk:
break
yield chunk | WHILE | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
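A minimal sketch of the chunked-read idiom targeted above, applied to the digest computation it feeds: hashing a file without loading it fully into memory. The helper name file_digest and the 32 KiB chunk size are illustrative only.

from hashlib import sha256

def file_digest(path: str, chunk_size: int = 32 * 1024) -> str:
    # iter(callable, sentinel) keeps calling f.read(chunk_size) until it returns b'' at EOF.
    h = sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return f'sha256:{h.hexdigest()}'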
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# ollama-python/examples/async-chat-stream/main.py
async def speak(speaker, content):
if speaker:
p = await asyncio.create_subprocess_exec(speaker, content)
await p.communicate()
# ollama-python/ollama/_types.py
def __init__(self, error: str, status_code: int = -1):
try:
# try to parse content as JSON and extract 'error'
# fallback to raw content if JSON parsing fails
error = json.loads(error).get('error', error)
except json.JSONDecodeError:
...
super().__init__(error)
self.error = error
'Reason for the error.'
self.status_code = status_code
'HTTP status code of the response.'
# ollama-python/examples/async-chat-stream/main.py
async def main():
parser = argparse.ArgumentParser()
parser.add_argument('--speak', default=False, action='store_true')
args = parser.parse_args()
speaker = None
if not args.speak:
...
elif say := shutil.which('say'):
speaker = say
elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')):
speaker = espeak
client = ollama.AsyncClient()
messages = []
while True:
if content_in := input('>>> '):
messages.append({'role': 'user', 'content': content_in})
content_out = ''
message = {'role': 'assistant', 'content': ''}
async for response in await client.chat(model='mistral', messages=messages, stream=True):
if response['done']:
messages.append(message)
content = response['message']['content']
print(content, end='', flush=True)
content_out += content
if content in ['.', '!', '?', '\n']:
await speak(speaker, content_out)
content_out = ''
message['content'] += content
if content_out:
await speak(speaker, content_out)
print()
"""
import os
import io
import json
import httpx
import binascii
import platform
import urllib.parse
from os import PathLike
from pathlib import Path
from hashlib import sha256
from base64 import b64encode, b64decode
from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal
import sys
if sys.version_info < (3, 9):
from typing import Iterator, AsyncIterator
else:
from collections.abc import Iterator, AsyncIterator
from importlib import metadata
try:
__version__ = metadata.version('ollama')
except metadata.PackageNotFoundError:
__version__ = '0.0.0'
from ollama._types import Message, Options, RequestError, ResponseError
class BaseClient:
def __init__(
self,
client,
host: Optional[str] = None,
follow_redirects: bool = True,
timeout: Any = None,
**kwargs,
) -> None:
"""
Creates an httpx client. Default parameters are the same as those defined in httpx
except for the following:
- `follow_redirects`: True
- `timeout`: None
`kwargs` are passed to the httpx client.
"""
headers = kwargs.pop('headers', {})
headers['Content-Type'] = 'application/json'
headers['Accept'] = 'application/json'
headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}'
self._client = client(
base_url=_parse_host(host or os.getenv('OLLAMA_HOST')),
follow_redirects=follow_redirects,
timeout=timeout,
headers=headers,
**kwargs,
)
class Client(BaseClient):
def __init__(self, host: Optional[str] = None, **kwargs) -> None:
super().__init__(httpx.Client, host, **kwargs)
def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
response = self._client.request(method, url, **kwargs)
try:
response.raise_for_status()
except httpx.HTTPStatusError as e:
raise ResponseError(e.response.text, e.response.status_code) from None
return response
def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]:
with self._client.stream(method, url, **kwargs) as r:
try:
r.raise_for_status()
except httpx.HTTPStatusError as e:
e.response.read()
raise ResponseError(e.response.text, e.response.status_code) from None
for line in r.iter_lines():
partial = json.loads(line)
if e := partial.get('error'):
raise ResponseError(e)
yield partial
def _request_stream(
self,
*args,
stream: bool = False,
**kwargs,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json()
def generate(
self,
model: str = '',
prompt: str = '',
system: str = '',
template: str = '',
context: Optional[Sequence[int]] = None,
stream: bool = False,
raw: bool = False,
format: Literal['', 'json'] = '',
images: Optional[Sequence[AnyStr]] = None,
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Create a response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
return self._request_stream(
'POST',
'/api/generate',
json={
'model': model,
'prompt': prompt,
'system': system,
'template': template,
'context': context or [],
'stream': stream,
'raw': raw,
'images': [_encode_image(image) for image in images or []],
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
def chat(
self,
model: str = '',
messages: Optional[Sequence[Message]] = None,
stream: bool = False,
format: Literal['', 'json'] = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Create a chat response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
for message in messages or []:
if not isinstance(message, dict):
raise TypeError('messages must be a list of Message or dict-like objects')
if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
if not message.get('content'):
raise RequestError('messages must contain content')
if images := message.get('images'):
message['images'] = [_encode_image(image) for image in images]
return self._request_stream(
'POST',
'/api/chat',
json={
'model': model,
'messages': messages,
'stream': stream,
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
def embeddings(
self,
model: str = '',
prompt: str = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Sequence[float]:
return self._request(
'POST',
'/api/embeddings',
json={
'model': model,
'prompt': prompt,
'options': options or {},
'keep_alive': keep_alive,
},
).json()
def pull(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return self._request_stream(
'POST',
'/api/pull',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
def push(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return self._request_stream(
'POST',
'/api/push',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
def create(
self,
model: str,
path: Optional[Union[str, PathLike]] = None,
modelfile: Optional[str] = None,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
if (realpath := _as_path(path)) and realpath.exists():
modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent)
elif modelfile:
modelfile = self._parse_modelfile(modelfile)
else:
raise RequestError('must provide either path or modelfile')
return self._request_stream(
'POST',
'/api/create',
json={
'name': model,
'modelfile': modelfile,
'stream': stream,
},
stream=stream,
)
def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
base = Path.cwd() if base is None else base
out = io.StringIO()
for line in io.StringIO(modelfile):
command, _, args = line.partition(' ')
if command.upper() not in ['FROM', 'ADAPTER']:
print(line, end='', file=out)
continue
path = Path(args.strip()).expanduser()
path = path if path.is_absolute() else base / path
if path.exists():
args = f'@{self._create_blob(path)}\n'
print(command, args, end='', file=out)
return out.getvalue()
def _create_blob(self, path: Union[str, Path]) -> str:
sha256sum = sha256()
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
sha256sum.update(chunk)
digest = f'sha256:{sha256sum.hexdigest()}'
try:
self._request('HEAD', f'/api/blobs/{digest}')
except ResponseError as e:
if e.status_code != 404:
raise
with open(path, 'rb') as r:
self._request('POST', f'/api/blobs/{digest}', content=r)
return digest
def delete(self, model: str) -> Mapping[str, Any]:
response = self._request('DELETE', '/api/delete', json={'name': model})
return {'status': 'success' if response.status_code == 200 else 'error'}
def list(self) -> Mapping[str, Any]:
return self._request('GET', '/api/tags').json()
def copy(self, source: str, destination: str) -> Mapping[str, Any]:
response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
return {'status': 'success' if response.status_code == 200 else 'error'}
def show(self, model: str) -> Mapping[str, Any]:
return self._request('POST', '/api/show', json={'name': model}).json()
class AsyncClient(BaseClient):
def __init__(self, host: Optional[str] = None, **kwargs) -> None:
super().__init__(httpx.AsyncClient, host, **kwargs)
async def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
response = await self._client.request(method, url, **kwargs)
try:
response.raise_for_status()
except httpx.HTTPStatusError as e:
raise ResponseError(e.response.text, e.response.status_code) from None
return response
async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]:
async def inner():
async with self._client.stream(method, url, **kwargs) as r:
try:
r.raise_for_status()
except httpx.HTTPStatusError as e:
await e.response.aread()  # async response body must be loaded with aread() before accessing .text
raise ResponseError(e.response.text, e.response.status_code) from None
async for line in r.aiter_lines():
partial = json.loads(line)
if e := partial.get('error'):
raise ResponseError(e)
yield partial
return inner()
async def _request_stream(
self,
*args,
stream: bool = False,
**kwargs,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
if stream:
return await self._stream(*args, **kwargs)
response = await self._request(*args, **kwargs)
return response.json()
async def generate(
self,
model: str = '',
prompt: str = '',
system: str = '',
template: str = '',
context: Optional[Sequence[int]] = None,
stream: bool = False,
raw: bool = False,
format: Literal['', 'json'] = '',
images: Optional[Sequence[AnyStr]] = None,
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Create a response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
return await self._request_stream(
'POST',
'/api/generate',
json={
'model': model,
'prompt': prompt,
'system': system,
'template': template,
'context': context or [],
'stream': stream,
'raw': raw,
'images': [_encode_image(image) for image in images or []],
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
async def chat(
self,
model: str = '',
messages: Optional[Sequence[Message]] = None,
stream: bool = False,
format: Literal['', 'json'] = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Create a chat response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
for message in messages or []:
if not isinstance(message, dict):
raise TypeError('messages must be a list of Message or dict-like objects')
if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
if not message.get('content'):
raise RequestError('messages must contain content')
if images := message.get('images'):
message['images'] = [_encode_image(image) for image in images]
return await self._request_stream(
'POST',
'/api/chat',
json={
'model': model,
'messages': messages,
'stream': stream,
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
async def embeddings(
self,
model: str = '',
prompt: str = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Sequence[float]:
response = await self._request(
'POST',
'/api/embeddings',
json={
'model': model,
'prompt': prompt,
'options': options or {},
'keep_alive': keep_alive,
},
)
return response.json()
async def pull(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return await self._request_stream(
'POST',
'/api/pull',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
async def push(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return await self._request_stream(
'POST',
'/api/push',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
async def create(
self,
model: str,
path: Optional[Union[str, PathLike]] = None,
modelfile: Optional[str] = None,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
if (realpath := _as_path(path)) and realpath.exists():
modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent)
elif modelfile:
modelfile = await self._parse_modelfile(modelfile)
else:
raise RequestError('must provide either path or modelfile')
return await self._request_stream(
'POST',
'/api/create',
json={
'name': model,
'modelfile': modelfile,
'stream': stream,
},
stream=stream,
)
async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
base = Path.cwd() if base is None else base
out = io.StringIO()
for line in io.StringIO(modelfile):
command, _, args = line.partition(' ')
if command.upper() not in ['FROM', 'ADAPTER']:
print(line, end='', file=out)
continue
path = Path(args.strip()).expanduser()
path = path if path.is_absolute() else base / path
if path.exists():
args = f'@{await self._create_blob(path)}\n'
print(command, args, end='', file=out)
return out.getvalue()
async def _create_blob(self, path: Union[str, Path]) -> str:
sha256sum = sha256()
with open(path, 'rb') as r:
<fim_suffix>
digest = f'sha256:{sha256sum.hexdigest()}'
try:
await self._request('HEAD', f'/api/blobs/{digest}')
except ResponseError as e:
if e.status_code != 404:
raise
async def upload_bytes():
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
yield chunk
await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes())
return digest
async def delete(self, model: str) -> Mapping[str, Any]:
response = await self._request('DELETE', '/api/delete', json={'name': model})
return {'status': 'success' if response.status_code == 200 else 'error'}
async def list(self) -> Mapping[str, Any]:
response = await self._request('GET', '/api/tags')
return response.json()
async def copy(self, source: str, destination: str) -> Mapping[str, Any]:
response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
return {'status': 'success' if response.status_code == 200 else 'error'}
async def show(self, model: str) -> Mapping[str, Any]:
response = await self._request('POST', '/api/show', json={'name': model})
return response.json()
def _encode_image(image) -> str:
"""
>>> _encode_image(b'ollama')
'b2xsYW1h'
>>> _encode_image(io.BytesIO(b'ollama'))
'b2xsYW1h'
>>> _encode_image('LICENSE')
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo='
>>> _encode_image(Path('LICENSE'))
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo='
>>> _encode_image('YWJj')
'YWJj'
>>> _encode_image(b'YWJj')
'YWJj'
"""
if p := _as_path(image):
return b64encode(p.read_bytes()).decode('utf-8')
try:
b64decode(image, validate=True)
return image if isinstance(image, str) else image.decode('utf-8')
except (binascii.Error, TypeError):
...
if b := _as_bytesio(image):
return b64encode(b.read()).decode('utf-8')
raise RequestError('image must be bytes, path-like object, or file-like object')
def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]:
if isinstance(s, str) or isinstance(s, Path):
try:
if (p := Path(s)).exists():
return p
except Exception:
...
return None
def _as_bytesio(s: Any) -> Union[io.BytesIO, None]:
if isinstance(s, io.BytesIO):
return s
elif isinstance(s, bytes):
return io.BytesIO(s)
return None
def _parse_host(host: Optional[str]) -> str:
"""
>>> _parse_host(None)
'http://127.0.0.1:11434'
>>> _parse_host('')
'http://127.0.0.1:11434'
>>> _parse_host('1.2.3.4')
'http://1.2.3.4:11434'
>>> _parse_host(':56789')
'http://127.0.0.1:56789'
>>> _parse_host('1.2.3.4:56789')
'http://1.2.3.4:56789'
>>> _parse_host('http://1.2.3.4')
'http://1.2.3.4:80'
>>> _parse_host('https://1.2.3.4')
'https://1.2.3.4:443'
>>> _parse_host('https://1.2.3.4:56789')
'https://1.2.3.4:56789'
>>> _parse_host('example.com')
'http://example.com:11434'
>>> _parse_host('example.com:56789')
'http://example.com:56789'
>>> _parse_host('http://example.com')
'http://example.com:80'
>>> _parse_host('https://example.com')
'https://example.com:443'
>>> _parse_host('https://example.com:56789')
'https://example.com:56789'
>>> _parse_host('example.com/')
'http://example.com:11434'
>>> _parse_host('example.com:56789/')
'http://example.com:56789'
"""
host, port = host or '', 11434
scheme, _, hostport = host.partition('://')
if not hostport:
scheme, hostport = 'http', host
elif scheme == 'http':
port = 80
elif scheme == 'https':
port = 443
split = urllib.parse.urlsplit('://'.join([scheme, hostport]))
host = split.hostname or '127.0.0.1'
port = split.port or port
return f'{scheme}://{host}:{port}'
<fim_middle>while True:
chunk = r.read(32 * 1024)
if not chunk:
break
sha256sum.update(chunk) | while True:
chunk = r.read(32 * 1024)
if not chunk:
break
sha256sum.update(chunk) | WHILE | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
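A minimal sketch, under the same assumptions, of the check-then-upload flow that follows the hashing step above: HEAD the blob first and POST the bytes only when the server reports it missing. The client is assumed to carry the base URL, ensure_blob is an illustrative name rather than a library API, and error handling beyond the 404 check is omitted.

import httpx

def ensure_blob(client: httpx.Client, digest: str, path: str) -> None:
    head = client.head(f'/api/blobs/{digest}')
    if head.status_code == 404:
        # Server does not have this blob yet; stream the file as the request body.
        with open(path, 'rb') as f:
            client.post(f'/api/blobs/{digest}', content=f).raise_for_status()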
<filename>ollama-python/ollama/_client.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# ollama-python/ollama/_types.py
def __init__(self, error: str, status_code: int = -1):
try:
# try to parse content as JSON and extract 'error'
# fallback to raw content if JSON parsing fails
error = json.loads(error).get('error', error)
except json.JSONDecodeError:
...
super().__init__(error)
self.error = error
'Reason for the error.'
self.status_code = status_code
'HTTP status code of the response.'
# ollama-python/examples/async-chat-stream/main.py
async def main():
parser = argparse.ArgumentParser()
parser.add_argument('--speak', default=False, action='store_true')
args = parser.parse_args()
speaker = None
if not args.speak:
...
elif say := shutil.which('say'):
speaker = say
elif (espeak := shutil.which('espeak')) or (espeak := shutil.which('espeak-ng')):
speaker = espeak
client = ollama.AsyncClient()
messages = []
while True:
if content_in := input('>>> '):
messages.append({'role': 'user', 'content': content_in})
content_out = ''
message = {'role': 'assistant', 'content': ''}
async for response in await client.chat(model='mistral', messages=messages, stream=True):
if response['done']:
messages.append(message)
content = response['message']['content']
print(content, end='', flush=True)
content_out += content
if content in ['.', '!', '?', '\n']:
await speak(speaker, content_out)
content_out = ''
message['content'] += content
if content_out:
await speak(speaker, content_out)
print()
# ollama-python/ollama/_types.py
def __init__(self, error: str):
super().__init__(error)
self.error = error
'Reason for the error.'
"""
import os
import io
import json
import httpx
import binascii
import platform
import urllib.parse
from os import PathLike
from pathlib import Path
from hashlib import sha256
from base64 import b64encode, b64decode
from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal
import sys
if sys.version_info < (3, 9):
from typing import Iterator, AsyncIterator
else:
from collections.abc import Iterator, AsyncIterator
from importlib import metadata
try:
__version__ = metadata.version('ollama')
except metadata.PackageNotFoundError:
__version__ = '0.0.0'
from ollama._types import Message, Options, RequestError, ResponseError
class BaseClient:
def __init__(
self,
client,
host: Optional[str] = None,
follow_redirects: bool = True,
timeout: Any = None,
**kwargs,
) -> None:
"""
Creates an httpx client. Default parameters are the same as those defined in httpx
except for the following:
- `follow_redirects`: True
- `timeout`: None
`kwargs` are passed to the httpx client.
"""
headers = kwargs.pop('headers', {})
headers['Content-Type'] = 'application/json'
headers['Accept'] = 'application/json'
headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}'
self._client = client(
base_url=_parse_host(host or os.getenv('OLLAMA_HOST')),
follow_redirects=follow_redirects,
timeout=timeout,
headers=headers,
**kwargs,
)
class Client(BaseClient):
def __init__(self, host: Optional[str] = None, **kwargs) -> None:
super().__init__(httpx.Client, host, **kwargs)
def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
response = self._client.request(method, url, **kwargs)
try:
response.raise_for_status()
except httpx.HTTPStatusError as e:
raise ResponseError(e.response.text, e.response.status_code) from None
return response
def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]:
with self._client.stream(method, url, **kwargs) as r:
try:
r.raise_for_status()
except httpx.HTTPStatusError as e:
e.response.read()
raise ResponseError(e.response.text, e.response.status_code) from None
for line in r.iter_lines():
partial = json.loads(line)
if e := partial.get('error'):
raise ResponseError(e)
yield partial
def _request_stream(
self,
*args,
stream: bool = False,
**kwargs,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json()
def generate(
self,
model: str = '',
prompt: str = '',
system: str = '',
template: str = '',
context: Optional[Sequence[int]] = None,
stream: bool = False,
raw: bool = False,
format: Literal['', 'json'] = '',
images: Optional[Sequence[AnyStr]] = None,
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Create a response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
return self._request_stream(
'POST',
'/api/generate',
json={
'model': model,
'prompt': prompt,
'system': system,
'template': template,
'context': context or [],
'stream': stream,
'raw': raw,
'images': [_encode_image(image) for image in images or []],
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
def chat(
self,
model: str = '',
messages: Optional[Sequence[Message]] = None,
stream: bool = False,
format: Literal['', 'json'] = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Create a chat response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
for message in messages or []:
if not isinstance(message, dict):
raise TypeError('messages must be a list of Message or dict-like objects')
if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
if not message.get('content'):
raise RequestError('messages must contain content')
if images := message.get('images'):
message['images'] = [_encode_image(image) for image in images]
return self._request_stream(
'POST',
'/api/chat',
json={
'model': model,
'messages': messages,
'stream': stream,
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
def embeddings(
self,
model: str = '',
prompt: str = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Sequence[float]:
return self._request(
'POST',
'/api/embeddings',
json={
'model': model,
'prompt': prompt,
'options': options or {},
'keep_alive': keep_alive,
},
).json()
def pull(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return self._request_stream(
'POST',
'/api/pull',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
def push(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return self._request_stream(
'POST',
'/api/push',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
def create(
self,
model: str,
path: Optional[Union[str, PathLike]] = None,
modelfile: Optional[str] = None,
stream: bool = False,
) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
if (realpath := _as_path(path)) and realpath.exists():
modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent)
elif modelfile:
modelfile = self._parse_modelfile(modelfile)
else:
raise RequestError('must provide either path or modelfile')
return self._request_stream(
'POST',
'/api/create',
json={
'name': model,
'modelfile': modelfile,
'stream': stream,
},
stream=stream,
)
def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
base = Path.cwd() if base is None else base
out = io.StringIO()
for line in io.StringIO(modelfile):
command, _, args = line.partition(' ')
if command.upper() not in ['FROM', 'ADAPTER']:
print(line, end='', file=out)
continue
path = Path(args.strip()).expanduser()
path = path if path.is_absolute() else base / path
if path.exists():
args = f'@{self._create_blob(path)}\n'
print(command, args, end='', file=out)
return out.getvalue()
def _create_blob(self, path: Union[str, Path]) -> str:
sha256sum = sha256()
with open(path, 'rb') as r:
<fim_suffix>
digest = f'sha256:{sha256sum.hexdigest()}'
try:
self._request('HEAD', f'/api/blobs/{digest}')
except ResponseError as e:
if e.status_code != 404:
raise
with open(path, 'rb') as r:
self._request('POST', f'/api/blobs/{digest}', content=r)
return digest
def delete(self, model: str) -> Mapping[str, Any]:
response = self._request('DELETE', '/api/delete', json={'name': model})
return {'status': 'success' if response.status_code == 200 else 'error'}
def list(self) -> Mapping[str, Any]:
return self._request('GET', '/api/tags').json()
def copy(self, source: str, destination: str) -> Mapping[str, Any]:
response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
return {'status': 'success' if response.status_code == 200 else 'error'}
def show(self, model: str) -> Mapping[str, Any]:
return self._request('POST', '/api/show', json={'name': model}).json()
class AsyncClient(BaseClient):
def __init__(self, host: Optional[str] = None, **kwargs) -> None:
super().__init__(httpx.AsyncClient, host, **kwargs)
async def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
response = await self._client.request(method, url, **kwargs)
try:
response.raise_for_status()
except httpx.HTTPStatusError as e:
raise ResponseError(e.response.text, e.response.status_code) from None
return response
async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]:
async def inner():
async with self._client.stream(method, url, **kwargs) as r:
try:
r.raise_for_status()
except httpx.HTTPStatusError as e:
await e.response.aread()  # async response body must be loaded with aread() before accessing .text
raise ResponseError(e.response.text, e.response.status_code) from None
async for line in r.aiter_lines():
partial = json.loads(line)
if e := partial.get('error'):
raise ResponseError(e)
yield partial
return inner()
async def _request_stream(
self,
*args,
stream: bool = False,
**kwargs,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
if stream:
return await self._stream(*args, **kwargs)
response = await self._request(*args, **kwargs)
return response.json()
async def generate(
self,
model: str = '',
prompt: str = '',
system: str = '',
template: str = '',
context: Optional[Sequence[int]] = None,
stream: bool = False,
raw: bool = False,
format: Literal['', 'json'] = '',
images: Optional[Sequence[AnyStr]] = None,
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Create a response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
return await self._request_stream(
'POST',
'/api/generate',
json={
'model': model,
'prompt': prompt,
'system': system,
'template': template,
'context': context or [],
'stream': stream,
'raw': raw,
'images': [_encode_image(image) for image in images or []],
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
async def chat(
self,
model: str = '',
messages: Optional[Sequence[Message]] = None,
stream: bool = False,
format: Literal['', 'json'] = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Create a chat response using the requested model.
Raises `RequestError` if a model is not provided.
Raises `ResponseError` if the request could not be fulfilled.
Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator.
"""
if not model:
raise RequestError('must provide a model')
for message in messages or []:
if not isinstance(message, dict):
raise TypeError('messages must be a list of strings')
if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
if not message.get('content'):
raise RequestError('messages must contain content')
if images := message.get('images'):
message['images'] = [_encode_image(image) for image in images]
return await self._request_stream(
'POST',
'/api/chat',
json={
'model': model,
'messages': messages,
'stream': stream,
'format': format,
'options': options or {},
'keep_alive': keep_alive,
},
stream=stream,
)
async def embeddings(
self,
model: str = '',
prompt: str = '',
options: Optional[Options] = None,
keep_alive: Optional[Union[float, str]] = None,
) -> Sequence[float]:
response = await self._request(
'POST',
'/api/embeddings',
json={
'model': model,
'prompt': prompt,
'options': options or {},
'keep_alive': keep_alive,
},
)
return response.json()
async def pull(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return await self._request_stream(
'POST',
'/api/pull',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
async def push(
self,
model: str,
insecure: bool = False,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
return await self._request_stream(
'POST',
'/api/push',
json={
'name': model,
'insecure': insecure,
'stream': stream,
},
stream=stream,
)
async def create(
self,
model: str,
path: Optional[Union[str, PathLike]] = None,
modelfile: Optional[str] = None,
stream: bool = False,
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
"""
Raises `ResponseError` if the request could not be fulfilled.
Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
"""
if (realpath := _as_path(path)) and realpath.exists():
modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent)
elif modelfile:
modelfile = await self._parse_modelfile(modelfile)
else:
raise RequestError('must provide either path or modelfile')
return await self._request_stream(
'POST',
'/api/create',
json={
'name': model,
'modelfile': modelfile,
'stream': stream,
},
stream=stream,
)
async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
base = Path.cwd() if base is None else base
out = io.StringIO()
for line in io.StringIO(modelfile):
command, _, args = line.partition(' ')
if command.upper() not in ['FROM', 'ADAPTER']:
print(line, end='', file=out)
continue
path = Path(args.strip()).expanduser()
path = path if path.is_absolute() else base / path
if path.exists():
args = f'@{await self._create_blob(path)}\n'
print(command, args, end='', file=out)
return out.getvalue()
async def _create_blob(self, path: Union[str, Path]) -> str:
sha256sum = sha256()
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
sha256sum.update(chunk)
digest = f'sha256:{sha256sum.hexdigest()}'
try:
await self._request('HEAD', f'/api/blobs/{digest}')
except ResponseError as e:
if e.status_code != 404:
raise
async def upload_bytes():
with open(path, 'rb') as r:
while True:
chunk = r.read(32 * 1024)
if not chunk:
break
yield chunk
await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes())
return digest
async def delete(self, model: str) -> Mapping[str, Any]:
response = await self._request('DELETE', '/api/delete', json={'name': model})
return {'status': 'success' if response.status_code == 200 else 'error'}
async def list(self) -> Mapping[str, Any]:
response = await self._request('GET', '/api/tags')
return response.json()
async def copy(self, source: str, destination: str) -> Mapping[str, Any]:
response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
return {'status': 'success' if response.status_code == 200 else 'error'}
async def show(self, model: str) -> Mapping[str, Any]:
response = await self._request('POST', '/api/show', json={'name': model})
return response.json()
def _encode_image(image) -> str:
"""
>>> _encode_image(b'ollama')
'b2xsYW1h'
>>> _encode_image(io.BytesIO(b'ollama'))
'b2xsYW1h'
>>> _encode_image('LICENSE')
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo='
>>> _encode_image(Path('LICENSE'))
'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo='
>>> _encode_image('YWJj')
'YWJj'
>>> _encode_image(b'YWJj')
'YWJj'
"""
if p := _as_path(image):
return b64encode(p.read_bytes()).decode('utf-8')
try:
b64decode(image, validate=True)
return image if isinstance(image, str) else image.decode('utf-8')
except (binascii.Error, TypeError):
...
if b := _as_bytesio(image):
return b64encode(b.read()).decode('utf-8')
raise RequestError('image must be bytes, path-like object, or file-like object')
def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]:
if isinstance(s, str) or isinstance(s, Path):
try:
if (p := Path(s)).exists():
return p
except Exception:
...
return None
def _as_bytesio(s: Any) -> Union[io.BytesIO, None]:
if isinstance(s, io.BytesIO):
return s
elif isinstance(s, bytes):
return io.BytesIO(s)
return None
def _parse_host(host: Optional[str]) -> str:
"""
>>> _parse_host(None)
'http://127.0.0.1:11434'
>>> _parse_host('')
'http://127.0.0.1:11434'
>>> _parse_host('1.2.3.4')
'http://1.2.3.4:11434'
>>> _parse_host(':56789')
'http://127.0.0.1:56789'
>>> _parse_host('1.2.3.4:56789')
'http://1.2.3.4:56789'
>>> _parse_host('http://1.2.3.4')
'http://1.2.3.4:80'
>>> _parse_host('https://1.2.3.4')
'https://1.2.3.4:443'
>>> _parse_host('https://1.2.3.4:56789')
'https://1.2.3.4:56789'
>>> _parse_host('example.com')
'http://example.com:11434'
>>> _parse_host('example.com:56789')
'http://example.com:56789'
>>> _parse_host('http://example.com')
'http://example.com:80'
>>> _parse_host('https://example.com')
'https://example.com:443'
>>> _parse_host('https://example.com:56789')
'https://example.com:56789'
>>> _parse_host('example.com/')
'http://example.com:11434'
>>> _parse_host('example.com:56789/')
'http://example.com:56789'
"""
host, port = host or '', 11434
scheme, _, hostport = host.partition('://')
if not hostport:
scheme, hostport = 'http', host
elif scheme == 'http':
port = 80
elif scheme == 'https':
port = 443
split = urllib.parse.urlsplit('://'.join([scheme, hostport]))
host = split.hostname or '127.0.0.1'
port = split.port or port
return f'{scheme}://{host}:{port}'
<fim_middle>while True:
chunk = r.read(32 * 1024)
if not chunk:
break
sha256sum.update(chunk) | while True:
chunk = r.read(32 * 1024)
if not chunk:
break
sha256sum.update(chunk) | WHILE | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
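A minimal standalone sketch of the chunked SHA-256 digest pattern that `_create_blob` relies on, assuming only the Python standard library; the temporary file and its contents are made up for illustration, and the 32 KiB chunk size mirrors the snippet above.
import hashlib
import tempfile
# Write a small temporary file so the sketch is self-contained.
with tempfile.NamedTemporaryFile(delete=False, suffix='.bin') as f:
    f.write(b'ollama' * 10_000)
    path = f.name
# Chunked digest, mirroring the 32 * 1024 read size used by _create_blob.
sha256sum = hashlib.sha256()
with open(path, 'rb') as r:
    while True:
        chunk = r.read(32 * 1024)
        if not chunk:
            break
        sha256sum.update(chunk)
digest = f'sha256:{sha256sum.hexdigest()}'
# Sanity check: the chunked digest matches a one-shot digest of the whole file.
with open(path, 'rb') as r:
    assert digest == f'sha256:{hashlib.sha256(r.read()).hexdigest()}'
print(digest)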
<filename>open-iris/src/iris/nodes/iris_response/conv_filter_bank.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/utils/math.py
def cartesian2polar(xs: np.ndarray, ys: np.ndarray, center_x: float, center_y: float) -> Tuple[np.ndarray, np.ndarray]:
"""Convert xs and ys cartesian coordinates to polar coordinates.
Args:
xs (np.ndarray): x values.
ys (np.ndarray): y values.
center_x (float): center's x.
center_y (float): center's y.
Returns:
Tuple[np.ndarray, np.ndarray]: Converted coordinates (rhos, phis).
"""
x_rel: np.ndarray = xs - center_x
y_rel: np.ndarray = ys - center_y
C = np.vectorize(complex)(x_rel, y_rel)
rho = np.abs(C)
phi = np.angle(C) % (2 * np.pi)
return rho, phi
# open-iris/src/iris/utils/visualisation.py
def _deserialize_iris_template(self, iris_template: Dict[str, np.ndarray]) -> iris_dc.IrisTemplate:
"""Decode and deserialize iris template.
Args:
iris_template (Dict[str, np.ndarray]): Serialized and iris template.
Returns:
iris_dc.IrisTemplate: Deserialized object.
"""
decoded_iris = iris_template["iris_codes"]
decoded_mask = iris_template["mask_codes"]
return iris_dc.IrisTemplate(
iris_codes=[decoded_iris[..., i] for i in range(decoded_iris.shape[2])],
mask_codes=[decoded_mask[..., i] for i in range(decoded_iris.shape[2])],
)
# open-iris/src/iris/nodes/iris_response/image_filters/gabor_filters.py
def get_radius(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""Get radius to the image center for a given array of relative positions (x,y).
Args:
x (np.ndarray): x position relative to the image center.
y (np.ndarray): y position relative to the image center.
Returns:
np.ndarray: radius to the image center.
"""
radius = np.sqrt(x**2 + y**2)
return radius
"""
from typing import List, Tuple
import numpy as np
from pydantic import root_validator, validator
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import IrisFilterResponse, NormalizedIris
from iris.io.validators import are_lengths_equal, is_not_empty
from iris.nodes.iris_response.image_filters.gabor_filters import GaborFilter
from iris.nodes.iris_response.image_filters.image_filter_interface import ImageFilter
from iris.nodes.iris_response.probe_schemas.probe_schema_interface import ProbeSchema
from iris.nodes.iris_response.probe_schemas.regular_probe_schema import RegularProbeSchema
def polar_img_padding(img: np.ndarray, p_rows: int, p_cols: int) -> np.ndarray:
"""Apply zero-padding vertically and rotate-padding horizontally to a normalized image in polar coordinates.
Args:
img (np.ndarray): normalized image in polar coordinates.
p_rows (int): padding size on top and bottom.
p_cols (int): padding size on left and right.
Returns:
np.ndarray: padded image.
"""
i_rows, i_cols = img.shape
padded_image = np.zeros((i_rows + 2 * p_rows, i_cols + 2 * p_cols))
padded_image[p_rows : i_rows + p_rows, p_cols : i_cols + p_cols] = img
padded_image[p_rows : i_rows + p_rows, 0:p_cols] = img[:, -p_cols:]
padded_image[p_rows : i_rows + p_rows, -p_cols:] = img[:, 0:p_cols]
return padded_image
class ConvFilterBank(Algorithm):
"""Apply filter bank.
Algorithm steps:
1) Obtain filters and corresponding probe schemas.
2) Apply convolution to a given normalized iris image using the filters and probe schemas.
3) Generate the iris response and corresponding mask response.
"""
class Parameters(Algorithm.Parameters):
"""Default ConvFilterBank parameters."""
filters: List[ImageFilter]
probe_schemas: List[ProbeSchema]
# Validators
_are_lengths_equal = root_validator(pre=True, allow_reuse=True)(are_lengths_equal("probe_schemas", "filters"))
_is_not_empty = validator("*", allow_reuse=True)(is_not_empty)
__parameters_type__ = Parameters
def __init__(
self,
filters: List[ImageFilter] = [
GaborFilter(
kernel_size=(41, 21),
sigma_phi=7,
sigma_rho=6.13,
theta_degrees=90.0,
lambda_phi=28,
dc_correction=True,
to_fixpoints=True,
),
GaborFilter(
kernel_size=(17, 21),
sigma_phi=2,
sigma_rho=5.86,
theta_degrees=90.0,
lambda_phi=8,
dc_correction=True,
to_fixpoints=True,
),
],
probe_schemas: List[ProbeSchema] = [
RegularProbeSchema(n_rows=16, n_cols=256),
RegularProbeSchema(n_rows=16, n_cols=256),
],
) -> None:
"""Assign parameters.
Args:
filters (List[ImageFilter]): List of image filters.
probe_schemas (List[ProbeSchema]): List of corresponding probe schemas.
"""
super().__init__(filters=filters, probe_schemas=probe_schemas)
def run(self, normalization_output: NormalizedIris) -> IrisFilterResponse:
"""Apply filters to a normalized iris image.
Args:
normalization_output (NormalizedIris): Output of the normalization process.
Returns:
IrisFilterResponse: filter responses.
"""
iris_responses: List[np.ndarray] = []
mask_responses: List[np.ndarray] = []
for i_filter, i_schema in zip(self.params.filters, self.params.probe_schemas):
iris_response, mask_response = self._convolve(i_filter, i_schema, normalization_output)
iris_responses.append(iris_response)
mask_responses.append(mask_response)
return IrisFilterResponse(iris_responses=iris_responses, mask_responses=mask_responses)
def _convolve(
self, img_filter: ImageFilter, probe_schema: ProbeSchema, normalization_output: NormalizedIris
) -> Tuple[np.ndarray, np.ndarray]:
<fim_suffix>
i_rows, i_cols = normalization_output.normalized_image.shape
k_rows, k_cols = img_filter.kernel_values.shape
p_rows = k_rows // 2
p_cols = k_cols // 2
iris_response = np.zeros((probe_schema.params.n_rows, probe_schema.params.n_cols), dtype=np.complex64)
mask_response = np.zeros((probe_schema.params.n_rows, probe_schema.params.n_cols))
padded_iris = polar_img_padding(normalization_output.normalized_image, 0, p_cols)
padded_mask = polar_img_padding(normalization_output.normalized_mask, 0, p_cols)
for i in range(probe_schema.params.n_rows):
for j in range(probe_schema.params.n_cols):
# Convert probe_schema position to integer pixel position.
pos = i * probe_schema.params.n_cols + j
r_probe = min(round(probe_schema.rhos[pos] * i_rows), i_rows - 1)
c_probe = min(round(probe_schema.phis[pos] * i_cols), i_cols - 1)
# Get patch from image centered at [i,j] probed pixel position.
rtop = max(0, r_probe - p_rows)
rbot = min(r_probe + p_rows + 1, i_rows - 1)
iris_patch = padded_iris[rtop:rbot, c_probe : c_probe + k_cols]
mask_patch = padded_mask[rtop:rbot, c_probe : c_probe + k_cols]
# Perform convolution at [i,j] probed pixel position.
ktop = p_rows - iris_patch.shape[0] // 2
iris_response[i][j] = (
(iris_patch * img_filter.kernel_values[ktop : ktop + iris_patch.shape[0], :]).sum()
/ iris_patch.shape[0]
/ k_cols
)
mask_response[i][j] = (
0 if iris_response[i][j] == 0 else (mask_patch.sum() / iris_patch.shape[0] / k_cols)
)
return iris_response, mask_response
<fim_middle>"""Apply convolution to a given normalized iris image with the filter and probe schema.
Args:
img_filter (ImageFilter): filter used for convolution.
probe_schema (ProbeSchema): probe schema used for convolution.
normalization_output (NormalizedIris): Output of the normalization process.
Returns:
Tuple[np.ndarray, np.ndarray]: iris response and mask response.
""" | """Apply convolution to a given normalized iris image with the filter and probe schema.
Args:
img_filter (ImageFilter): filter used for convolution.
probe_schema (ProbeSchema): probe schema used for convolution.
normalization_output (NormalizedIris): Output of the normalization process.
Returns:
Tuple[np.ndarray, np.ndarray]: iris response and mask response.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>open-iris/src/iris/nodes/geometry_refinement/contour_points_filter.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/geometry_refinement/contour_interpolation.py
def run(self, polygons: GeometryPolygons) -> GeometryPolygons:
"""Refine polygons by interpolating contour points.
Args:
polygons (GeometryPolygons): Polygons to refine.
Returns:
GeometryPolygons: Refined polygons.
"""
max_boundary_dist_in_px = self.params.max_distance_between_boundary_points * polygons.iris_diameter
refined_pupil_array = self._interpolate_polygon_points(polygons.pupil_array, max_boundary_dist_in_px)
refined_iris_array = self._interpolate_polygon_points(polygons.iris_array, max_boundary_dist_in_px)
refined_eyeball_array = self._interpolate_polygon_points(polygons.eyeball_array, max_boundary_dist_in_px)
return GeometryPolygons(
pupil_array=refined_pupil_array,
iris_array=refined_iris_array,
eyeball_array=refined_eyeball_array,
)
# open-iris/src/iris/nodes/geometry_refinement/smoothing.py
def run(self, polygons: GeometryPolygons, eye_centers: EyeCenters) -> GeometryPolygons:
"""Perform smoothing refinement.
Args:
polygons (GeometryPolygons): Contours to refine.
eye_centers (EyeCenters): Eye center used when performing a coordinates mapping from cartesian space to polar space.
Returns:
GeometryPolygons: Smoothed contours.
"""
pupil_arcs = self._smooth(polygons.pupil_array, (eye_centers.pupil_x, eye_centers.pupil_y))
iris_arcs = self._smooth(polygons.iris_array, (eye_centers.iris_x, eye_centers.iris_y))
return GeometryPolygons(pupil_array=pupil_arcs, iris_array=iris_arcs, eyeball_array=polygons.eyeball_array)
# open-iris/src/iris/nodes/normalization/common.py
def generate_iris_mask(extrapolated_contours: GeometryPolygons, noise_mask: np.ndarray) -> np.ndarray:
"""Generate iris mask by first finding the intersection region between extrapolated iris contours and eyeball contours. Then remove from the outputted mask those pixels for which noise_mask is equal to True.
Args:
extrapolated_contours (GeometryPolygons): Iris polygon vertices.
noise_mask (np.ndarray): Noise mask.
Returns:
np.ndarray: Iris mask.
"""
img_h, img_w = noise_mask.shape[:2]
iris_mask = common.contour_to_mask(extrapolated_contours.iris_array, (img_w, img_h))
eyeball_mask = common.contour_to_mask(extrapolated_contours.eyeball_array, (img_w, img_h))
iris_mask = iris_mask & eyeball_mask
iris_mask = ~(iris_mask & noise_mask) & iris_mask
return iris_mask
"""
import cv2
import numpy as np
from pydantic import Field
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import GeometryPolygons, NoiseMask
class ContourPointNoiseEyeballDistanceFilter(Algorithm):
"""Implementation of point filtering algorithm that removes points which are to close to eyeball or noise.
The role of this algorithm is to create a buffer around the pupil and iris polygons. This accounts for
potential segmentation imprecisions, making the overall pipeline more robust against edge cases and out-of-distribution images.
The buffer width is computed relatively to the iris diameter: `min_distance_to_noise_and_eyeball * iris_diameter`
The trigger for this buffer are the eyeball boundary and the noise (e.g. eyelashes, specular reflection, etc.).
"""
class Parameters(Algorithm.Parameters):
"""Default ContourPointToNoiseEyeballDistanceFilter parameters."""
min_distance_to_noise_and_eyeball: float = Field(..., gt=0.0, lt=1.0)
__parameters_type__ = Parameters
def __init__(self, min_distance_to_noise_and_eyeball: float = 0.005) -> None:
"""Assign parameters.
Args:
min_distance_to_noise_and_eyeball (float, optional): Minimum distance to eyeball or noise expressed as a fraction of iris diameter length. Defaults to 0.005.
"""
super().__init__(min_distance_to_noise_and_eyeball=min_distance_to_noise_and_eyeball)
def run(self, polygons: GeometryPolygons, geometry_mask: NoiseMask) -> GeometryPolygons:
<fim_suffix>
noise_and_eyeball_polygon_points_mask = geometry_mask.mask.copy()
for eyeball_point in np.round(polygons.eyeball_array).astype(int):
x, y = eyeball_point
noise_and_eyeball_polygon_points_mask[y, x] = True
min_dist_to_noise_and_eyeball_in_px = round(
self.params.min_distance_to_noise_and_eyeball * polygons.iris_diameter
)
forbidden_touch_map = cv2.blur(
noise_and_eyeball_polygon_points_mask.astype(float),
ksize=(
2 * min_dist_to_noise_and_eyeball_in_px + 1,
2 * min_dist_to_noise_and_eyeball_in_px + 1,
),
)
forbidden_touch_map = forbidden_touch_map.astype(bool)
return GeometryPolygons(
pupil_array=self._filter_polygon_points(forbidden_touch_map, polygons.pupil_array),
iris_array=self._filter_polygon_points(forbidden_touch_map, polygons.iris_array),
eyeball_array=polygons.eyeball_array,
)
def _filter_polygon_points(self, forbidden_touch_map: np.ndarray, polygon_points: np.ndarray) -> np.ndarray:
"""Filter polygon's points.
Args:
forbidden_touch_map (np.ndarray): Forbidden touch map. If the value of an element is greater than 0, that point is too close to noise or the eyeball.
polygon_points (np.ndarray): Polygon's points.
Returns:
np.ndarray: Filtered polygon's points.
"""
valid_points = [not forbidden_touch_map[y, x] for x, y in np.round(polygon_points).astype(int)]
return polygon_points[valid_points]
<fim_middle>"""Perform polygon refinement by filtering out those iris/pupil polygons points which are to close to eyeball or noise.
Args:
polygons (GeometryPolygons): Polygons to refine.
geometry_mask (NoiseMask): Geometry noise mask.
Returns:
GeometryPolygons: Refined geometry polygons.
""" | """Perform polygon refinement by filtering out those iris/pupil polygons points which are to close to eyeball or noise.
Args:
polygons (GeometryPolygons): Polygons to refine.
geometry_mask (NoiseMask): Geometry noise mask.
Returns:
GeometryPolygons: Refined geometry polygons.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>open-iris/src/iris/nodes/eye_properties_estimation/iris_bbox_calculator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/validators/object_validators.py
def __init__(self, min_maskcodes_size: int = 0) -> None:
"""Assign parameters.
Args:
min_maskcodes_size (int): Minimum size of mask codes. If too small, valid iris texture is too small, should be rejected.
"""
super().__init__(min_maskcodes_size=min_maskcodes_size)
# open-iris/src/iris/nodes/geometry_refinement/contour_points_filter.py
def __init__(self, min_distance_to_noise_and_eyeball: float = 0.005) -> None:
"""Assign parameters.
Args:
min_distance_to_noise_and_eyeball (float, optional): Minimum distance to eyeball or noise expressed as a fraction of iris diameter length. Defaults to 0.005.
"""
super().__init__(min_distance_to_noise_and_eyeball=min_distance_to_noise_and_eyeball)
# open-iris/src/iris/nodes/eye_properties_estimation/moment_of_area.py
def __init__(self, eccentricity_threshold: float = 0.1) -> None:
"""Assign parameters.
Args:
eccentricity_threshold: float in [0, 1]. The threshold below which a shape is considered not linear enough to reliably estimate its orientation. Defaults to 0.1.
"""
super().__init__(eccentricity_threshold=eccentricity_threshold)
"""
from numbers import Number
from typing import Tuple, Union
import numpy as np
from pydantic import validator
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import BoundingBox, GeometryPolygons, IRImage
from iris.io.errors import BoundingBoxEstimationError
from iris.io.validators import are_all_positive
class IrisBBoxCalculator(Algorithm):
"""Calculate the smallest bounding box around the iris polygon, cropped or not, padded or not."""
class Parameters(Algorithm.Parameters):
"""Parameters of the iris bounding box calculator."""
buffer: Union[int, float, Tuple[Number, Number]]
crop: bool
_are_all_positive = validator("buffer", allow_reuse=True)(are_all_positive)
__parameters_type__ = Parameters
def __init__(self, buffer: Union[int, float, Tuple[Number, Number]] = 0, crop: bool = False) -> None:
<fim_suffix>
super().__init__(buffer=buffer, crop=crop)
def run(self, ir_image: IRImage, geometry_polygons: GeometryPolygons) -> BoundingBox:
"""Compute the bounding box around the iris with an additional buffer. Works best on extrapolated polygons.
The buffer's behaviour is explained in the constructor's docstring.
The bounding box will be cropped to the shape of the input IR Image.
Args:
ir_image (IRImage): IR image.
geometry_polygons (GeometryPolygons): polygons, from which the iris polygon (respectively the image shape) used to compute the bounding box (resp. crop the bounding box).
Returns:
BoundingBox: Estimated iris bounding box.
"""
iris_polygon = geometry_polygons.iris_array
image_height, image_width = (ir_image.height, ir_image.width)
buffer = (
(self.params.buffer, self.params.buffer)
if isinstance(self.params.buffer, (int, float))
else self.params.buffer
)
original_x_min: float = np.min(iris_polygon[:, 0])
original_x_max: float = np.max(iris_polygon[:, 0])
original_y_min: float = np.min(iris_polygon[:, 1])
original_y_max: float = np.max(iris_polygon[:, 1])
if original_x_max == original_x_min or original_y_max == original_y_min:
raise BoundingBoxEstimationError(
f"Iris bounding box empty. x_min={original_x_min}, x_max={original_x_max}, "
f"y_min={original_y_min}, y_max={original_y_max}"
)
if isinstance(buffer[0], int):
padded_x_min = original_x_min - buffer[0]
padded_x_max = original_x_max + buffer[0]
else:
bbox_width = original_x_max - original_x_min
padded_x_min = original_x_min - bbox_width * (buffer[0] - 1) / 2
padded_x_max = original_x_max + bbox_width * (buffer[0] - 1) / 2
if isinstance(buffer[1], int):
padded_y_min = original_y_min - buffer[1]
padded_y_max = original_y_max + buffer[1]
else:
bbox_height = original_y_max - original_y_min
padded_y_min = original_y_min - bbox_height * (buffer[1] - 1) / 2
padded_y_max = original_y_max + bbox_height * (buffer[1] - 1) / 2
if self.params.crop:
padded_x_min = max(padded_x_min, 0)
padded_x_max = min(padded_x_max, image_width)
padded_y_min = max(padded_y_min, 0)
padded_y_max = min(padded_y_max, image_height)
return BoundingBox(x_min=padded_x_min, x_max=padded_x_max, y_min=padded_y_min, y_max=padded_y_max)
<fim_middle>"""Assign parameters.
WARNING: Depending on whether `buffer` is a float or an int, behaviour differs.
`buffer=2.0` => the iris size will be multiplied by 2., `buffer=2` => 2 pixels padding will be added.
Args:
buffer (Union[int, float, Tuple[Number, Number]], optional): Iris buffer in pixels.
if `int`, the bounding box will be padded by `buffer` pixels in each direction.
if `float`, the bounding box' height and width will be multiplied by `buffer`.
if `Tuple[int]`, the bounding box will be padded by `buffer[0]` pixels in the x direction
(left and right) and `buffer[1]` pixels in the y direction (top and bottom).
if `Tuple[float]`, the bounding box width will be multiplied by `buffer[0]` and height by `buffer[1]`.
crop (bool, optional): If True, the bounding box will be cropped to the shape of the input IR Image. Defaults to False.
""" | """Assign parameters.
WARNING: Depending on whether `buffer` is a float or an int, behaviour differs.
`buffer=2.0` => the iris size will be multiplied by 2., `buffer=2` => 2 pixels padding will be added.
Args:
buffer (Union[int, float, Tuple[Number, Number]], optional): Iris buffer in pixels.
if `int`, the bounding box will be padded by `buffer` pixels in each direction.
if `float`, the bounding box' height and width will be multiplied by `buffer`.
if `Tuple[int]`, the bounding box will be padded by `buffer[0]` pixels in the x direction
(left and right) and `buffer[1]` pixels in the y direction (top and bottom).
if `Tuple[float]`, the bounding box width will be multiplied by `buffer[0]` and height by `buffer[1]`.
crop (bool, optional): If True, the bounding box will be cropped to the shape of the input IR Image. Defaults to False.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>open-iris/src/iris/nodes/normalization/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def get_pixel_binary(image: np.ndarray, pixel_x: float, pixel_y: float) -> bool:
"""Get the boolean value of a pixel from a binary image.
Args:
image (np.ndarray): Binary image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
bool: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return False
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def _normalize_all(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize all points of an image using bilinear.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): original input image points.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask.
"""
src_shape = src_points.shape[0:2]
src_points = np.vstack(src_points)
normalized_image = np.array(
[interpolate_pixel_intensity(original_image, pixel_coords=image_xy) for image_xy in src_points]
)
normalized_image = np.reshape(normalized_image, src_shape)
normalized_mask = np.array(
[self.get_pixel_binary(iris_mask, image_xy[0], image_xy[1]) for image_xy in src_points]
)
normalized_mask = np.reshape(normalized_mask, src_shape)
return normalized_image / 255.0, normalized_mask
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def _normalize_roi(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
dst_points: np.ndarray,
normalize_roi_output_shape: Tuple[float, float],
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize a single ROI of an image.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): ROI's original input image points.
dst_points (np.ndarray): ROI's normalized image points.
normalize_roi_output_shape (t.Tuple[float, float]): Output shape of normalized ROI.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask ROIs.
"""
xmin, ymin, xmax, ymax = self._bbox_coords(dst_points)
normalize_image_xs = np.arange(xmin, xmax)
normalize_image_ys = np.arange(ymin, ymax)
normalize_image_points = np.meshgrid(normalize_image_xs, normalize_image_ys)
normalize_image_points = self.cartesian2homogeneous(normalize_image_points)
perspective_mat = cv2.getPerspectiveTransform(dst_points, src_points)
mapped_points = np.matmul(perspective_mat, normalize_image_points)
mapped_points = self.homogeneous2cartesian(mapped_points)
normalized_image_roi = np.zeros(normalize_roi_output_shape, dtype=np.float32)
normalized_mask_roi = np.zeros(normalize_roi_output_shape, dtype=bool)
for image_xy, normalized_xy in zip(mapped_points.T, normalize_image_points.T[..., :2]):
norm_x, norm_y = normalized_xy.astype(int)
shifted_y, shifted_x = norm_y - ymin, norm_x - xmin
normalized_image_roi[shifted_y, shifted_x] = interpolate_pixel_intensity(
original_image, pixel_coords=image_xy
)
try:
img_x, img_y = map(int, image_xy)
normalized_mask_roi[shifted_y, shifted_x] = iris_mask[img_y, img_x]
except IndexError:
normalized_mask_roi[shifted_y, shifted_x] = False
return normalized_image_roi / 255.0, normalized_mask_roi
"""
from typing import Tuple
import numpy as np
from pydantic import NonNegativeInt
from iris.io.dataclasses import GeometryPolygons
from iris.utils import common
def generate_iris_mask(extrapolated_contours: GeometryPolygons, noise_mask: np.ndarray) -> np.ndarray:
"""Generate iris mask by first finding the intersection region between extrapolated iris contours and eyeball contours. Then remove from the outputted mask those pixels for which noise_mask is equal to True.
Args:
extrapolated_contours (GeometryPolygons): Iris polygon vertices.
noise_mask (np.ndarray): Noise mask.
Returns:
np.ndarray: Iris mask.
"""
img_h, img_w = noise_mask.shape[:2]
iris_mask = common.contour_to_mask(extrapolated_contours.iris_array, (img_w, img_h))
eyeball_mask = common.contour_to_mask(extrapolated_contours.eyeball_array, (img_w, img_h))
iris_mask = iris_mask & eyeball_mask
iris_mask = ~(iris_mask & noise_mask) & iris_mask
return iris_mask
def correct_orientation(
pupil_points: np.ndarray, iris_points: np.ndarray, eye_orientation: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Correct orientation by changing the starting angle in pupil and iris points' arrays.
Args:
pupil_points (np.ndarray): Pupil boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
eye_orientation (float): Eye orientation angle in radians.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with rotated based on eye_orientation angle boundary points (pupil_points, iris_points).
"""
orientation_angle = np.degrees(eye_orientation)
num_rotations = -round(orientation_angle * len(pupil_points) / 360.0)
pupil_points = np.roll(pupil_points, num_rotations, axis=0)
iris_points = np.roll(iris_points, num_rotations, axis=0)
return pupil_points, iris_points
def getgrids(res_in_r: NonNegativeInt, p2i_ratio: NonNegativeInt) -> np.ndarray:
"""Generate radius grids for nonlinear normalization based on p2i_ratio (pupil_to_iris ratio).
Args:
res_in_r (NonNegativeInt): Normalized image r resolution.
p2i_ratio (NonNegativeInt): pupil_to_iris ratio, range in [0,100]
Returns:
np.ndarray: nonlinear sampling grids for normalization
"""
p = [np.square(x) for x in np.arange(28, max(74 - p2i_ratio, p2i_ratio - 14), 1)]
q = p - p[0]
q = q / q[-1]
grids = np.interp(np.linspace(0, 1.0, res_in_r + 1), np.linspace(0, 1.0, len(q)), q)
return grids[0:-1] + np.diff(grids) / 2
def interpolate_pixel_intensity(image: np.ndarray, pixel_coords: Tuple[float, float]) -> float:
"""Perform bilinear interpolation to estimate pixel intensity in a given location.
Args:
image (np.ndarray): Original, not normalized image.
pixel_coords (Tuple[float, float]): Pixel coordinates.
Returns:
float: Interpolated pixel intensity.
Reference:
[1] https://en.wikipedia.org/wiki/Bilinear_interpolation
"""
def get_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return 0.0
def get_interpolation_points_coords(
image: np.ndarray, pixel_x: float, pixel_y: float
) -> Tuple[float, float, float, float]:
<fim_suffix>
xmin, ymin = np.floor(pixel_x), np.floor(pixel_y)
xmax, ymax = np.ceil(pixel_x), np.ceil(pixel_y)
img_h, img_w = image.shape[:2]
if xmin == xmax and not xmax == img_w - 1:
xmax += 1
if xmin == xmax and xmax == img_w - 1:
xmin -= 1
if ymin == ymax and not ymax == img_h - 1:
ymax += 1
if ymin == ymax and ymax == img_h - 1:
ymin -= 1
return xmin, ymin, xmax, ymax
pixel_x, pixel_y = pixel_coords
xmin, ymin, xmax, ymax = get_interpolation_points_coords(image, pixel_x=pixel_x, pixel_y=pixel_y)
lower_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymax)
lower_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymax)
upper_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymin)
upper_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymin)
xs_differences = np.array([xmax - pixel_x, pixel_x - xmin])
neighboring_pixel_intensities = np.array(
[
[lower_left_pixel_intensity, upper_left_pixel_intensity],
[lower_right_pixel_intensity, upper_right_pixel_intensity],
]
)
ys_differences = np.array([[pixel_y - ymin], [ymax - pixel_y]])
pixel_intensity = np.matmul(np.matmul(xs_differences, neighboring_pixel_intensities), ys_differences)
return pixel_intensity.item()
<fim_middle>"""Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
""" | """Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>open-iris/src/iris/callbacks/pipeline_trace.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/io/dataclasses.py
def pupil_diameter(self) -> float:
"""Return pupil diameter.
Returns:
float: pupil diameter.
"""
return math.estimate_diameter(self.pupil_array)
# open-iris/src/iris/io/dataclasses.py
def iris_diameter(self) -> float:
"""Return iris diameter.
Returns:
float: iris diameter.
"""
return math.estimate_diameter(self.iris_array)
# open-iris/src/iris/orchestration/output_builders.py
def build_orb_output(call_trace: PipelineCallTraceStorage) -> Dict[str, Any]:
"""Build the output for the Orb.
Args:
call_trace (PipelineCallTraceStorage): Pipeline call results storage.
Returns:
Dict[str, Any]: {
"iris_template": (Optional[Dict]) the iris template dict if the pipeline succeeded,
"error": (Optional[Dict]) the error dict if the pipeline returned an error,
"metadata": (Dict) the metadata dict,
}.
"""
iris_template = __safe_serialize(call_trace["encoder"])
metadata = __get_metadata(call_trace=call_trace)
error = __get_error(call_trace=call_trace)
exception = call_trace.get_error()
if exception is None:
iris_template = __safe_serialize(call_trace["encoder"])
error = None
elif isinstance(exception, Exception):
iris_template = None
error = {
"error_type": type(exception).__name__,
"message": str(exception),
"traceback": "".join(traceback.format_tb(exception.__traceback__)),
}
output = {
"error": error,
"iris_template": iris_template,
"metadata": metadata,
}
return output
"""
from __future__ import annotations
from typing import Any, Dict, Iterable, List, Optional
from iris.callbacks.callback_interface import Callback
from iris.io.class_configs import Algorithm
from iris.orchestration.pipeline_dataclasses import PipelineNode
class PipelineCallTraceStorageError(Exception):
"""PipelineCallTraceStorage error class."""
pass
class PipelineCallTraceStorage:
"""A storage object for pipeline input, intermediate and final results."""
INPUT_KEY_NAME = "input"
ERROR_KEY_NAME = "error"
def __init__(self, results_names: Iterable[str]) -> None:
"""Assign parameters.
Args:
results_names (Iterable[str]): Names of the results; used to create the list of available keys in the storage.
"""
self._storage = self._init_storage(results_names)
def __getitem__(self, result_name: str) -> Any:
"""Get result_name result.
Args:
result_name (str): Result name.
Raises:
PipelineCallTraceStorageError: Raised if result_name is not found.
Returns:
Any: Result object.
"""
return self.get(result_name)
def __len__(self) -> int:
"""Get storage capacity.
Returns:
int: Storage capacity
"""
return len(self._storage.keys())
def get(self, result_name: str) -> Any:
"""Get result_name result.
Args:
result_name (str): Result name.
Raises:
PipelineCallTraceStorageError: Raised if result_name is not found.
Returns:
Any: Result object.
"""
if result_name not in self._storage.keys():
raise PipelineCallTraceStorageError(f"Unknown result name: {result_name}")
return self._storage[result_name]
def get_input(self) -> Any:
"""Return pipeline input.
Returns:
Any: Input to pipeline.
"""
return self.get(PipelineCallTraceStorage.INPUT_KEY_NAME)
def get_error(self) -> Optional[Exception]:
<fim_suffix>
return self.get(PipelineCallTraceStorage.ERROR_KEY_NAME)
def write(self, result_name: str, result: Any) -> None:
"""Write a result to a storage saved under the name `result_name`.
Args:
result_name (str): Result name.
result (Any): Result reference to save.
"""
self._storage[result_name] = result
def write_input(self, in_value: Any) -> None:
"""Save `in_value` in storage.
Args:
in_value (Any): Input value.
"""
self._storage[PipelineCallTraceStorage.INPUT_KEY_NAME] = in_value
def write_error(self, error: Exception) -> None:
"""Save `error` in storage.
Args:
error (Exception): error to store.
"""
self._storage[PipelineCallTraceStorage.ERROR_KEY_NAME] = error
def clean(self) -> None:
"""Clean storage by setting all result references to None."""
for result_name in self._storage.keys():
self._storage[result_name] = None
def _init_storage(self, results_names: Iterable[str]) -> Dict[str, None]:
"""Initialize storage (dict) with proper names and None values as results.
Args:
results_names (Iterable[str]): Result names.
Returns:
Dict[str, None]: Storage dictionary.
"""
storage = {name: None for name in results_names}
storage[PipelineCallTraceStorage.INPUT_KEY_NAME] = None
storage[PipelineCallTraceStorage.ERROR_KEY_NAME] = None
return storage
@staticmethod
def initialise(nodes: Dict[str, Algorithm], pipeline_nodes: List[PipelineNode]) -> PipelineCallTraceStorage:
"""Instantiate mechanisms for intermediate results tracing.
Args:
nodes (Dict[str, Algorithm]): Mapping between node names and the corresponding instantiated nodes.
pipeline_nodes (List[PipelineNode]): List of nodes as declared in the input config. Not used in this function.
Returns:
PipelineCallTraceStorage: Pipeline intermediate and final results storage.
"""
call_trace = PipelineCallTraceStorage(results_names=nodes.keys())
for algorithm_name, algorithm_object in nodes.items():
algorithm_object._callbacks.append(NodeResultsWriter(call_trace, algorithm_name))
return call_trace
class NodeResultsWriter(Callback):
"""A node call results writer Callback class."""
def __init__(self, trace_storage_reference: PipelineCallTraceStorage, result_name: str) -> None:
"""Assign parameters.
Args:
trace_storage_reference (PipelineCallTraceStorage): Storage object reference to write.
result_name (str): Result name under which result should be written.
"""
self._trace_storage_reference = trace_storage_reference
self._result_name = result_name
def on_execute_end(self, result: Any) -> None:
"""Write on node execution end.
Args:
result (Any): Result of node call.
"""
self._trace_storage_reference.write(self._result_name, result)
<fim_middle>"""Return stored error.
Returns:
Optional[Exception]: error.
""" | """Return stored error.
Returns:
Optional[Exception]: error.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>open-iris/src/iris/nodes/validators/cross_object_validators.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/geometry_refinement/contour_points_filter.py
def __init__(self, min_distance_to_noise_and_eyeball: float = 0.005) -> None:
"""Assign parameters.
Args:
min_distance_to_noise_and_eyeball (float, optional): Minimum distance to eyeball or noise expressed as a fraction of iris diameter length. Defaults to 0.005.
"""
super().__init__(min_distance_to_noise_and_eyeball=min_distance_to_noise_and_eyeball)
# open-iris/src/iris/nodes/geometry_refinement/contour_interpolation.py
def __init__(self, max_distance_between_boundary_points: float = 0.01) -> None:
"""Assign parameters.
Args:
max_distance_between_boundary_points (float, optional): Maximum distance between boundary contour points expressed as a fraction of the iris diameter length. Defaults to 0.01.
"""
super().__init__(max_distance_between_boundary_points=max_distance_between_boundary_points)
# open-iris/src/iris/nodes/binarization/specular_reflection_detection.py
def __init__(self, reflection_threshold: int = 254) -> None:
"""Assign parameters.
Args:
reflection_threshold (int, optional): Specular Reflection minimal brightness threshold. Defaults to 254.
"""
super().__init__(reflection_threshold=reflection_threshold)
"""
import numpy as np
from pydantic import Field
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeCenters, GeometryPolygons, IRImage
from iris.io.errors import ExtrapolatedPolygonsInsideImageValidatorError, EyeCentersInsideImageValidatorError
class EyeCentersInsideImageValidator(Algorithm):
"""Validate that the eye center are not too close to the border.
Raises:
EyeCentersInsideImageValidatorError: If pupil or iris center are strictly less than `min_distance_to_border`
pixel of the image boundary.
"""
class Parameters(Algorithm.Parameters):
"""Parameters class for EyeCentersInsideImageValidator objects."""
min_distance_to_border: float
__parameters_type__ = Parameters
def __init__(self, min_distance_to_border: float = 0.0) -> None:
<fim_suffix>
super().__init__(min_distance_to_border=min_distance_to_border)
def run(self, ir_image: IRImage, eye_centers: EyeCenters) -> None:
"""Validate if eye centers are within proper image boundaries.
Args:
ir_image (IRImage): IR image
eye_centers (EyeCenters): Eye centers
Raises:
EyeCentersInsideImageValidatorError: Raised if pupil or iris center is not in within correct image boundary.
"""
if not self._check_center_valid(eye_centers.pupil_x, eye_centers.pupil_y, ir_image):
raise EyeCentersInsideImageValidatorError("Pupil center is not in allowed image boundary.")
if not self._check_center_valid(eye_centers.iris_x, eye_centers.iris_y, ir_image):
raise EyeCentersInsideImageValidatorError("Iris center is not in allowed image boundary.")
def _check_center_valid(self, center_x: float, center_y: float, ir_image: IRImage) -> bool:
"""Check if center point is within proper image bound.
Args:
center_x (float): Center x
center_y (float): Center y
ir_image (IRImage): IR image object
Returns:
bool: Result of the check.
"""
return (
self.params.min_distance_to_border <= center_x <= ir_image.width - self.params.min_distance_to_border
and self.params.min_distance_to_border <= center_y <= ir_image.height - self.params.min_distance_to_border
)
class ExtrapolatedPolygonsInsideImageValidator(Algorithm):
"""Validate that GeometryPolygons are included within the image to a certain minimum percentage.
Raises:
ExtrapolatedPolygonsInsideImageValidatorError: If the number of points of the pupil/iris/eyeball
that are within the input image is below threshold.
"""
class Parameters(Algorithm.Parameters):
"""Parameters class for ExtrapolatedPolygonsInsideImageValidator objects."""
min_pupil_allowed_percentage: float = Field(..., ge=0.0, le=1.0)
min_iris_allowed_percentage: float = Field(..., ge=0.0, le=1.0)
min_eyeball_allowed_percentage: float = Field(..., ge=0.0, le=1.0)
__parameters_type__ = Parameters
def __init__(
self,
min_pupil_allowed_percentage: float = 0.0,
min_iris_allowed_percentage: float = 0.0,
min_eyeball_allowed_percentage: float = 0.0,
) -> None:
"""Assign parameters.
Args:
min_pupil_allowed_percentage (float, optional): Minimum allowed percentage of extrapolated pupil polygons that must be within an image.
Defaults to 0.0 (Entire extrapolated polygon may be outside of an image).
min_iris_allowed_percentage (float, optional): Minimum allowed percentage of extrapolated iris polygons that must be within an image.
Defaults to 0.0 (Entire extrapolated polygon may be outside of an image).
min_eyeball_allowed_percentage (float, optional): Minimum allowed percentage of extrapolated eyeball polygons that must be within an image.
Defaults to 0.0 (Entire extrapolated polygon may be outside of an image).
"""
super().__init__(
min_pupil_allowed_percentage=min_pupil_allowed_percentage,
min_iris_allowed_percentage=min_iris_allowed_percentage,
min_eyeball_allowed_percentage=min_eyeball_allowed_percentage,
)
def run(self, ir_image: IRImage, extrapolated_polygons: GeometryPolygons) -> None:
"""Perform validation.
Args:
ir_image (IRImage): IR image.
extrapolated_polygons (GeometryPolygons): Extrapolated polygons.
Raises:
ExtrapolatedPolygonsInsideImageValidatorError: Raised if not enough points of the pupil/iris/eyeball are within an image.
"""
if not self._check_correct_percentage(
extrapolated_polygons.pupil_array, self.params.min_pupil_allowed_percentage, ir_image
):
raise ExtrapolatedPolygonsInsideImageValidatorError("Not enough pupil points are within an image.")
if not self._check_correct_percentage(
extrapolated_polygons.iris_array, self.params.min_iris_allowed_percentage, ir_image
):
raise ExtrapolatedPolygonsInsideImageValidatorError("Not enough iris points are within an image.")
if not self._check_correct_percentage(
extrapolated_polygons.eyeball_array, self.params.min_eyeball_allowed_percentage, ir_image
):
raise ExtrapolatedPolygonsInsideImageValidatorError("Not enough eyeball points are within an image.")
def _check_correct_percentage(self, polygon: np.ndarray, min_allowed_percentage: float, ir_image: IRImage) -> bool:
"""Check percentage of points withing image based on minimal specified threshold.
Args:
polygon (np.ndarray): polygon to verify.
min_allowed_percentage (float): minimal allowed percentage of points that must be within an image.
ir_image (IRImage): ir image object.
Returns:
bool: Check result.
"""
num_points_inside_image: float = np.sum(
np.all(np.logical_and((0, 0) <= polygon, polygon <= (ir_image.width, ir_image.height)), axis=1)
)
percentage_points_inside_image = num_points_inside_image / len(polygon)
return percentage_points_inside_image >= min_allowed_percentage
<fim_middle>"""Assign parameters.
Args:
min_distance_to_border (float, optional): Minimum allowed distance to image boundary.
Defaults to 0.0 (Eye centers can be at the image border).
""" | """Assign parameters.
Args:
min_distance_to_border (float, optional): Minimum allowed distance to image boundary.
Defaults to 0.0 (Eye centers can be at the image border).
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>open-iris/src/iris/callbacks/pipeline_trace.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/io/dataclasses.py
def index_of(self, class_name: str) -> int:
"""Get class index based on its name.
Args:
class_name (str): Class name
Raises:
ValueError: Raised if `class_name` is not found in the `index2class` dictionary.
Returns:
int: Index of the class.
"""
for index, name in self.index2class.items():
if name == class_name:
return index
raise ValueError(f"Index for the `{class_name}` not found")
# open-iris/src/iris/orchestration/output_builders.py
def __safe_serialize(object: Optional[ImmutableModel]) -> Optional[Dict[str, Any]]:
"""Serialize an object.
Args:
object (Optional[ImmutableModel]): Object to be serialized.
Raises:
NotImplementedError: Raised if object is not serializable.
Returns:
Optional[Dict[str, Any]]: Serialized object.
"""
if object is None:
return None
elif isinstance(object, ImmutableModel):
return object.serialize()
elif isinstance(object, (list, tuple)):
return [__safe_serialize(sub_object) for sub_object in object]
else:
raise NotImplementedError(f"Object of type {type(object)} is not serializable.")
# open-iris/src/iris/nodes/validators/object_validators.py
def run(self, val_arguments: Offgaze) -> None:
"""Validate of offgaze estimation algorithm.
Args:
val_arguments (Offgaze): Computed result.
Raises:
E.OffgazeEstimationError: Raised if the result is greater than the specified threshold.
"""
if not (val_arguments.score <= self.params.max_allowed_offgaze):
raise E.OffgazeEstimationError(
f"offgaze={val_arguments.score} > max_allowed_offgaze={self.params.max_allowed_offgaze}"
)
"""
from __future__ import annotations
from typing import Any, Dict, Iterable, List, Optional
from iris.callbacks.callback_interface import Callback
from iris.io.class_configs import Algorithm
from iris.orchestration.pipeline_dataclasses import PipelineNode
class PipelineCallTraceStorageError(Exception):
"""PipelineCallTraceStorage error class."""
pass
class PipelineCallTraceStorage:
"""A storage object for pipeline input, intermediate and final results."""
INPUT_KEY_NAME = "input"
ERROR_KEY_NAME = "error"
def __init__(self, results_names: Iterable[str]) -> None:
"""Assign parameters.
Args:
results_names (Iterable[str]): Names of the results; used to create the list of available keys in the storage.
"""
self._storage = self._init_storage(results_names)
def __getitem__(self, result_name: str) -> Any:
"""Get result_name result.
Args:
result_name (str): Result name.
Raises:
PipelineCallTraceStorageError: Raised if result_name is not found.
Returns:
Any: Result object.
"""
return self.get(result_name)
def __len__(self) -> int:
"""Get storage capacity.
Returns:
int: Storage capacity
"""
return len(self._storage.keys())
def get(self, result_name: str) -> Any:
<fim_suffix>
if result_name not in self._storage.keys():
raise PipelineCallTraceStorageError(f"Unknown result name: {result_name}")
return self._storage[result_name]
def get_input(self) -> Any:
"""Return pipeline input.
Returns:
Any: Input to pipeline.
"""
return self.get(PipelineCallTraceStorage.INPUT_KEY_NAME)
def get_error(self) -> Optional[Exception]:
"""Return stored error.
Returns:
Optional[Exception]: error.
"""
return self.get(PipelineCallTraceStorage.ERROR_KEY_NAME)
def write(self, result_name: str, result: Any) -> None:
"""Write a result to a storage saved under the name `result_name`.
Args:
result_name (str): Result name.
result (Any): Result reference to save.
"""
self._storage[result_name] = result
def write_input(self, in_value: Any) -> None:
"""Save `in_value` in storage.
Args:
in_value (Any): Input value.
"""
self._storage[PipelineCallTraceStorage.INPUT_KEY_NAME] = in_value
def write_error(self, error: Exception) -> None:
"""Save `error` in storage.
Args:
error (Exception): error to store.
"""
self._storage[PipelineCallTraceStorage.ERROR_KEY_NAME] = error
def clean(self) -> None:
"""Clean storage by setting all result references to None."""
for result_name in self._storage.keys():
self._storage[result_name] = None
def _init_storage(self, results_names: Iterable[str]) -> Dict[str, None]:
"""Initialize storage (dict) with proper names and None values as results.
Args:
results_names (Iterable[str]): Result names.
Returns:
Dict[str, None]: Storage dictionary.
"""
storage = {name: None for name in results_names}
storage[PipelineCallTraceStorage.INPUT_KEY_NAME] = None
storage[PipelineCallTraceStorage.ERROR_KEY_NAME] = None
return storage
@staticmethod
def initialise(nodes: Dict[str, Algorithm], pipeline_nodes: List[PipelineNode]) -> PipelineCallTraceStorage:
"""Instantiate mechanisms for intermediate results tracing.
Args:
nodes (Dict[str, Algorithm]): Mapping between node names and the corresponding instantiated nodes.
pipeline_nodes (List[PipelineNode]): List of nodes as declared in the input config. Not used in this function.
Returns:
PipelineCallTraceStorage: Pipeline intermediate and final results storage.
"""
call_trace = PipelineCallTraceStorage(results_names=nodes.keys())
for algorithm_name, algorithm_object in nodes.items():
algorithm_object._callbacks.append(NodeResultsWriter(call_trace, algorithm_name))
return call_trace
class NodeResultsWriter(Callback):
"""A node call results writer Callback class."""
def __init__(self, trace_storage_reference: PipelineCallTraceStorage, result_name: str) -> None:
"""Assign parameters.
Args:
trace_storage_reference (PipelineCallTraceStorage): Storage object reference to write.
result_name (str): Result name under which result should be written.
"""
self._trace_storage_reference = trace_storage_reference
self._result_name = result_name
def on_execute_end(self, result: Any) -> None:
"""Write on node execution end.
Args:
result (Any): Result of node call.
"""
self._trace_storage_reference.write(self._result_name, result)
<fim_middle>"""Get result_name result.
Args:
result_name (str): Result name.
Raises:
PipelineCallTraceStorageError: Raised if result_name is not found.
Returns:
Any: Result object.
""" | """Get result_name result.
Args:
result_name (str): Result name.
Raises:
PipelineCallTraceStorageError: Raised if result_name is not found.
Returns:
Any: Result object.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>open-iris/src/iris/nodes/geometry_refinement/smoothing.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/geometry_estimation/linear_extrapolation.py
def _estimate(self, vertices: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:
"""Estimate a circle fit for a single contour.
Args:
vertices (np.ndarray): Contour's vertices.
center_xy (Tuple[float, float]): Contour's center position.
Returns:
np.ndarray: Estimated polygon.
"""
rhos, phis = math.cartesian2polar(vertices[:, 0], vertices[:, 1], *center_xy)
padded_rhos = np.concatenate([rhos, rhos, rhos])
padded_phis = np.concatenate([phis - 2 * np.pi, phis, phis + 2 * np.pi])
interpolated_phis = np.arange(padded_phis.min(), padded_phis.max(), np.radians(self.params.dphi))
interpolated_rhos = np.interp(interpolated_phis, xp=padded_phis, fp=padded_rhos, period=2 * np.pi)
mask = (interpolated_phis >= 0) & (interpolated_phis < 2 * np.pi)
interpolated_phis, interpolated_rhos = interpolated_phis[mask], interpolated_rhos[mask]
xs, ys = math.polar2cartesian(interpolated_rhos, interpolated_phis, *center_xy)
estimated_vertices = np.column_stack([xs, ys])
return estimated_vertices
# open-iris/src/iris/nodes/geometry_refinement/contour_interpolation.py
def _interpolate_polygon_points(self, polygon: np.ndarray, max_distance_between_points_px: float) -> np.ndarray:
"""Interpolate contours points, so that the distance between two is no greater than `self.params.max_distance_between_boundary_points` in pixel space.
Args:
polygon (np.ndarray): Contour polygons.
max_distance_between_points_px (float): `self.params.max_distance_between_boundary_points` expressed in pixel length relative to iris diameter.
Returns:
np.ndarray: Interpolated polygon points.
"""
previous_boundary = np.roll(polygon, shift=1, axis=0)
distances = np.linalg.norm(polygon - previous_boundary, axis=1)
num_points = np.ceil(distances / max_distance_between_points_px).astype(int)
x: List[np.ndarray] = []
y: List[np.ndarray] = []
for (x1, y1), (x2, y2), num_point in zip(previous_boundary, polygon, num_points):
x.append(np.linspace(x1, x2, num=num_point, endpoint=False))
y.append(np.linspace(y1, y2, num=num_point, endpoint=False))
new_boundary = np.stack([np.concatenate(x), np.concatenate(y)], axis=1)
_, indices = np.unique(new_boundary, axis=0, return_index=True)
new_boundary = new_boundary[np.sort(indices)]
return new_boundary
# open-iris/src/iris/utils/math.py
def cartesian2polar(xs: np.ndarray, ys: np.ndarray, center_x: float, center_y: float) -> Tuple[np.ndarray, np.ndarray]:
"""Convert xs and ys cartesian coordinates to polar coordinates.
Args:
xs (np.ndarray): x values.
ys (np.ndarray): y values.
center_x (float): center's x.
center_y (float): center's y.
Returns:
Tuple[np.ndarray, np.ndarray]: Converted coordinates (rhos, phis).
"""
x_rel: np.ndarray = xs - center_x
y_rel: np.ndarray = ys - center_y
C = np.vectorize(complex)(x_rel, y_rel)
rho = np.abs(C)
phi = np.angle(C) % (2 * np.pi)
return rho, phi
"""
from typing import List, Tuple
import numpy as np
from pydantic import Field
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeCenters, GeometryPolygons
from iris.io.errors import GeometryRefinementError
from iris.utils import math
class Smoothing(Algorithm):
"""Implementation of contour smoothing algorithm.
Algorithm steps:
1) Map iris/pupil points to polar space based on estimated iris/pupil centers.
2) Smooth iris/pupil contour by applying 1D convolution with rolling median kernel approach.
3) Map points back to cartesian space from polar space.
"""
class Parameters(Algorithm.Parameters):
"""Smoothing parameters class."""
dphi: float = Field(..., gt=0.0, lt=360.0)
kernel_size: float = Field(..., gt=0.0, lt=360.0)
gap_threshold: float = Field(..., gt=0.0, lt=360.0)
__parameters_type__ = Parameters
def __init__(self, dphi: float = 1.0, kernel_size: float = 10.0, gap_threshold: float = 10.0) -> None:
"""Assign parameters.
Args:
dphi (float, optional): phi angle delta used to sample points while doing smoothing by interpolation. Defaults to 1.0.
kernel_size (float, optional): Rolling median kernel size expressed in radians. Final kernel size is computed as a quotient of kernel_size and dphi. Defaults to 10.0.
gap_threshold (float, optional): Gap threshold distance. Defaults to 10.0.
"""
super().__init__(dphi=dphi, kernel_size=kernel_size, gap_threshold=gap_threshold)
@property
def kernel_offset(self) -> int:
"""Kernel offset (distance from kernel center to border) property used when smoothing with rolling median. If a quotient is less then 1 then kernel size equal to 1 is returned.
Returns:
int: Kernel size.
"""
return max(1, int((np.radians(self.params.kernel_size) / np.radians(self.params.dphi))) // 2)
def run(self, polygons: GeometryPolygons, eye_centers: EyeCenters) -> GeometryPolygons:
"""Perform smoothing refinement.
Args:
polygons (GeometryPolygons): Contours to refine.
eye_centers (EyeCenters): Eye center used when performing a coordinates mapping from cartesian space to polar space.
Returns:
GeometryPolygons: Smoothed contours.
"""
pupil_arcs = self._smooth(polygons.pupil_array, (eye_centers.pupil_x, eye_centers.pupil_y))
iris_arcs = self._smooth(polygons.iris_array, (eye_centers.iris_x, eye_centers.iris_y))
return GeometryPolygons(pupil_array=pupil_arcs, iris_array=iris_arcs, eyeball_array=polygons.eyeball_array)
def _smooth(self, polygon: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:
"""Smooth a single contour.
Args:
polygon (np.ndarray): Contour to smooth.
center_xy (Tuple[float, float]): Contour's center.
Returns:
np.ndarray: Smoothed contour's vertices.
"""
arcs, num_gaps = self._cut_into_arcs(polygon, center_xy)
arcs = (
self._smooth_circular_shape(arcs[0], center_xy)
if num_gaps == 0
else np.vstack([self._smooth_arc(arc, center_xy) for arc in arcs if len(arc) >= 2])
)
return arcs
def _cut_into_arcs(self, polygon: np.ndarray, center_xy: Tuple[float, float]) -> Tuple[List[np.ndarray], int]:
<fim_suffix>
rho, phi = math.cartesian2polar(polygon[:, 0], polygon[:, 1], *center_xy)
phi, rho = self._sort_two_arrays(phi, rho)
differences = np.abs(phi - np.roll(phi, -1))
# True distance between first and last point
differences[-1] = 2 * np.pi - differences[-1]
gap_indices = np.argwhere(differences > np.radians(self.params.gap_threshold)).flatten()
if gap_indices.size < 2:
return [polygon], gap_indices.size
gap_indices += 1
phi, rho = np.split(phi, gap_indices), np.split(rho, gap_indices)
arcs = [
np.column_stack(math.polar2cartesian(rho_coords, phi_coords, *center_xy))
for rho_coords, phi_coords in zip(rho, phi)
]
# Connect arc which lies between 0 and 2π.
if len(arcs) == gap_indices.size + 1:
arcs[0] = np.vstack([arcs[0], arcs[-1]])
arcs = arcs[:-1]
return arcs, gap_indices.size
def _smooth_arc(self, vertices: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:
"""Smooth a single contour arc.
Args:
vertices (np.ndarray): Arc's vertices.
center_xy (Tuple[float, float]): Center of an entire contour.
Returns:
np.ndarray: Smoothed arc's vertices.
"""
rho, phi = math.cartesian2polar(vertices[:, 0], vertices[:, 1], *center_xy)
phi, rho = self._sort_two_arrays(phi, rho)
idx = self._find_start_index(phi)
offset = phi[idx]
relative_phi = (phi - offset) % (2 * np.pi)
smoothed_relative_phi, smoothed_rho = self._smooth_array(relative_phi, rho)
smoothed_phi = (smoothed_relative_phi + offset) % (2 * np.pi)
x_smoothed, y_smoothed = math.polar2cartesian(smoothed_rho, smoothed_phi, *center_xy)
return np.column_stack([x_smoothed, y_smoothed])
def _smooth_circular_shape(self, vertices: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:
"""Smooth arc in a form of a circular shape.
Args:
vertices (np.ndarray): Arc's vertices.
center_xy (Tuple[float, float]): Center of an entire contour.
Returns:
np.ndarray: Smoothed arc's vertices.
"""
rho, phi = math.cartesian2polar(vertices[:, 0], vertices[:, 1], *center_xy)
padded_phi = np.concatenate([phi - 2 * np.pi, phi, phi + 2 * np.pi])
padded_rho = np.concatenate([rho, rho, rho])
smoothed_phi, smoothed_rho = self._smooth_array(padded_phi, padded_rho)
mask = (smoothed_phi >= 0) & (smoothed_phi < 2 * np.pi)
rho_smoothed, phi_smoothed = smoothed_rho[mask], smoothed_phi[mask]
x_smoothed, y_smoothed = math.polar2cartesian(rho_smoothed, phi_smoothed, *center_xy)
return np.column_stack([x_smoothed, y_smoothed])
def _smooth_array(self, phis: np.ndarray, rhos: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Smooth coordinates expressed in polar space.
Args:
phis (np.ndarray): phi values.
rhos (np.ndarray): rho values.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with smoothed coordinates (phis, rhos).
"""
interpolated_phi = np.arange(min(phis), max(phis), np.radians(self.params.dphi))
interpolated_rho = np.interp(interpolated_phi, xp=phis, fp=rhos, period=2 * np.pi)
smoothed_rho = self._rolling_median(interpolated_rho, self.kernel_offset)
smoothed_phi = interpolated_phi[self.kernel_offset : -self.kernel_offset]
return smoothed_phi, smoothed_rho
def _sort_two_arrays(self, first_list: np.ndarray, second_list: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Sort both numpy arrays based on values from the first_list.
Args:
first_list (np.ndarray): First array.
second_list (np.ndarray): Second array.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with (sorted first array, sorted second array).
"""
zipped_lists = zip(first_list, second_list)
sorted_pairs = sorted(zipped_lists)
sorted_tuples = zip(*sorted_pairs)
first_list, second_list = [list(sorted_tuple) for sorted_tuple in sorted_tuples]
return np.array(first_list), np.array(second_list)
def _find_start_index(self, phi: np.ndarray) -> int:
"""Find the start index by checking the largest gap. phi needs to be sorted.
Args:
phi (np.ndarray): phi angle values.
Raises:
GeometryRefinementError: Raised if phi values are not sorted ascendingly.
Returns:
int: Index value.
"""
if not np.all((phi - np.roll(phi, 1))[1:] >= 0):
raise GeometryRefinementError("Smoothing._find_start_index phi must be sorted ascendingly!")
phi_tmp = np.concatenate(([phi[-1] - 2 * np.pi], phi, [phi[0] + 2 * np.pi]))
phi_tmp_left_neighbor = np.roll(phi_tmp, 1)
dphi = (phi_tmp - phi_tmp_left_neighbor)[1:-1]
largest_gap_index = np.argmax(dphi)
return int(largest_gap_index)
def _rolling_median(self, signal: np.ndarray, kernel_offset: int) -> np.ndarray:
"""Compute rolling median of a 1D signal.
Args:
signal (np.ndarray): Signal values.
kernel_offset (int): Kernel offset (distance from kernel center to border).
Raises:
GeometryRefinementError: Raised if signal is not 1D.
Returns:
np.ndarray: Rolling median result.
"""
if signal.ndim != 1:
raise GeometryRefinementError("Smoothing._rolling_median only works for 1d arrays.")
stacked_signals: List[np.ndarray] = []
for i in range(-kernel_offset, kernel_offset + 1):
stacked_signals.append(np.roll(signal, i))
stacked_signals = np.stack(stacked_signals)
rolling_median = np.median(stacked_signals, axis=0)
rolling_median = rolling_median[kernel_offset:-kernel_offset]
return rolling_median
<fim_middle>"""Cut contour into arcs.
Args:
polygon (np.ndarray): Contour polygon.
center_xy (Tuple[float, float]): Polygon's center.
Returns:
Tuple[List[np.ndarray], int]: Tuple with: (list of list of vertices, number of gaps detected in a contour).
""" | """Cut contour into arcs.
Args:
polygon (np.ndarray): Contour polygon.
center_xy (Tuple[float, float]): Polygon's center.
Returns:
Tuple[List[np.ndarray], int]: Tuple with: (list of list of vertices, number of gaps detected in a contour).
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>open-iris/src/iris/nodes/eye_properties_estimation/pupil_iris_property_calculator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/eye_properties_estimation/eccentricity_offgaze_estimation.py
def __init__(
self,
assembling_method: Literal["min", "max", "mean", "only_pupil", "only_iris"] = "min",
eccentricity_method: Literal["moments", "ellipse_fit", "ellipse_fit_direct", "ellipse_fit_ams"] = "moments",
callbacks: List[Callback] = [],
) -> None:
"""Assign parameters.
Args:
assembling_method (Literal["min", "max", "mean", "only_pupil", "only_iris"], optional): How are the pupil eccentricity and iris eccentricity assembled. Defaults to "min".
eccentricity_method (Literal["moments", "ellipse_fit", "ellipse_fit_direct", "ellipse_fit_ams"], optional): How is the eccentricity determined. Defaults to "moments".
callbacks (List[Callback]): callbacks list. Defaults to [].
"""
super().__init__(
assembling_method=assembling_method, eccentricity_method=eccentricity_method, callbacks=callbacks
)
# open-iris/src/iris/nodes/encoder/iris_encoder.py
def __init__(self, mask_threshold: float = 0.9, callbacks: List[Callback] = []) -> None:
"""Assign parameters.
Args:
mask_threshold (float): threshold to binarize mask_responses, in the range of [0,1]. Defaults to 0.9.
callbacks (List[Callback]): callbacks list. Defaults to [].
"""
super().__init__(mask_threshold=mask_threshold, callbacks=callbacks)
# open-iris/src/iris/nodes/geometry_estimation/linear_extrapolation.py
def __init__(self, dphi: float = 0.9, callbacks: List[Callback] = []) -> None:
"""Assign parameters.
Args:
dphi (float, optional): phi angle delta used to sample points while doing smoothing by interpolation. Defaults to 0.9.
callbacks (List[Callback]): callbacks list. Defaults to [].
"""
super().__init__(dphi=dphi, callbacks=callbacks)
"""
from typing import List
from pydantic import Field
from iris.callbacks.callback_interface import Callback
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeCenters, GeometryPolygons, PupilToIrisProperty
from iris.io.errors import PupilIrisPropertyEstimationError
class PupilIrisPropertyCalculator(Algorithm):
"""Computes pupil-to-iris properties.
Algorithm steps:
(1) Calculate pupil diameter to iris diameter ratio, i.e. pupil dilation.
(2) Calculate the ratio of the pupil center to iris center distance over the iris diameter.
"""
class Parameters(Algorithm.Parameters):
"""PupilIrisPropertyCalculator parameters.
min_pupil_diameter (float): threshold of pupil diameter, below which the pupil is too small. min_pupil_diameter should be higher than 0.
min_iris_diameter (float): threshold of iris diameter, below which the iris is too small. min_iris_diameter should be higher than 0.
"""
min_pupil_diameter: float = Field(..., gt=0.0)
min_iris_diameter: float = Field(..., gt=0.0)
__parameters_type__ = Parameters
def __init__(
self,
min_pupil_diameter: float = 1.0,
min_iris_diameter: float = 150.0,
callbacks: List[Callback] = [],
) -> None:
<fim_suffix>
super().__init__(
min_pupil_diameter=min_pupil_diameter,
min_iris_diameter=min_iris_diameter,
callbacks=callbacks,
)
def run(self, geometries: GeometryPolygons, eye_centers: EyeCenters) -> PupilToIrisProperty:
"""Calculate pupil-to-iris property.
Args:
geometries (GeometryPolygons): polygons used for calculating pupil-to-iris property.
eye_centers (EyeCenters): eye centers used for calculating pupil-to-iris property.
Raises:
PupilIrisPropertyEstimationError: Raised if 1) the pupil or iris diameter is too small, 2) pupil diameter is larger than or equal to iris diameter, 3) pupil center is outside iris.
Returns:
PupilToIrisProperty: pupil-ro-iris property object.
"""
iris_diameter = geometries.iris_diameter
pupil_diameter = geometries.pupil_diameter
if pupil_diameter < self.params.min_pupil_diameter:
raise PupilIrisPropertyEstimationError("Pupil diameter is too small!")
if iris_diameter < self.params.min_iris_diameter:
raise PupilIrisPropertyEstimationError("Iris diameter is too small!")
if pupil_diameter >= iris_diameter:
raise PupilIrisPropertyEstimationError("Pupil diameter is larger than/equal to Iris diameter!")
if eye_centers.center_distance * 2 >= iris_diameter:
raise PupilIrisPropertyEstimationError("Pupil center is outside iris!")
return PupilToIrisProperty(
pupil_to_iris_diameter_ratio=pupil_diameter / iris_diameter,
pupil_to_iris_center_dist_ratio=eye_centers.center_distance * 2 / iris_diameter,
)
<fim_middle>"""Assign parameters.
Args:
min_pupil_diameter (float): minimum pupil diameter. Defaults to 1.0.
min_iris_diameter (float): minimum iris diameter. Defaults to 150.0.
callbacks (List[Callback]): callbacks list. Defaults to [].
""" | """Assign parameters.
Args:
min_pupil_diameter (float): minimum pupil diameter. Defaults to 1.0.
min_iris_diameter (float): minimum iris diameter. Defaults to 150.0.
callbacks (List[Callback]): callbacks list. Defaults to [].
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>open-iris/src/iris/nodes/vectorization/contouring.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/geometry_refinement/contour_points_filter.py
def __init__(self, min_distance_to_noise_and_eyeball: float = 0.005) -> None:
"""Assign parameters.
Args:
min_distance_to_noise_and_eyeball (float, optional): Minimum distance to eyeball or noise expressed as a fraction of iris diameter length. Defaults to 0.005.
"""
super().__init__(min_distance_to_noise_and_eyeball=min_distance_to_noise_and_eyeball)
# open-iris/src/iris/nodes/validators/cross_object_validators.py
def __init__(self, min_distance_to_border: float = 0.0) -> None:
"""Assign parameters.
Args:
min_distance_to_border (float, optional): Minimum allowed distance to image boundary.
Defaults to 0.0 (Eye centers can be at the image border).
"""
super().__init__(min_distance_to_border=min_distance_to_border)
# open-iris/src/iris/nodes/binarization/specular_reflection_detection.py
def __init__(self, reflection_threshold: int = 254) -> None:
"""Assign parameters.
Args:
reflection_threshold (int, optional): Specular Reflection minimal brightness threshold. Defaults to 254.
"""
super().__init__(reflection_threshold=reflection_threshold)
"""
from typing import Callable, List
import cv2
import numpy as np
from pydantic import NonNegativeFloat
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import GeometryMask, GeometryPolygons
from iris.io.errors import VectorizationError
from iris.utils.math import area
def filter_polygon_areas(
polygons: List[np.ndarray], rel_tr: NonNegativeFloat = 0.03, abs_tr: NonNegativeFloat = 0.0
) -> List[np.ndarray]:
"""Filter out polygons whose area is below either an absolute threshold or a fraction of the largest area.
Args:
polygons (List[np.ndarray]): List of polygons to filter.
rel_tr (NonNegativeFloat, optional): Relative threshold. Defaults to 0.03.
abs_tr (NonNegativeFloat, optional): Absolute threshold. Defaults to 0.0.
Returns:
List[np.ndarray]: Filtered polygons' list.
"""
areas = [area(polygon) if len(polygon) > 2 else 1.0 for polygon in polygons]
area_factors = np.array(areas) / np.max(areas)
filtered_polygons = [
polygon
for area, area_factor, polygon in zip(areas, area_factors, polygons)
if area > abs_tr and area_factor > rel_tr
]
return filtered_polygons
class ContouringAlgorithm(Algorithm):
"""Implementation of a vectorization process through contouring raster image."""
class Parameters(Algorithm.Parameters):
"""Parameters class of the ContouringAlgorithm class."""
contour_filters: List[Callable[[List[np.ndarray]], List[np.ndarray]]]
__parameters_type__ = Parameters
def __init__(
self,
contour_filters: List[Callable[[List[np.ndarray]], List[np.ndarray]]] = [filter_polygon_areas],
) -> None:
<fim_suffix>
super().__init__(contour_filters=contour_filters)
def run(self, geometry_mask: GeometryMask) -> GeometryPolygons:
"""Contouring vectorization algorithm implementation.
Args:
geometry_mask (GeometryMask): Geometry segmentation map.
Raises:
VectorizationError: Raised if iris region not segmented or an error occur during iris region processing.
Returns:
GeometryPolygons: Geometry polygons points.
"""
if not np.any(geometry_mask.iris_mask):
raise VectorizationError("Geometry raster verification failed.")
geometry_contours = self._find_contours(geometry_mask)
return geometry_contours
def _find_contours(self, mask: GeometryMask) -> GeometryPolygons:
"""Find raw contours for different classes in raster.
Args:
mask (GeometryMask): Raster object.
Returns:
GeometryPolygons: Raw contours indicating polygons of different classes.
"""
eyeball_array = self._find_class_contours(mask.filled_eyeball_mask.astype(np.uint8))
iris_array = self._find_class_contours(mask.filled_iris_mask.astype(np.uint8))
pupil_array = self._find_class_contours(mask.pupil_mask.astype(np.uint8))
return GeometryPolygons(pupil_array=pupil_array, iris_array=iris_array, eyeball_array=eyeball_array)
def _find_class_contours(self, binary_mask: np.ndarray) -> np.ndarray:
"""Find contour between two different contours.
Args:
binary_mask (np.ndarray): Raster object.
Raises:
VectorizationError: Raised if number of contours found is different than 1.
Returns:
np.ndarray: Contour points array.
"""
contours, hierarchy = cv2.findContours(binary_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if hierarchy is None:
raise VectorizationError("_find_class_contours: No contour hierarchy found at all.")
parent_indices = np.flatnonzero(hierarchy[..., 3] == -1)
contours = [np.squeeze(contours[i]) for i in parent_indices]
contours = self._filter_contours(contours)
if len(contours) != 1:
raise VectorizationError("_find_class_contours: Number of contours must be equal to 1.")
return contours[0]
def _filter_contours(self, contours: List[np.ndarray]) -> List[np.ndarray]:
"""Filter contours based on predefined filters.
Args:
contours (List[np.ndarray]): Contours list.
Returns:
List[np.ndarray]: Filtered list of contours.
"""
for filter_func in self.params.contour_filters:
contours = filter_func(contours)
return contours
<fim_middle>"""Assign parameters.
Args:
contour_filters (List[Callable[[List[np.ndarray]], List[np.ndarray]]], optional): List of filter functions used to filter out noise in polygons.
Defaults to [ContouringAlgorithm.filter_polygon_areas].
""" | """Assign parameters.
Args:
contour_filters (List[Callable[[List[np.ndarray]], List[np.ndarray]]], optional): List of filter functions used to filter out noise in polygons.
Defaults to [ContouringAlgorithm.filter_polygon_areas].
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>open-iris/src/iris/nodes/iris_response/probe_schemas/regular_probe_schema.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/io/validators.py
def are_all_positive(cls: type, v: Any, field: fields.ModelField) -> Any:
"""Check that all values are positive.
Args:
cls (type): Class type.
v (Any): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Raise if not all values in are positive.
Returns:
Any: `v` sent for further processing.
"""
if isinstance(v, Iterable):
if not np.array([value >= 0 for value in v]).all():
raise ValueError(f"{cls.__name__}: all {field.name} must be positive. Received {v}")
elif v < 0.0:
raise ValueError(f"{cls.__name__}: {field.name} must be positive. Received {v}")
return v
# open-iris/src/iris/orchestration/validators.py
def pipeline_metadata_version_check(cls: type, v: str, field: fields.ModelField) -> str:
"""Check if the version provided in the input config matches the current iris.__version__."""
if v != __version__:
raise IRISPipelineError(
f"Wrong config version. Cannot initialise IRISPipeline version {__version__} on a config file "
f"version {v}"
)
return v
# open-iris/src/iris/nodes/aggregation/noise_mask_union.py
def run(self, elements: List[NoiseMask]) -> NoiseMask:
"""Compute the union of a list of NoiseMask.
Args:
elements (List[NoiseMask]): input NoiseMasks.
Raises:
ValueError: if not all NoiseMask.mask do not have the same shape.
Returns:
NoiseMask: aggregated NoiseMasks
"""
if not all([mask.mask.shape == elements[0].mask.shape for mask in elements]):
raise ValueError(
f"Every NoiseMask.mask must have the same shape to be aggregated. "
f"Received {[mask.mask.shape for mask in elements]}"
)
noise_union = np.sum([mask.mask for mask in elements], axis=0) > 0
return NoiseMask(mask=noise_union)
"""
from typing import List, Literal, Optional, Tuple, Union
import numpy as np
from pydantic import Field, PositiveInt, confloat, fields, validator
from iris.io.errors import ProbeSchemaError
from iris.nodes.iris_response.probe_schemas.probe_schema_interface import ProbeSchema
class RegularProbeSchema(ProbeSchema):
"""Probe Schema for a regular Grid."""
class RegularProbeSchemaParameters(ProbeSchema.ProbeSchemaParameters):
"""RegularProbeSchema parameters."""
n_rows: int = Field(..., gt=1)
n_cols: int = Field(..., gt=1)
boundary_rho: List[confloat(ge=0.0, lt=1)]
boundary_phi: Union[
Literal["periodic-symmetric", "periodic-left"],
List[confloat(ge=0.0, lt=1)],
]
image_shape: Optional[List[PositiveInt]]
@validator("boundary_rho", "boundary_phi")
def check_overlap(
cls: type,
v: Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]],
field: fields.ModelField,
) -> Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]:
"""Validate offsets to avoid overlap.
Args:
cls (type): Class type.
v (Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ProbeSchemaError: Raised if the left and right offsets together are too large (the offsets would overlap).
Returns:
Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]: The value for boundary_rho or boundary_phi respectively
"""
if isinstance(v, List):
if (v[0] + v[1]) >= 1:
raise ProbeSchemaError(
f"Offset for {field.name} on left and right corner must be a sum smaller 1, otherwise, offsets overlap."
)
return v
__parameters_type__ = RegularProbeSchemaParameters
def __init__(
self,
n_rows: int,
n_cols: int,
boundary_rho: List[float] = [0, 0.0625],
boundary_phi: Union[
Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]
] = "periodic-left",
image_shape: Optional[List[PositiveInt]] = None,
) -> None:
"""Assign parameters.
Args:
n_rows (int): Number of rows used, represents the number of different rho
values
n_cols (int): Number of columns used, represents the number of different
phi values
boundary_rho (List[float], optional): List with two values f1 and f2. The sampling goes from 0+f1 to 1-f2.
boundary_phi (Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]], optional): Boundary conditions for the probing
can either be periodic or non-periodic, if they are periodic, the distance
from one column to the next must be the same also for the boundaries.
Else, no conditions for the boundaries are required. Options are:
- 'periodic-symmetric': the first and the last column are placed with an offset to the
borders, that is half of the spacing of the two columns
- 'periodic-left': the first column is at the border of the bottom of the image, while
the last column is one spacing apart from the top of the image
- list with two values: in this case an offset of value f1 and f2 is set on both ends, i.e. the
sampling no longer goes from 0 to 1 ('no-offset') but instead from 0+f1 to 1-f2.
Defaults to "periodic-left".
image_shape (list, optional): list containing the desired image dimensions. If provided, the function will raise
an error if interpolation happens, i.e. if a kernel would be placed in between two pixels. Defaults to None.
"""
super().__init__(
n_rows=n_rows,
n_cols=n_cols,
boundary_rho=boundary_rho,
boundary_phi=boundary_phi,
image_shape=image_shape,
)
def generate_schema(self) -> Tuple[np.ndarray, np.ndarray]:
"""Generate rhos and phis.
Returns:
Tuple[np.ndarray, np.ndarray]: the rhos and phis.
"""
rho = np.linspace(
0 + self.params.boundary_rho[0], 1 - self.params.boundary_rho[1], self.params.n_rows, endpoint=True
)
if self.params.boundary_phi == "periodic-symmetric":
phi = np.linspace(0, 1, self.params.n_cols, endpoint=False)
phi = phi + (phi[1] - phi[0]) / 2
if self.params.boundary_phi == "periodic-left":
phi = np.linspace(0, 1, self.params.n_cols, endpoint=False)
if isinstance(self.params.boundary_phi, List):
phi = np.linspace(
0 + self.params.boundary_phi[0], 1 - self.params.boundary_phi[1], self.params.n_cols, endpoint=True
)
phis, rhos = np.meshgrid(phi, rho)
rhos = rhos.flatten()
<fim_suffix>
# if image_shape provided: verify that values lie on pixel values
if self.params.image_shape is not None:
rhos_pixel_values = rhos * self.params.image_shape[0]
phis_pixel_values = phis * self.params.image_shape[1]
rho_pixel_values = np.logical_or(
np.less_equal(rhos_pixel_values % 1, 10 ** (-10)),
np.less_equal(1 - 10 ** (-10), rhos_pixel_values % 1),
).all()
phi_pixel_values = np.logical_or(
np.less_equal(phis_pixel_values % 1, 10 ** (-10)),
np.less_equal(1 - 10 ** (-10), phis_pixel_values % 1),
).all()
if not rho_pixel_values:
raise ProbeSchemaError(
f"Choice for n_rows {self.params.n_rows} leads to interpolation errors, please change input variables"
)
if not phi_pixel_values:
raise ProbeSchemaError(f"Choice for n_cols {self.params.n_cols} leads to interpolation errors")
return rhos, phis
@staticmethod
def find_suitable_n_rows(
row_min: int,
row_max: int,
length: int,
boundary_condition: Union[
Literal["periodic-symmetric", "periodic-left"],
List[float],
] = "periodic_symmetric",
) -> List[int]:
"""Find proper spacing of rows/columns for given boundary conditions (i.e. image size, offset. etc).
Args:
row_min (int): Starting value for row count
row_max (int): End value for row count
length (int): Pixels in the respective dimension
boundary_condition (Union[Literal["periodic-symmetric", "periodic-left"], List[float]], optional): Boundary conditions for the probing
can either be periodic or non-periodic, if they are periodic, the distance
from one row to the next must be the same also for the boundaries.
Else, no conditions for the boundaries are required. Options are:
- 'periodic-symmetric': the first and the last row are placed with an offset to the
borders, that is half of the spacing of the two rows
- 'periodic-left': the first row is at the border of the bottom of the image, while
the last row is one spacing apart from the top of the image
- list with two values: in this case an offset of value f1 and f2 is set on both ends, i.e. the
sampling no longer goes from 0 to 1 ('no-offset') but instead from 0+f1 to 1-f2.
Defaults to "periodic_symmetric".
Returns:
list: List of all numbers of rows that do not lead to interpolation errors
"""
suitable_values: List[int] = []
# loop through all values and validate whether they are suitable
for counter in range(row_min, row_max + 1):
if boundary_condition == "periodic-symmetric":
values = np.linspace(0, 1, counter, endpoint=False)
values = values + (values[1] - values[0]) / 2
if boundary_condition == "periodic-left":
values = np.linspace(0, 1, counter, endpoint=False)
if isinstance(boundary_condition, List):
values = np.linspace(0 + boundary_condition[0], 1 - boundary_condition[1], counter, endpoint=True)
pixel_values = values * length
pixel_values_modulo = pixel_values % 1
no_interpolation = np.less_equal(pixel_values_modulo, 10 ** (-10))
no_interpolation = np.logical_or(no_interpolation, np.less_equal(1 - 10 ** (-10), pixel_values_modulo))
no_interpolation = no_interpolation.all()
if no_interpolation:
suitable_values.append(counter)
return suitable_values
<fim_middle>phis = phis.flatten() | phis = phis.flatten() | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
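A short sketch of the regular (rho, phi) probe grid built in the row above, using the 'periodic-left' phi boundary and an assumed [0, 0.0625] rho boundary; the row/column counts are made-up values.
import numpy as np
n_rows, n_cols = 4, 8
boundary_rho = (0.0, 0.0625)
rho = np.linspace(0 + boundary_rho[0], 1 - boundary_rho[1], n_rows, endpoint=True)
phi = np.linspace(0, 1, n_cols, endpoint=False)   # 'periodic-left' boundary
phis, rhos = np.meshgrid(phi, rho)
rhos, phis = rhos.flatten(), phis.flatten()
print(rhos.shape, phis.shape)                     # (32,) (32,)
print(rhos.min(), rhos.max())                     # 0.0 0.9375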
<filename>open-iris/src/iris/nodes/eye_properties_estimation/occlusion_calculator.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/validators/cross_object_validators.py
def run(self, ir_image: IRImage, extrapolated_polygons: GeometryPolygons) -> None:
"""Perform validation.
Args:
ir_image (IRImage): IR image.
extrapolated_polygons (GeometryPolygons): Extrapolated polygons.
Raises:
ExtrapolatedPolygonsInsideImageValidatorError: Raised if not enough points of the pupil/iris/eyeball are within an image.
"""
if not self._check_correct_percentage(
extrapolated_polygons.pupil_array, self.params.min_pupil_allowed_percentage, ir_image
):
raise ExtrapolatedPolygonsInsideImageValidatorError("Not enough pupil points are within an image.")
if not self._check_correct_percentage(
extrapolated_polygons.iris_array, self.params.min_iris_allowed_percentage, ir_image
):
raise ExtrapolatedPolygonsInsideImageValidatorError("Not enough iris points are within an image.")
if not self._check_correct_percentage(
extrapolated_polygons.eyeball_array, self.params.min_eyeball_allowed_percentage, ir_image
):
raise ExtrapolatedPolygonsInsideImageValidatorError("Not enough eyeball points are within an image.")
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def run(
self,
image: IRImage,
noise_mask: NoiseMask,
extrapolated_contours: GeometryPolygons,
eye_orientation: EyeOrientation,
) -> NormalizedIris:
"""Normalize iris using nonlinear transformation when sampling points from cartisian to polar coordinates.
Args:
image (IRImage): Input image to normalize.
noise_mask (NoiseMask): Noise mask.
extrapolated_contours (GeometryPolygons): Extrapolated contours.
eye_orientation (EyeOrientation): Eye orientation angle.
Returns:
NormalizedIris: NormalizedIris object containing normalized image and iris mask.
"""
if len(extrapolated_contours.pupil_array) != len(extrapolated_contours.iris_array):
raise NormalizationError("Extrapolated amount of iris and pupil points must be the same.")
pupil_points, iris_points = correct_orientation(
extrapolated_contours.pupil_array,
extrapolated_contours.iris_array,
eye_orientation.angle,
)
iris_mask = generate_iris_mask(extrapolated_contours, noise_mask.mask)
iris_mask[image.img_data >= self.params.oversat_threshold] = False
src_points = self._generate_correspondences(pupil_points, iris_points)
normalized_image, normalized_mask = self._normalize_all(
original_image=image.img_data, iris_mask=iris_mask, src_points=src_points
)
normalized_iris = NormalizedIris(
normalized_image=normalized_image,
normalized_mask=normalized_mask,
)
return normalized_iris
# open-iris/src/iris/nodes/geometry_estimation/fusion_extrapolation.py
def run(self, input_polygons: GeometryPolygons, eye_center: EyeCenters) -> GeometryPolygons:
"""Perform extrapolation algorithm.
Args:
input_polygons (GeometryPolygons): Smoothed polygons.
eye_center (EyeCenters): Computed eye centers.
Returns:
GeometryPolygons: Extrapolated polygons
"""
xs, ys = input_polygons.iris_array[:, 0], input_polygons.iris_array[:, 1]
rhos, _ = cartesian2polar(xs, ys, eye_center.iris_x, eye_center.iris_y)
new_poly = self.params.circle_extrapolation(input_polygons, eye_center)
radius_std = rhos.std()
if radius_std > self.params.algorithm_switch_std_threshold:
ellipse_poly = self.params.ellipse_fit(input_polygons)
new_poly = GeometryPolygons(
pupil_array=new_poly.pupil_array,
iris_array=ellipse_poly.iris_array,
eyeball_array=input_polygons.eyeball_array,
)
return new_poly
"""
from typing import List, Tuple
import numpy as np
from pydantic import Field
from iris.callbacks.callback_interface import Callback
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeCenters, EyeOcclusion, EyeOrientation, GeometryPolygons, NoiseMask
from iris.utils import common, math
class OcclusionCalculator(Algorithm):
"""Calculate the eye occlusion value.
This algorithm computes the fraction of visible iris in an image based on extrapolated polygons and the various noise masks.
For an occlusion of 0, the iris is completely occluded. For an occlusion of 1, the iris is completely visible
For historical reasons, this remained called "Occlusion", while it more precisely refers to the "Opening" of the eye.
The parameter `quantile_angle` refers to the zone of the iris to consider for the occlusion computation.
This is because the middle horizontal third of the iris is usually more useful, since less likely to be occluded by the eyelids.
For a `quantile_angle` of 90º, the entire iris will be considered.
For a `quantile_angle` of 30º, the horizontal middle third of the iris will be considered.
For a `quantile_angle` of 0º, nothing will be considered (limit value).
"""
class Parameters(Algorithm.Parameters):
"""Default OcclusionCalculator parameters."""
quantile_angle: float = Field(..., ge=0.0, le=90.0)
__parameters_type__ = Parameters
def __init__(self, quantile_angle: float, callbacks: List[Callback] = []) -> None:
"""Assign parameters.
Args:
quantile_angle (float): Quantile angle, in degrees, delimiting the area in which the visible fraction is calculated.
callbacks (List[Callback]): callbacks list. Defaults to [].
"""
super().__init__(quantile_angle=quantile_angle, callbacks=callbacks)
def run(
self,
extrapolated_polygons: GeometryPolygons,
noise_mask: NoiseMask,
eye_orientation: EyeOrientation,
eye_centers: EyeCenters,
) -> EyeOcclusion:
"""Compute the iris visible fraction.
Args:
extrapolated_polygons (GeometryPolygons): Extrapolated polygons contours.
noise_mask (NoiseMask): Noise mask.
eye_orientation (EyeOrientation): Eye orientation angle.
eye_centers (EyeCenters): Eye centers.
Returns:
EyeOcclusion: Visible iris fraction.
"""
if self.params.quantile_angle == 0.0:
return EyeOcclusion(visible_fraction=0.0)
xs2mask, ys2mask = self._get_quantile_points(extrapolated_polygons.iris_array, eye_orientation, eye_centers)
img_h, img_w = noise_mask.mask.shape
iris_mask_quantile = common.contour_to_mask(np.column_stack([xs2mask, ys2mask]), mask_shape=(img_w, img_h))
<fim_suffix>
eyeball_mask = common.contour_to_mask(extrapolated_polygons.eyeball_array, mask_shape=(img_w, img_h))
visible_iris_mask = iris_mask_quantile & ~pupil_mask & eyeball_mask & ~noise_mask.mask
extrapolated_iris_mask = iris_mask_quantile & ~pupil_mask
if extrapolated_iris_mask.sum() == 0:
return EyeOcclusion(visible_fraction=0.0)
visible_fraction = visible_iris_mask.sum() / extrapolated_iris_mask.sum()
return EyeOcclusion(visible_fraction=visible_fraction)
def _get_quantile_points(
self, iris_coords: np.ndarray, eye_orientation: EyeOrientation, eye_centers: EyeCenters
) -> Tuple[np.ndarray, np.ndarray]:
"""Get those iris's points which fall into a specified quantile.
Args:
iris_coords (np.ndarray): Iris polygon coordinates.
eye_orientation: (EyeOrientation): Eye orientation.
eye_centers: (EyeCenters): Eye centers.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with xs and ys that falls into quantile region.
"""
orientation_angle = np.degrees(eye_orientation.angle)
num_rotations = -round(orientation_angle * len(iris_coords) / 360.0)
iris_xs, iris_ys = iris_coords[:, 0], iris_coords[:, 1]
iris_rhos, iris_phis = math.cartesian2polar(iris_xs, iris_ys, eye_centers.iris_x, eye_centers.iris_y)
iris_phis = np.roll(iris_phis, num_rotations, axis=0)
iris_rhos = np.roll(iris_rhos, num_rotations, axis=0)
scaled_quantile = round(self.params.quantile_angle * len(iris_coords) / 360.0)
phis2mask = np.concatenate(
[
iris_phis[:scaled_quantile],
iris_phis[-scaled_quantile:],
iris_phis[len(iris_phis) // 2 : len(iris_phis) // 2 + scaled_quantile],
iris_phis[len(iris_phis) // 2 - scaled_quantile : len(iris_phis) // 2],
]
)
rhos2mask = np.concatenate(
[
iris_rhos[:scaled_quantile],
iris_rhos[-scaled_quantile:],
iris_rhos[len(iris_rhos) // 2 : len(iris_rhos) // 2 + scaled_quantile],
iris_rhos[len(iris_rhos) // 2 - scaled_quantile : len(iris_rhos) // 2],
]
)
phis2mask, rhos2mask = zip(*sorted(zip(phis2mask, rhos2mask)))
xs2mask, ys2mask = math.polar2cartesian(rhos2mask, phis2mask, eye_centers.iris_x, eye_centers.iris_y)
return xs2mask, ys2mask
<fim_middle>pupil_mask = common.contour_to_mask(extrapolated_polygons.pupil_array, mask_shape=(img_w, img_h)) | pupil_mask = common.contour_to_mask(extrapolated_polygons.pupil_array, mask_shape=(img_w, img_h)) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
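Toy mask arithmetic mirroring the visible-fraction computation in the row above, on a made-up 4x4 grid instead of real segmentation masks.
import numpy as np
iris_quantile = np.array([[0, 1, 1, 0],
                          [1, 1, 1, 1],
                          [1, 1, 1, 1],
                          [0, 1, 1, 0]], dtype=bool)
pupil = np.zeros((4, 4), dtype=bool); pupil[1:3, 1:3] = True
eyeball = np.ones((4, 4), dtype=bool)
noise = np.zeros((4, 4), dtype=bool); noise[0, :] = True   # eyelid-like occlusion on top row
visible = iris_quantile & ~pupil & eyeball & ~noise
extrapolated = iris_quantile & ~pupil
print(visible.sum() / extrapolated.sum())                  # 0.75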
<filename>open-iris/src/iris/nodes/normalization/nonlinear_normalization.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def _normalize_roi(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
dst_points: np.ndarray,
normalize_roi_output_shape: Tuple[float, float],
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize a single ROI of an image.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): ROI's original input image points.
dst_points (np.ndarray): ROI's normalized image points.
normalize_roi_output_shape (t.Tuple[float, float]): Output shape of normalized ROI.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask ROIs.
"""
xmin, ymin, xmax, ymax = self._bbox_coords(dst_points)
normalize_image_xs = np.arange(xmin, xmax)
normalize_image_ys = np.arange(ymin, ymax)
normalize_image_points = np.meshgrid(normalize_image_xs, normalize_image_ys)
normalize_image_points = self.cartesian2homogeneous(normalize_image_points)
perspective_mat = cv2.getPerspectiveTransform(dst_points, src_points)
mapped_points = np.matmul(perspective_mat, normalize_image_points)
mapped_points = self.homogeneous2cartesian(mapped_points)
normalized_image_roi = np.zeros(normalize_roi_output_shape, dtype=np.float32)
normalized_mask_roi = np.zeros(normalize_roi_output_shape, dtype=bool)
for image_xy, normalized_xy in zip(mapped_points.T, normalize_image_points.T[..., :2]):
norm_x, norm_y = normalized_xy.astype(int)
shifted_y, shifted_x = norm_y - ymin, norm_x - xmin
normalized_image_roi[shifted_y, shifted_x] = interpolate_pixel_intensity(
original_image, pixel_coords=image_xy
)
try:
img_x, img_y = map(int, image_xy)
normalized_mask_roi[shifted_y, shifted_x] = iris_mask[img_y, img_x]
except IndexError:
normalized_mask_roi[shifted_y, shifted_x] = False
return normalized_image_roi / 255.0, normalized_mask_roi
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def _generate_correspondences(
self, pupil_points: np.ndarray, iris_points: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""Generate correspondences between points in original image and normalized image.
Args:
pupil_points (np.ndarray): Pupil bounding points. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris bounding points. NumPy array of shape (num_points = 512, xy_coords = 2).
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with generated correspondences.
"""
pupil_points = pupil_points[:: self.params.skip_boundary_points]
iris_points = iris_points[:: self.params.skip_boundary_points]
src_points = []
for radius in self.params.intermediate_radiuses:
ring = pupil_points + radius * (iris_points - pupil_points)
ring = np.vstack([ring, ring[0]])
src_points.append(ring)
src_points = np.array(src_points)
num_rings, num_ring_points = src_points.shape[:2]
dst_xs, dst_ys = np.meshgrid(
np.linspace(0, self.params.res_in_phi, num_ring_points).astype(int),
np.linspace(0, self.params.res_in_r, num_rings).astype(int),
)
dst_points = np.array([dst_xs, dst_ys]).transpose((1, 2, 0))
return src_points, dst_points
# open-iris/src/iris/nodes/normalization/common.py
def get_interpolation_points_coords(
image: np.ndarray, pixel_x: float, pixel_y: float
) -> Tuple[float, float, float, float]:
"""Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
"""
xmin, ymin = np.floor(pixel_x), np.floor(pixel_y)
xmax, ymax = np.ceil(pixel_x), np.ceil(pixel_y)
img_h, img_w = image.shape[:2]
if xmin == xmax and not xmax == img_w - 1:
xmax += 1
if xmin == xmax and xmax == img_w - 1:
xmin -= 1
if ymin == ymax and not ymax == img_h - 1:
ymax += 1
if ymin == ymax and ymax == img_h - 1:
ymin -= 1
return xmin, ymin, xmax, ymax
"""
from typing import Collection, Tuple
import numpy as np
from pydantic import Field
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeOrientation, GeometryPolygons, IRImage, NoiseMask, NormalizedIris
from iris.io.errors import NormalizationError
from iris.nodes.normalization.common import (
correct_orientation,
generate_iris_mask,
getgrids,
interpolate_pixel_intensity,
)
from iris.utils import math
class NonlinearNormalization(Algorithm):
"""Implementation of a normalization algorithm which uses nonlinear squared transformation to map image pixels.
Algorithm steps:
1) Create nonlinear grids of sampling radii based on parameters: res_in_r, intermediate_radiuses.
2) Compute the mapping between the normalized image pixel location and the original image location.
3) Obtain pixel values of normalized image using bilinear interpolation.
"""
class Parameters(Algorithm.Parameters):
"""Parameters class for NonlinearNormalization."""
res_in_r: int = Field(..., gt=0)
intermediate_radiuses: Collection[float]
oversat_threshold: int = Field(..., gt=0)
__parameters_type__ = Parameters
def __init__(self, res_in_r: int = 128, oversat_threshold: int = 254) -> None:
"""Assign parameters.
Args:
res_in_r (int): Normalized image r resolution. Defaults to 128.
oversat_threshold (int, optional): threshold for masking over-saturated pixels. Defaults to 254.
"""
intermediate_radiuses = np.array([getgrids(max(0, res_in_r), p2i_ratio) for p2i_ratio in range(100)])
super().__init__(
res_in_r=res_in_r,
intermediate_radiuses=intermediate_radiuses,
oversat_threshold=oversat_threshold,
)
def run(
self,
image: IRImage,
noise_mask: NoiseMask,
extrapolated_contours: GeometryPolygons,
eye_orientation: EyeOrientation,
) -> NormalizedIris:
"""Normalize iris using nonlinear transformation when sampling points from cartisian to polar coordinates.
Args:
image (IRImage): Input image to normalize.
noise_mask (NoiseMask): Noise mask.
extrapolated_contours (GeometryPolygons): Extrapolated contours.
eye_orientation (EyeOrientation): Eye orientation angle.
Returns:
NormalizedIris: NormalizedIris object containing normalized image and iris mask.
"""
if len(extrapolated_contours.pupil_array) != len(extrapolated_contours.iris_array):
raise NormalizationError("Extrapolated amount of iris and pupil points must be the same.")
pupil_points, iris_points = correct_orientation(
extrapolated_contours.pupil_array,
extrapolated_contours.iris_array,
eye_orientation.angle,
)
iris_mask = generate_iris_mask(extrapolated_contours, noise_mask.mask)
iris_mask[image.img_data >= self.params.oversat_threshold] = False
src_points = self._generate_correspondences(pupil_points, iris_points)
normalized_image, normalized_mask = self._normalize_all(
original_image=image.img_data, iris_mask=iris_mask, src_points=src_points
)
normalized_iris = NormalizedIris(
normalized_image=normalized_image,
normalized_mask=normalized_mask,
)
return normalized_iris
def _generate_correspondences(self, pupil_points: np.ndarray, iris_points: np.ndarray) -> np.ndarray:
"""Generate corresponding positions in original image.
Args:
pupil_points (np.ndarray): Pupil bounding points. NumPy array of shape (num_points x 2).
iris_points (np.ndarray): Iris bounding points. NumPy array of shape (num_points x 2).
Returns:
np.ndarray: generated corresponding points.
"""
pupil_diameter = math.estimate_diameter(pupil_points)
iris_diameter = math.estimate_diameter(iris_points)
p2i_ratio = pupil_diameter / iris_diameter
if p2i_ratio <= 0 or p2i_ratio >= 1:
raise NormalizationError(f"Invalid pupil to iris ratio, not in the range (0,1): {p2i_ratio}.")
src_points = np.array(
[
pupil_points + x * (iris_points - pupil_points)
for x in self.params.intermediate_radiuses[round(100 * (p2i_ratio))]
]
)
return src_points
def _normalize_all(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize all points of an image using bilinear.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): original input image points.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask.
"""
<fim_suffix>
src_points = np.vstack(src_points)
normalized_image = np.array(
[interpolate_pixel_intensity(original_image, pixel_coords=image_xy) for image_xy in src_points]
)
normalized_image = np.reshape(normalized_image, src_shape)
normalized_mask = np.array(
[self.get_pixel_binary(iris_mask, image_xy[0], image_xy[1]) for image_xy in src_points]
)
normalized_mask = np.reshape(normalized_mask, src_shape)
return normalized_image / 255.0, normalized_mask
@staticmethod
def get_pixel_binary(image: np.ndarray, pixel_x: float, pixel_y: float) -> bool:
"""Get the boolean value of a pixel from a binary image.
Args:
image (np.ndarray): Binary image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
bool: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return False
<fim_middle>src_shape = src_points.shape[0:2] | src_shape = src_points.shape[0:2] | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
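A sketch of the pupil-to-iris ring correspondences that drive the normalization above, with synthetic circular contours and a uniform set of intermediate radii standing in for the nonlinear grids.
import numpy as np
num_points = 8
angles = np.linspace(0, 2 * np.pi, num_points, endpoint=False)
pupil = np.column_stack([320 + 30 * np.cos(angles), 240 + 30 * np.sin(angles)])
iris = np.column_stack([320 + 100 * np.cos(angles), 240 + 100 * np.sin(angles)])
intermediate_radiuses = np.linspace(0.0, 1.0, 16)   # assumed uniform spacing for illustration
src_points = np.array([pupil + r * (iris - pupil) for r in intermediate_radiuses])
print(src_points.shape)                             # (16, 8, 2): rings x points x xy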
<filename>open-iris/src/iris/utils/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/io/dataclasses.py
def filled_iris_mask(self) -> np.ndarray:
"""Fill iris mask.
Returns:
np.ndarray: Iris mask with filled pupil "holes".
"""
binary_maps = np.zeros(self.iris_mask.shape[:2], dtype=np.uint8)
binary_maps += self.pupil_mask
binary_maps += self.iris_mask
return binary_maps.astype(bool)
# open-iris/src/iris/io/dataclasses.py
def filled_eyeball_mask(self) -> np.ndarray:
"""Fill eyeball mask.
Returns:
np.ndarray: Eyeball mask with filled iris/pupil "holes".
"""
binary_maps = np.zeros(self.eyeball_mask.shape[:2], dtype=np.uint8)
binary_maps += self.pupil_mask
binary_maps += self.iris_mask
binary_maps += self.eyeball_mask
return binary_maps.astype(bool)
# open-iris/src/iris/nodes/normalization/common.py
def correct_orientation(
pupil_points: np.ndarray, iris_points: np.ndarray, eye_orientation: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Correct orientation by changing the starting angle in pupil and iris points' arrays.
Args:
pupil_points (np.ndarray): Pupil boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
eye_orientation (float): Eye orientation angle in radians.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with rotated based on eye_orientation angle boundary points (pupil_points, iris_points).
"""
orientation_angle = np.degrees(eye_orientation)
num_rotations = -round(orientation_angle * len(pupil_points) / 360.0)
pupil_points = np.roll(pupil_points, num_rotations, axis=0)
iris_points = np.roll(iris_points, num_rotations, axis=0)
return pupil_points, iris_points
"""
from typing import Tuple
import cv2
import numpy as np
def contour_to_mask(vertices: np.ndarray, mask_shape: Tuple[int, int]) -> np.ndarray:
"""Generate binary mask based on polygon's vertices.
Args:
vertices (np.ndarray): Vertices points array.
mask_shape (Tuple[int, int]): Tuple with output mask dimension (width, height).
Returns:
np.ndarray: Binary mask.
"""
<fim_suffix>
mask = np.zeros(shape=(height, width, 3))
vertices = np.round(vertices).astype(np.int32)
cv2.fillPoly(mask, pts=[vertices], color=(255, 0, 0))
mask = mask[..., 0]
mask = mask.astype(bool)
return mask
<fim_middle>width, height = mask_shape | width, height = mask_shape | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
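A usage sketch for the contour_to_mask helper shown above; the function body is copied from the row, and running it requires numpy and opencv-python. The square vertices are made-up test data.
import cv2
import numpy as np
def contour_to_mask(vertices, mask_shape):
    width, height = mask_shape
    mask = np.zeros(shape=(height, width, 3))
    vertices = np.round(vertices).astype(np.int32)
    cv2.fillPoly(mask, pts=[vertices], color=(255, 0, 0))
    return mask[..., 0].astype(bool)
square = np.array([[10, 10], [50, 10], [50, 50], [10, 50]], dtype=float)
mask = contour_to_mask(square, mask_shape=(64, 64))
print(mask.shape, bool(mask[30, 30]))   # (64, 64) True -- pixel inside the filled square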
<filename>open-iris/src/iris/nodes/eye_properties_estimation/bisectors_method.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/geometry_refinement/smoothing.py
def _smooth_circular_shape(self, vertices: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:
"""Smooth arc in a form of a circular shape.
Args:
vertices (np.ndarray): Arc's vertices.
center_xy (Tuple[float, float]): Center of an entire contour.
Returns:
np.ndarray: Smoothed arc's vertices.
"""
rho, phi = math.cartesian2polar(vertices[:, 0], vertices[:, 1], *center_xy)
padded_phi = np.concatenate([phi - 2 * np.pi, phi, phi + 2 * np.pi])
padded_rho = np.concatenate([rho, rho, rho])
smoothed_phi, smoothed_rho = self._smooth_array(padded_phi, padded_rho)
mask = (smoothed_phi >= 0) & (smoothed_phi < 2 * np.pi)
rho_smoothed, phi_smoothed = smoothed_rho[mask], smoothed_phi[mask]
x_smoothed, y_smoothed = math.polar2cartesian(rho_smoothed, phi_smoothed, *center_xy)
return np.column_stack([x_smoothed, y_smoothed])
# open-iris/src/iris/utils/math.py
def polygon_length(polygon: np.ndarray, max_point_distance: int = 20) -> float:
"""Compute the length of a polygon represented as a (_, 2)-dimensionnal numpy array.
One polygon can include several disjoint arcs, which should be identified as separate so that the distance
between them is not counted. If a polygon is made of two small arc separated by a large distance, then the large
distance between the two arcs will not be discounted in the polygon's length
WARNING: The input polygon is assumed to be non-looped, i.e. if the first and last point are not equal,
which is the case for all ou GeometryPolygons. The last implicit segment looping back from the
last to the first point is therefore not included in the computed polygon length.
Args:
polygon (np.ndarray): (_, 2) - shaped numpy array representing a polygon.
max_point_distance (int): Maximum distance between two points for them to be considered part of the same arc.
Returns:
float: length of the polygon, in pixels.
"""
if polygon.ndim != 2 or polygon.shape[1] != 2:
raise ValueError(f"This function expects a polygon, i.e. an array of shape (_, 2). Got {polygon.shape}")
inter_point_distances = np.linalg.norm(np.roll(polygon, 1, axis=0) - polygon, axis=1)
inter_point_distances = inter_point_distances[inter_point_distances < max_point_distance]
return inter_point_distances.sum()
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def check_intermediate_radiuses(cls: type, v: Collection[float]) -> Collection[float]:
"""Check intermediate_radiuses parameter.
Args:
cls (type): PerspectiveNormalization.Parameters class.
v (Collection[float]): Variable value to check.
Raises:
NormalizationError: Raised if number of radiuses is invalid or min value is less then 0.0 or greater than 1.0.
Returns:
Collection[float]: intermediate_radiuses value passed for further processing.
"""
if len(v) < 2:
raise NormalizationError(f"Invalid number of intermediate_radiuses: {len(v)}.")
if min(v) < 0.0:
raise NormalizationError(f"Invalid min value of intermediate_radiuses: {min(v)}.")
if max(v) > 1.0:
raise NormalizationError(f"Invalid max value of intermediate_radiuses: {max(v)}.")
return v
"""
from typing import Tuple
import numpy as np
from pydantic import Field
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeCenters, GeometryPolygons
from iris.io.errors import EyeCentersEstimationError
class BisectorsMethod(Algorithm):
"""Implementation of eye's center estimation algorithm using bisectors method for finding a circle center.
This algorithm samples a given number of bisectors from the pupil and iris polygons, and averages their intersection
to produce the polygon center. This method is robust against noise in the polygons, making it a good choice for
non-perfect shapes. It is also robust to polygons missing parts of the circle arc, making it a good choice for
partially-occluded shapes.
LIMITATIONS:
The iris and pupil can be approximated by circles when the user is properly gazing at the camera.
This requires that the cases of off-gaze have already been filtered out.
"""
class Parameters(Algorithm.Parameters):
"""Default Parameters for BisectorsMethod algorithm."""
num_bisectors: int = Field(..., gt=0)
min_distance_between_sector_points: float = Field(..., gt=0.0, lt=1.0)
max_iterations: int = Field(..., gt=0)
__parameters_type__ = Parameters
def __init__(
self,
num_bisectors: int = 100,
min_distance_between_sector_points: float = 0.75,
max_iterations: int = 50,
) -> None:
"""Assign parameters.
Args:
num_bisectors (int, optional): Number of bisectors. Defaults to 100.
min_distance_between_sector_points (float, optional): Minimum distance between sector points expressed as a fractional value of a circular shape diameter. Defaults to 0.75.
max_iterations (int, optional): Max iterations for bisector search. Defaults to 50.
"""
super().__init__(
num_bisectors=num_bisectors,
min_distance_between_sector_points=min_distance_between_sector_points,
max_iterations=max_iterations,
)
def run(self, geometries: GeometryPolygons) -> EyeCenters:
"""Estimate eye's iris and pupil centers.
Args:
geometries (GeometryPolygons): Geometry polygons.
Returns:
EyeCenters: Eye's centers object.
"""
pupil_center_x, pupil_center_y = self._find_center_coords(geometries.pupil_array, geometries.pupil_diameter)
iris_center_x, iris_center_y = self._find_center_coords(geometries.iris_array, geometries.iris_diameter)
return EyeCenters(pupil_x=pupil_center_x, pupil_y=pupil_center_y, iris_x=iris_center_x, iris_y=iris_center_y)
def _find_center_coords(self, polygon: np.ndarray, diameter: float) -> Tuple[float, float]:
"""Find center coordinates of a polygon.
Args:
polygon (np.ndarray): np.ndarray.
diameter (float): diameter of the polygon.
Returns:
Tuple[float, float]: Tuple with the center location coordinates (x, y).
"""
min_distance_between_sector_points_in_px = self.params.min_distance_between_sector_points * diameter
first_bisectors_point, second_bisectors_point = self._calculate_perpendicular_bisectors(
polygon, min_distance_between_sector_points_in_px
)
return self._find_best_intersection(first_bisectors_point, second_bisectors_point)
def _calculate_perpendicular_bisectors(
self, polygon: np.ndarray, min_distance_between_sector_points_in_px: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Calculate the perpendicular bisector of self.params.num_bisectors randomly chosen points from a polygon's vertices.
A pair of points is used if their distance is larger than min_distance_between_sector_points_in_px.
Args:
polygon (np.ndarray): np.ndarray based on which we are searching the center of a circular shape.
min_distance_between_sector_points_in_px (float): Minimum distance between sector points.
Raises:
EyeCentersEstimationError: Raised if not able to find enough random pairs of points on the arc with a large enough distance!
Returns:
Tuple[np.ndarray, np.ndarray]: Calculated perpendicular bisectors.
"""
np.random.seed(142857)
bisectors_first_points = np.empty([0, 2])
bisectors_second_points = np.empty([0, 2])
for _ in range(self.params.max_iterations):
random_indices = np.random.choice(len(polygon), size=(self.params.num_bisectors, 2))
first_drawn_points = polygon[random_indices[:, 0]]
second_drawn_points = polygon[random_indices[:, 1]]
norms = np.linalg.norm(first_drawn_points - second_drawn_points, axis=1)
<fim_suffix>
bisectors_first_points = np.vstack([bisectors_first_points, first_drawn_points[mask]])
bisectors_second_points = np.vstack([bisectors_second_points, second_drawn_points[mask]])
if len(bisectors_first_points) >= self.params.num_bisectors:
break
else:
raise EyeCentersEstimationError(
"Not able to find enough random pairs of points on the arc with a large enough distance!"
)
bisectors_first_points = bisectors_first_points[: self.params.num_bisectors]
bisectors_second_points = bisectors_second_points[: self.params.num_bisectors]
bisectors_center = (bisectors_first_points + bisectors_second_points) / 2
# Flip xs with ys and flip the sign of one of them to create a 90deg rotation
inv_bisectors_center_slope = np.fliplr(bisectors_second_points - bisectors_first_points)
inv_bisectors_center_slope[:, 1] = -inv_bisectors_center_slope[:, 1]
# Add perpendicular vector to center and normalize
norm = np.linalg.norm(inv_bisectors_center_slope, axis=1)
inv_bisectors_center_slope[:, 0] /= norm
inv_bisectors_center_slope[:, 1] /= norm
first_bisectors_point = bisectors_center - inv_bisectors_center_slope
second_bisectors_point = bisectors_center + inv_bisectors_center_slope
return first_bisectors_point, second_bisectors_point
def _find_best_intersection(self, fst_points: np.ndarray, sec_points: np.ndarray) -> Tuple[float, float]:
"""fst_points and sec_points are NxD arrays defining N lines. D is the dimension of the space.
This function returns the least squares intersection of the N lines from the system given by eq. 13 in
http://cal.cs.illinois.edu/~johannes/research/LS_line_intersect.pdf.
Args:
fst_points (np.ndarray): First bisectors points.
sec_points (np.ndarray): Second bisectors points.
Returns:
Tuple[float, float]: Best intersection point.
Reference:
[1] http://cal.cs.illinois.edu/~johannes/research/LS_line_intersect.pdf
"""
norm_bisectors = (sec_points - fst_points) / np.linalg.norm(sec_points - fst_points, axis=1)[:, np.newaxis]
# Generate the array of all projectors I - n*n.T
projections = np.eye(norm_bisectors.shape[1]) - norm_bisectors[:, :, np.newaxis] * norm_bisectors[:, np.newaxis]
# Generate R matrix and q vector
R = projections.sum(axis=0)
q = (projections @ fst_points[:, :, np.newaxis]).sum(axis=0)
# Solve the least squares problem for the intersection point p: Rp = q
p = np.linalg.lstsq(R, q, rcond=None)[0]
intersection_x, intersection_y = p
return intersection_x.item(), intersection_y.item()
<fim_middle>mask = norms > min_distance_between_sector_points_in_px | mask = norms > min_distance_between_sector_points_in_px | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
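The completed line is a plain vectorised comparison; a standalone numpy illustration with invented numbers (not taken from the repository):
import numpy as np
norms = np.array([0.2, 5.0, 12.0])
min_distance_between_sector_points_in_px = 4.0
mask = norms > min_distance_between_sector_points_in_px
print(mask)  # [False  True  True] -- only sufficiently distant point pairs are kept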
<filename>open-iris/src/iris/nodes/iris_response/probe_schemas/regular_probe_schema.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/io/validators.py
def are_all_positive(cls: type, v: Any, field: fields.ModelField) -> Any:
"""Check that all values are positive.
Args:
cls (type): Class type.
v (Any): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Raised if not all values are positive.
Returns:
Any: `v` sent for further processing.
"""
if isinstance(v, Iterable):
if not np.array([value >= 0 for value in v]).all():
raise ValueError(f"{cls.__name__}: all {field.name} must be positive. Received {v}")
elif v < 0.0:
raise ValueError(f"{cls.__name__}: {field.name} must be positive. Received {v}")
return v
# open-iris/src/iris/orchestration/validators.py
def pipeline_metadata_version_check(cls: type, v: str, field: fields.ModelField) -> str:
"""Check if the version provided in the input config matches the current iris.__version__."""
if v != __version__:
raise IRISPipelineError(
f"Wrong config version. Cannot initialise IRISPipeline version {__version__} on a config file "
f"version {v}"
)
return v
# open-iris/src/iris/nodes/aggregation/noise_mask_union.py
def run(self, elements: List[NoiseMask]) -> NoiseMask:
"""Compute the union of a list of NoiseMask.
Args:
elements (List[NoiseMask]): input NoiseMasks.
Raises:
ValueError: Raised if the NoiseMask.mask arrays do not all have the same shape.
Returns:
NoiseMask: aggregated NoiseMasks
"""
if not all([mask.mask.shape == elements[0].mask.shape for mask in elements]):
raise ValueError(
f"Every NoiseMask.mask must have the same shape to be aggregated. "
f"Received {[mask.mask.shape for mask in elements]}"
)
noise_union = np.sum([mask.mask for mask in elements], axis=0) > 0
return NoiseMask(mask=noise_union)
"""
from typing import List, Literal, Optional, Tuple, Union
import numpy as np
from pydantic import Field, PositiveInt, confloat, fields, validator
from iris.io.errors import ProbeSchemaError
from iris.nodes.iris_response.probe_schemas.probe_schema_interface import ProbeSchema
class RegularProbeSchema(ProbeSchema):
"""Probe Schema for a regular Grid."""
class RegularProbeSchemaParameters(ProbeSchema.ProbeSchemaParameters):
"""RegularProbeSchema parameters."""
n_rows: int = Field(..., gt=1)
n_cols: int = Field(..., gt=1)
boundary_rho: List[confloat(ge=0.0, lt=1)]
boundary_phi: Union[
Literal["periodic-symmetric", "periodic-left"],
List[confloat(ge=0.0, lt=1)],
]
image_shape: Optional[List[PositiveInt]]
@validator("boundary_rho", "boundary_phi")
def check_overlap(
cls: type,
v: Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]],
field: fields.ModelField,
) -> Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]:
"""Validate offsets to avoid overlap.
Args:
cls (type): Class type.
v (Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ProbeSchemaError: Raised if the left and right offsets together are too large and would overlap.
Returns:
Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]: The value for boundary_rho or boundary_phi respectively
"""
if isinstance(v, List):
if (v[0] + v[1]) >= 1:
raise ProbeSchemaError(
f"Offset for {field.name} on left and right corner must be a sum smaller 1, otherwise, offsets overlap."
)
return v
__parameters_type__ = RegularProbeSchemaParameters
def __init__(
self,
n_rows: int,
n_cols: int,
boundary_rho: List[float] = [0, 0.0625],
boundary_phi: Union[
Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]
] = "periodic-left",
image_shape: Optional[List[PositiveInt]] = None,
) -> None:
"""Assign parameters.
Args:
n_rows (int): Number of rows used, represents the number of different rho
values
n_cols (int): Number of columns used, represents the number of different
phi values
boundary_rho (List[float], optional): List with two values f1 and f2. The sampling goes from 0+f1 to 1-f2.
boundary_phi (Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]], optional): Boundary conditions for the probing
can either be periodic or non-periodic, if they are periodic, the distance
from one column to the next must be the same also for the boundaries.
Else, no conditions for the boundaries are required. Options are:
- 'periodic-symmetric': the first and the last column are placed with an offset to the
borders, that is half of the spacing of the two columns
- 'periodic-left': the first column is at the border of the bottom of the image, while
the last column is one spacing apart from the top of the image
- list with two values: in this case an offset of value f1 and f2 is set on both ends, i.e. the
sampling no longer goes from 0 to 1 ('no-offset') but instead from 0+f1 to 1-f2
Defaults to "periodic-left".
image_shape (list, optional): list containing the desired image dimensions. If provided, schema generation will raise
an error if interpolation happens, i.e. if a kernel would be placed in between two pixels. Defaults to None.
"""
super().__init__(
n_rows=n_rows,
n_cols=n_cols,
boundary_rho=boundary_rho,
boundary_phi=boundary_phi,
image_shape=image_shape,
)
def generate_schema(self) -> Tuple[np.ndarray, np.ndarray]:
"""Generate rhos and phis.
Return:
Tuple[np.ndarray, np.ndarray]: the rhos and phis.
"""
rho = np.linspace(
0 + self.params.boundary_rho[0], 1 - self.params.boundary_rho[1], self.params.n_rows, endpoint=True
)
if self.params.boundary_phi == "periodic-symmetric":
<fim_suffix>
phi = phi + (phi[1] - phi[0]) / 2
if self.params.boundary_phi == "periodic-left":
phi = np.linspace(0, 1, self.params.n_cols, endpoint=False)
if isinstance(self.params.boundary_phi, List):
phi = np.linspace(
0 + self.params.boundary_phi[0], 1 - self.params.boundary_phi[1], self.params.n_cols, endpoint=True
)
phis, rhos = np.meshgrid(phi, rho)
rhos = rhos.flatten()
phis = phis.flatten()
# if image_shape provided: verify that values lie on pixel values
if self.params.image_shape is not None:
rhos_pixel_values = rhos * self.params.image_shape[0]
phis_pixel_values = phis * self.params.image_shape[1]
rho_pixel_values = np.logical_or(
np.less_equal(rhos_pixel_values % 1, 10 ** (-10)),
np.less_equal(1 - 10 ** (-10), rhos_pixel_values % 1),
).all()
phi_pixel_values = np.logical_or(
np.less_equal(phis_pixel_values % 1, 10 ** (-10)),
np.less_equal(1 - 10 ** (-10), phis_pixel_values % 1),
).all()
if not rho_pixel_values:
raise ProbeSchemaError(
f"Choice for n_rows {self.params.n_rows} leads to interpolation errors, please change input variables"
)
if not phi_pixel_values:
raise ProbeSchemaError(f"Choice for n_cols {self.params.n_cols} leads to interpolation errors")
return rhos, phis
@staticmethod
def find_suitable_n_rows(
row_min: int,
row_max: int,
length: int,
boundary_condition: Union[
Literal["periodic-symmetric", "periodic-left"],
List[float],
] = "periodic_symmetric",
) -> List[int]:
"""Find proper spacing of rows/columns for given boundary conditions (i.e. image size, offset. etc).
Args:
row_min (int): Starting value for row count
row_max (int): End value for row count
length (int): Pixels in the respective dimension
boundary_condition (Union[Literal["periodic-symmetric", "periodic-left"], List[float]], optional): Boundary conditions for the probing
can either be periodic or non-periodic, if they are periodic, the distance
from one row to the next must be the same also for the boundaries.
Else, no conditions for the boundaries are required. Options are:
- 'periodic-symmetric': the first and the last row are placed with an offset to the
borders, that is half of the spacing of the two rows
- 'periodic-left': the first row is at the border of the bottom of the image, while
the last row is one spacing apart from the top of the image
- list with two values: in this case an offset of value f1 and f2 is set on both ends, i.e. the
sampling no longer goes from 0 to 1 ('no-offset') but instead from 0+f1 to 1-f2
Defaults to "periodic-symmetric".
Returns:
list: List of all number of rows that does not lead to interpolation errors
"""
suitable_values: List[int] = []
# loop through all values and validate whether they are suitable
for counter in range(row_min, row_max + 1):
if boundary_condition == "periodic-symmetric":
values = np.linspace(0, 1, counter, endpoint=False)
values = values + (values[1] - values[0]) / 2
if boundary_condition == "periodic-left":
values = np.linspace(0, 1, counter, endpoint=False)
if isinstance(boundary_condition, List):
values = np.linspace(0 + boundary_condition[0], 1 - boundary_condition[1], counter, endpoint=True)
pixel_values = values * length
pixel_values_modulo = pixel_values % 1
no_interpolation = np.less_equal(pixel_values_modulo, 10 ** (-10))
no_interpolation = np.logical_or(no_interpolation, np.less_equal(1 - 10 ** (-10), pixel_values_modulo))
no_interpolation = no_interpolation.all()
if no_interpolation:
suitable_values.append(counter)
return suitable_values
<fim_middle>phi = np.linspace(0, 1, self.params.n_cols, endpoint=False) | phi = np.linspace(0, 1, self.params.n_cols, endpoint=False) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
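To make the completed 'periodic-symmetric' branch concrete, here is a standalone sketch with an invented n_cols (the values in the comments follow directly from numpy's linspace):
import numpy as np
n_cols = 4
phi = np.linspace(0, 1, n_cols, endpoint=False)  # [0.    0.25  0.5   0.75]
phi = phi + (phi[1] - phi[0]) / 2                # [0.125 0.375 0.625 0.875] -- half-spacing offset at both borders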
<filename>open-iris/src/iris/nodes/normalization/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def get_pixel_binary(image: np.ndarray, pixel_x: float, pixel_y: float) -> bool:
"""Get the boolean value of a pixel from a binary image.
Args:
image (np.ndarray): Binary image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
bool: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return False
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def _normalize_all(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize all points of an image using bilinear.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): original input image points.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask.
"""
src_shape = src_points.shape[0:2]
src_points = np.vstack(src_points)
normalized_image = np.array(
[interpolate_pixel_intensity(original_image, pixel_coords=image_xy) for image_xy in src_points]
)
normalized_image = np.reshape(normalized_image, src_shape)
normalized_mask = np.array(
[self.get_pixel_binary(iris_mask, image_xy[0], image_xy[1]) for image_xy in src_points]
)
normalized_mask = np.reshape(normalized_mask, src_shape)
return normalized_image / 255.0, normalized_mask
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def _normalize_roi(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
dst_points: np.ndarray,
normalize_roi_output_shape: Tuple[float, float],
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize a single ROI of an image.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): ROI's original input image points.
dst_points (np.ndarray): ROI's normalized image points.
normalize_roi_output_shape (t.Tuple[float, float]): Output shape of normalized ROI.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask ROIs.
"""
xmin, ymin, xmax, ymax = self._bbox_coords(dst_points)
normalize_image_xs = np.arange(xmin, xmax)
normalize_image_ys = np.arange(ymin, ymax)
normalize_image_points = np.meshgrid(normalize_image_xs, normalize_image_ys)
normalize_image_points = self.cartesian2homogeneous(normalize_image_points)
perspective_mat = cv2.getPerspectiveTransform(dst_points, src_points)
mapped_points = np.matmul(perspective_mat, normalize_image_points)
mapped_points = self.homogeneous2cartesian(mapped_points)
normalized_image_roi = np.zeros(normalize_roi_output_shape, dtype=np.float32)
normalized_mask_roi = np.zeros(normalize_roi_output_shape, dtype=bool)
for image_xy, normalized_xy in zip(mapped_points.T, normalize_image_points.T[..., :2]):
norm_x, norm_y = normalized_xy.astype(int)
shifted_y, shifted_x = norm_y - ymin, norm_x - xmin
normalized_image_roi[shifted_y, shifted_x] = interpolate_pixel_intensity(
original_image, pixel_coords=image_xy
)
try:
img_x, img_y = map(int, image_xy)
normalized_mask_roi[shifted_y, shifted_x] = iris_mask[img_y, img_x]
except IndexError:
normalized_mask_roi[shifted_y, shifted_x] = False
return normalized_image_roi / 255.0, normalized_mask_roi
"""
from typing import Tuple
import numpy as np
from pydantic import NonNegativeInt
from iris.io.dataclasses import GeometryPolygons
from iris.utils import common
def generate_iris_mask(extrapolated_contours: GeometryPolygons, noise_mask: np.ndarray) -> np.ndarray:
"""Generate iris mask by first finding the intersection region between extrapolated iris contours and eyeball contours. Then remove from the outputted mask those pixels for which noise_mask is equal to True.
Args:
extrapolated_contours (GeometryPolygons): Iris polygon vertices.
noise_mask (np.ndarray): Noise mask.
Returns:
np.ndarray: Iris mask.
"""
img_h, img_w = noise_mask.shape[:2]
iris_mask = common.contour_to_mask(extrapolated_contours.iris_array, (img_w, img_h))
eyeball_mask = common.contour_to_mask(extrapolated_contours.eyeball_array, (img_w, img_h))
iris_mask = iris_mask & eyeball_mask
iris_mask = ~(iris_mask & noise_mask) & iris_mask
return iris_mask
def correct_orientation(
pupil_points: np.ndarray, iris_points: np.ndarray, eye_orientation: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Correct orientation by changing the starting angle in pupil and iris points' arrays.
Args:
pupil_points (np.ndarray): Pupil boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
eye_orientation (float): Eye orientation angle in radians.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with boundary points (pupil_points, iris_points) rotated based on the eye_orientation angle.
"""
orientation_angle = np.degrees(eye_orientation)
num_rotations = -round(orientation_angle * len(pupil_points) / 360.0)
pupil_points = np.roll(pupil_points, num_rotations, axis=0)
iris_points = np.roll(iris_points, num_rotations, axis=0)
return pupil_points, iris_points
def getgrids(res_in_r: NonNegativeInt, p2i_ratio: NonNegativeInt) -> np.ndarray:
"""Generate radius grids for nonlinear normalization based on p2i_ratio (pupil_to_iris ratio).
Args:
res_in_r (NonNegativeInt): Normalized image r resolution.
p2i_ratio (NonNegativeInt): pupil_to_iris ratio, range in [0,100]
Returns:
np.ndarray: nonlinear sampling grids for normalization
"""
p = [np.square(x) for x in np.arange(28, max(74 - p2i_ratio, p2i_ratio - 14), 1)]
q = p - p[0]
q = q / q[-1]
grids = np.interp(np.linspace(0, 1.0, res_in_r + 1), np.linspace(0, 1.0, len(q)), q)
return grids[0:-1] + np.diff(grids) / 2
def interpolate_pixel_intensity(image: np.ndarray, pixel_coords: Tuple[float, float]) -> float:
"""Perform bilinear interpolation to estimate pixel intensity in a given location.
Args:
image (np.ndarray): Original, not normalized image.
pixel_coords (Tuple[float, float]): Pixel coordinates.
Returns:
float: Interpolated pixel intensity.
Reference:
[1] https://en.wikipedia.org/wiki/Bilinear_interpolation
"""
def get_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return 0.0
def get_interpolation_points_coords(
image: np.ndarray, pixel_x: float, pixel_y: float
) -> Tuple[float, float, float, float]:
"""Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
"""
xmin, ymin = np.floor(pixel_x), np.floor(pixel_y)
xmax, ymax = np.ceil(pixel_x), np.ceil(pixel_y)
img_h, img_w = image.shape[:2]
if xmin == xmax and not xmax == img_w - 1:
xmax += 1
if xmin == xmax and xmax == img_w - 1:
xmin -= 1
if ymin == ymax and not ymax == img_h - 1:
ymax += 1
if ymin == ymax and ymax == img_h - 1:
ymin -= 1
<fim_suffix>
pixel_x, pixel_y = pixel_coords
xmin, ymin, xmax, ymax = get_interpolation_points_coords(image, pixel_x=pixel_x, pixel_y=pixel_y)
lower_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymax)
lower_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymax)
upper_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymin)
upper_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymin)
xs_differences = np.array([xmax - pixel_x, pixel_x - xmin])
neighboring_pixel_intensities = np.array(
[
[lower_left_pixel_intensity, upper_left_pixel_intensity],
[lower_right_pixel_intensity, upper_right_pixel_intensity],
]
)
ys_differences = np.array([[pixel_y - ymin], [ymax - pixel_y]])
pixel_intensity = np.matmul(np.matmul(xs_differences, neighboring_pixel_intensities), ys_differences)
return pixel_intensity.item()
<fim_middle>return xmin, ymin, xmax, ymax | return xmin, ymin, xmax, ymax | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
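A minimal check of the interpolate_pixel_intensity function defined above (the 2x2 image is invented; the function itself is assumed to be in scope). Sampling the centre of the four pixels should return their average:
import numpy as np
image = np.array([[0.0, 10.0], [20.0, 30.0]])
value = interpolate_pixel_intensity(image, pixel_coords=(0.5, 0.5))
print(value)  # 15.0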
<filename>open-iris/src/iris/nodes/iris_response/image_filters/gabor_filters.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/iris_response/image_filters/image_filter_interface.py
def compute_kernel_values(self) -> np.ndarray:
"""Compute values of filter kernel.
Returns:
np.ndarray: Computed kernel values.
"""
pass
# open-iris/src/iris/nodes/geometry_refinement/smoothing.py
def _find_start_index(self, phi: np.ndarray) -> int:
"""Find the start index by checking the largest gap. phi needs to be sorted.
Args:
phi (np.ndarray): phi angle values.
Raises:
GeometryRefinementError: Raised if phi values are not sorted ascendingly.
Returns:
int: Index value.
"""
if not np.all((phi - np.roll(phi, 1))[1:] >= 0):
raise GeometryRefinementError("Smoothing._find_start_index phi must be sorted ascendingly!")
phi_tmp = np.concatenate(([phi[-1] - 2 * np.pi], phi, [phi[0] + 2 * np.pi]))
phi_tmp_left_neighbor = np.roll(phi_tmp, 1)
dphi = (phi_tmp - phi_tmp_left_neighbor)[1:-1]
largest_gap_index = np.argmax(dphi)
return int(largest_gap_index)
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def cartesian2homogeneous(points: List[np.ndarray]) -> np.ndarray:
"""Convert points in cartesian coordinates to homogeneous coordinates.
Args:
points (List[np.ndarray]): Points in cartesian coordinates. Array should be in format: [[x values], [y values]].
Returns:
np.ndarray: Points in homogeneous coordinates. Returned array will have format: [[x values], [y values], [1 ... 1]].
"""
x_coords, y_coords = points
x_coords = x_coords.reshape(-1, 1)
y_coords = y_coords.reshape(-1, 1)
homogeneous_coords = np.hstack([x_coords, y_coords, np.ones((len(x_coords), 1))])
return homogeneous_coords.T
"""
from typing import Any, Dict, Tuple
import numpy as np
from pydantic import Field, conint, root_validator, validator
import iris.io.validators as pydantic_v
from iris.io.errors import ImageFilterError
from iris.nodes.iris_response.image_filters.image_filter_interface import ImageFilter
def upper_bound_Gabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bounds of Gabor filter parameters such as sigma_phi, sigma_rho and lambda_phi for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: Raised if 1) sigma_phi is greater than kernel_size[0], 2) sigma_rho is greater than kernel_size[1], 3) lambda_phi is greater than kernel_size[0].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, sigma_phi, sigma_rho, lambda_phi = (
values["kernel_size"],
values["sigma_phi"],
values["sigma_rho"],
values["lambda_phi"],
)
if sigma_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: sigma_phi can not be greater than kernel_size[0].")
if sigma_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: sigma_rho can not be greater than kernel_size[1].")
if lambda_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: lambda_phi can not be greater than kernel_size[0].")
return values
def upper_bound_LogGabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bound of LogGabor filter parameter lambda_rho for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: lambda_rho can not be greater than kernel_size[1].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, lambda_rho = values["kernel_size"], values["lambda_rho"]
if lambda_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: lambda_rho can not be greater than kernel_size[1].")
return values
def get_xy_mesh(kernel_size: Tuple[int, int]) -> Tuple[np.ndarray, np.ndarray]:
"""Get (x,y) meshgrids for a given kernel size.
Args:
kernel_size (Tuple[int, int]): Kernel width and height.
Returns:
Tuple[np.ndarray, np.ndarray]: meshgrid of (x, y) positions.
"""
ksize_phi_half = kernel_size[0] // 2
ksize_rho_half = kernel_size[1] // 2
y, x = np.meshgrid(
np.arange(-ksize_phi_half, ksize_phi_half + 1),
np.arange(-ksize_rho_half, ksize_rho_half + 1),
indexing="xy",
sparse=True,
)
return x, y
def get_radius(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""Get radius to the image center for a given array of relative positions (x,y).
Args:
x (np.ndarray): x position relative to the image center.
y (np.ndarray): y position relative to the image center.
Returns:
np.ndarray: radius to the image center.
"""
radius = np.sqrt(x**2 + y**2)
return radius
def rotate(x: np.ndarray, y: np.ndarray, angle: float) -> Tuple[np.ndarray, np.ndarray]:
"""Rotate a given array of relative positions (x,y) by a given angle.
Args:
x (np.ndarray): x position.
y (np.ndarray): y position.
angle (float): angle for rotation (in degrees).
Returns:
Tuple[np.ndarray, np.ndarray]: rotated x, y positions.
"""
cos_theta = np.cos(angle * np.pi / 180)
sin_theta = np.sin(angle * np.pi / 180)
rotx = x * cos_theta + y * sin_theta
roty = -x * sin_theta + y * cos_theta
return rotx, roty
def normalize_kernel_values(kernel_values: np.ndarray) -> np.ndarray:
"""Normalize the kernel values so that the square sum is 1.
Args:
kernel_values (np.ndarray): Kernel values (complex numbers).
Returns:
np.ndarray: normalized Kernel values.
"""
norm_real = np.linalg.norm(kernel_values.real, ord="fro")
if norm_real > 0:
kernel_values.real /= norm_real
norm_imag = np.linalg.norm(kernel_values.imag, ord="fro")
if norm_imag > 0:
kernel_values.imag /= norm_imag
return kernel_values
def convert_to_fixpoint_kernelvalues(kernel_values: np.ndarray) -> np.ndarray:
"""Convert the kernel values (both real and imaginary) to fix points.
Args:
kernel_values (np.ndarray): Kernel values.
Returns:
np.ndarray: fix-point Kernel values.
"""
if np.iscomplexobj(kernel_values):
kernel_values.real = np.round(kernel_values.real * 2**15)
kernel_values.imag = np.round(kernel_values.imag * 2**15)
else:
kernel_values = np.round(kernel_values * 2**15)
return kernel_values
class GaborFilter(ImageFilter):
"""Implementation of a 2D Gabor filter.
Reference:
[1] https://inc.ucsd.edu/mplab/75/media//gabor.pdf.
"""
class Parameters(ImageFilter.Parameters):
"""GaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., ge=1)
sigma_rho: float = Field(..., ge=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_phi: float = Field(..., ge=2)
dc_correction: bool
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_Gabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_phi: float,
dc_correction: bool = True,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): phi standard deviation.
sigma_rho (float): rho standard deviation.
theta_degrees (float): orientation of kernel in degrees.
lambda_phi (float): wavelength of the sinusoidal factor, lower value = thinner strip.
dc_correction (bool, optional): whether to enable DC correction. Defaults to True.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_phi=lambda_phi,
dc_correction=dc_correction,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D Gabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
rotx, roty = rotate(x, y, self.params.theta_degrees)
# calculate carrier and envelope
carrier = 1j * 2 * np.pi / self.params.lambda_phi * rotx
envelope = -(rotx**2 / self.params.sigma_phi**2 + roty**2 / self.params.sigma_rho**2) / 2
# calculate kernel values
kernel_values = np.exp(envelope + carrier)
kernel_values /= 2 * np.pi * self.params.sigma_phi * self.params.sigma_rho
# apply DC correction
if self.params.dc_correction:
# Step 1: calculate mean value of Gabor Wavelet
g_mean = np.mean(np.real(kernel_values), axis=-1)
# Step 2: define gaussian offset
<fim_suffix>
# Step 3: subtract gaussian
kernel_values = kernel_values - (g_mean / correction_term_mean)[:, np.newaxis] * envelope
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
class LogGaborFilter(ImageFilter):
"""Implementation of a 2D LogGabor filter.
Reference:
[1] https://en.wikipedia.org/wiki/Log_Gabor_filter.
"""
class Parameters(ImageFilter.Parameters):
"""LogGaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., gt=0, le=np.pi)
sigma_rho: float = Field(..., gt=0.1, le=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_rho: float = Field(..., gt=2)
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_LogGabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_rho: float,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): bandwidth in phi (frequency domain).
sigma_rho (float): bandwidth in rho (frequency domain).
theta_degrees (float): orientation of filter in degrees.
lambda_rho (float): wavelength in rho.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_rho=lambda_rho,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D LogGabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
radius = get_radius(x, y)
# remove 0 radius value in the center
ksize_phi_half = self.params.kernel_size[0] // 2
ksize_rho_half = self.params.kernel_size[1] // 2
radius[ksize_rho_half][ksize_phi_half] = 1
# get angular distance
[rotx, roty] = rotate(x, y, self.params.theta_degrees)
dtheta = np.arctan2(roty, rotx)
# calculate envelope and orientation
envelope = np.exp(
-0.5
* np.log2(radius * self.params.lambda_rho / self.params.kernel_size[1]) ** 2
/ self.params.sigma_rho**2
)
envelope[ksize_rho_half][ksize_phi_half] = 0
orientation = np.exp(-0.5 * dtheta**2 / self.params.sigma_phi**2)
# calculate kernel values
kernel_values = envelope * orientation
kernel_values = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kernel_values)))
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
<fim_middle>correction_term_mean = np.mean(envelope, axis=-1) | correction_term_mean = np.mean(envelope, axis=-1) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
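A hypothetical instantiation of the GaborFilter defined above (the parameter values are invented and only need to satisfy the pydantic validators; the ImageFilter base class is assumed to add no further constraints):
gabor = GaborFilter(kernel_size=(21, 21), sigma_phi=7.0, sigma_rho=7.0, theta_degrees=90.0, lambda_phi=8.0)
kernel = gabor.compute_kernel_values()
print(kernel.shape)  # (21, 21) complex kernel; real and imaginary parts are each normalized to unit Frobenius norm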
<filename>open-iris/src/iris/io/validators.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/io/dataclasses.py
def _check_segmap_shape_and_consistency(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check that the number of classes equals the depth of the segmentation map.
Args:
values (Dict[str, Any]): Dictionary with segmap and classes {param_name: data}.
Raises:
ValueError: Raised if there is a mismatch between the segmentation map depth and the number of classes.
Returns:
Dict[str, Any]: Unmodified values parameter passed for further processing.
"""
if values["predictions"].shape[2] != len(values["index2class"]):
segmap_depth, nb_classes = values["predictions"].shape, len(values["index2class"])
raise ValueError(
f"{cls.__name__}: mismatch between predictions shape {segmap_depth} and classes length {nb_classes}."
)
return values
# open-iris/src/iris/orchestration/validators.py
def pipeline_metadata_version_check(cls: type, v: str, field: fields.ModelField) -> str:
"""Check if the version provided in the input config matches the current iris.__version__."""
if v != __version__:
raise IRISPipelineError(
f"Wrong config version. Cannot initialise IRISPipeline version {__version__} on a config file "
f"version {v}"
)
return v
# open-iris/src/iris/orchestration/output_builders.py
def __get_error(call_trace: PipelineCallTraceStorage) -> Optional[Dict[str, Any]]:
"""Produce error output from a call_trace.
Args:
call_trace (PipelineCallTraceStorage): Pipeline call trace.
Returns:
Optional[Dict[str, Any]]: Optional error dictionary if such occured.
"""
exception = call_trace.get_error()
error = None
if isinstance(exception, Exception):
error = {
"error_type": type(exception).__name__,
"message": str(exception),
"traceback": "".join(traceback.format_tb(exception.__traceback__)),
}
return error
"""
from typing import Any, Callable, Dict, Iterable, List
import numpy as np
from pydantic import fields
# ----- validators -----
def is_odd(cls: type, v: int, field: fields.ModelField) -> int:
"""Check that kernel size are odd numbers.
Args:
cls (type): Class type.
v (int): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Exception raised if number isn't odd.
Returns:
int: `v` sent for further processing.
"""
if (v % 2) == 0:
raise ValueError(f"{cls.__name__}: {field.name} must be odd numbers.")
return v
def is_binary(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
"""Check if array has only boolean values, i.e. is binary.
Args:
cls (type): Class type.
v (np.ndarray): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Exception raised if array doesn't contain bool datatypes.
Returns:
np.ndarray: `v` sent for further processing.
"""
if v.dtype != np.dtype("bool"):
raise ValueError(f"{cls.__name__}: {field.name} must be binary. got dtype {v.dtype}")
return v
def is_list_of_points(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
"""Check if np.ndarray has shape (_, 2).
Args:
cls (type): Class type.
v (np.ndarray): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Exception raised if array doesn't contain 2D points.
Returns:
np.ndarray: `v` sent for further processing.
"""
if len(v.shape) != 2 or v.shape[1] != 2:
raise ValueError(f"{cls.__name__}: {field.name} must have shape (_, 2).")
return v
def is_not_empty(cls: type, v: List[Any], field: fields.ModelField) -> List[Any]:
"""Check that both inputs are not empty.
Args:
cls (type): Class type.
v (List[Any]): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Exception raised if list is empty.
Returns:
List[Any]: `v` sent for further processing.
"""
if len(v) == 0:
raise ValueError(f"{cls.__name__}: {field.name} list cannot be empty.")
return v
def is_not_zero_sum(cls: type, v: Any, field: fields.ModelField) -> Any:
"""Check that both inputs are not empty.
Args:
cls (type): Class type.
v (Any): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Raised if v sums to 0.
Returns:
Any: `v` sent for further processing.
"""
if np.sum(v) == 0:
raise ValueError(f"{cls.__name__}: {field.name} sum cannot be zero.")
return v
def are_all_positive(cls: type, v: Any, field: fields.ModelField) -> Any:
"""Check that all values are positive.
Args:
cls (type): Class type.
v (Any): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Raised if not all values are positive.
Returns:
Any: `v` sent for further processing.
"""
if isinstance(v, Iterable):
if not np.array([value >= 0 for value in v]).all():
raise ValueError(f"{cls.__name__}: all {field.name} must be positive. Received {v}")
elif v < 0.0:
raise ValueError(f"{cls.__name__}: {field.name} must be positive. Received {v}")
return v
def to_dtype_float32(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
"""Convert input np.ndarray to dtype np.float32.
Args:
cls (type): Class type.
v (np.ndarray): Value to convert
field (fields.ModelField): Field descriptor.
Returns:
np.ndarray: `v` sent for further processing.
"""
return v.astype(np.float32)
# ----- root_validators -----
def is_valid_bbox(cls: type, values: Dict[str, float]) -> Dict[str, float]:
"""Check that the bounding box is valid."""
if values["x_min"] >= values["x_max"] or values["y_min"] >= values["y_max"]:
raise ValueError(
f'{cls.__name__}: invalid bbox. x_min={values["x_min"]}, x_max={values["x_max"]},'
f' y_min={values["y_min"]}, y_max={values["y_max"]}'
)
return values
# ----- parametrized validators -----
def is_array_n_dimensions(nb_dimensions: int) -> Callable:
"""Create a pydantic validator checking if an array is n-dimensional.
Args:
nb_dimensions (int): number of dimensions the array must have
Returns:
Callable: the validator.
"""
def validator(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
"""Check if the array has the right number of dimensions."""
if len(v.shape) != nb_dimensions and (v.shape != (0,) or nb_dimensions != 0):
raise ValueError(
f"{cls.__name__}: wrong number of dimensions for {field.name}. "
f"Expected {nb_dimensions}, got {len(v.shape)}"
)
return v
return validator
# ----- parametrized root_validators -----
def are_lengths_equal(field1: str, field2: str) -> Callable:
"""Create a pydantic validator checking if the two fields have the same length.
Args:
field1 (str): name of the first field
field2 (str): name of the second field
Returns:
Callable: the validator.
"""
def __root_validator(cls: type, values: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
"""Check if len(field1) equals len(field2)."""
if len(values[field1]) != len(values[field2]):
raise ValueError(
f"{cls.__name__}: {field1} and {field2} length mismatch, "
f"resp. {len(values[field1])} and {len(values[field2])}"
)
<fim_suffix>
return __root_validator
def are_shapes_equal(field1: str, field2: str) -> Callable:
"""Create a pydantic validator checking if the two fields have the same shape.
Args:
field1 (str): name of the first field
field2 (str): name of the second field
Returns:
Callable: the validator.
"""
def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
"""Check if field1.shape equals field2.shape."""
if values[field1].shape != values[field2].shape:
raise ValueError(f"{cls.__name__}: {field1} and {field2} shape mismatch.")
return values
return __root_validator
def are_all_shapes_equal(field1: str, field2: str) -> Callable:
"""Create a pydantic validator checking if two lists of array have the same shape per element.
This function creates a pydantic validator for two lists of np.ndarrays which checks if they have the same length,
and if all of their element have the same shape one by one.
Args:
field1 (str): name of the first field
field2 (str): name of the second field
Returns:
Callable: the validator.
"""
def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
"""Check if len(field1) equals len(field2) and if every element have the same shape."""
shapes_field_1 = [element.shape for element in values[field1]]
shapes_field_2 = [element.shape for element in values[field2]]
if len(values[field1]) != len(values[field2]) or shapes_field_1 != shapes_field_2:
raise ValueError(
f"{cls.__name__}: {field1} and {field2} shape mismatch, resp. {shapes_field_1} and {shapes_field_2}."
)
return values
return __root_validator
<fim_middle>return values | return values | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
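A toy sketch (not from the repository) of how a parametrized root validator such as are_lengths_equal is meant to be attached to a model, assuming pydantic v1 semantics as used throughout this module:
from typing import List
from pydantic import BaseModel, root_validator
class Toy(BaseModel):
    xs: List[int]
    ys: List[int]
    _check_lengths = root_validator(allow_reuse=True)(are_lengths_equal("xs", "ys"))
Toy(xs=[1, 2], ys=[3, 4])  # passes validation
# Toy(xs=[1], ys=[3, 4])   # fails validation: Toy: xs and ys length mismatch, resp. 1 and 2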
<filename>open-iris/src/iris/nodes/vectorization/contouring.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/validators/object_validators.py
def _check_pupil_point_is_inside_iris(self, point: np.ndarray, polygon_pts: np.ndarray) -> bool:
"""Check if pupil point is inside iris polygon.
Reference:
[1] https://www.geeksforgeeks.org/how-to-check-if-a-given-point-lies-inside-a-polygon/
Args:
point (np.ndarray): Point x, y.
polygon_pts (np.ndarray): Polygon points.
Returns:
bool: Check result.
"""
num_iris_points = len(polygon_pts)
polygon_sides = [
(polygon_pts[i % num_iris_points], polygon_pts[(i + 1) % num_iris_points]) for i in range(num_iris_points)
]
x, y = point
to_right_ray = (point, np.array([float("inf"), y]))
to_left_ray = (np.array([-float("inf"), y]), point)
right_ray_intersections, left_ray_intersections = 0, 0
for poly_side in polygon_sides:
if self._is_ray_intersecting_with_side(to_right_ray, poly_side, is_ray_pointing_to_left=False):
right_ray_intersections += 1
if self._is_ray_intersecting_with_side(to_left_ray, poly_side, is_ray_pointing_to_left=True):
left_ray_intersections += 1
return right_ray_intersections % 2 != 0 or left_ray_intersections % 2 != 0
# open-iris/src/iris/pipelines/iris_pipeline.py
def instanciate_node(
self, node_class: str, algorithm_params: Dict[str, Any], callbacks: Optional[List[PipelineClass]]
) -> Algorithm:
"""Instanciate an Algorithm from its class, kwargs and optional Callbacks.
NOTE: All callbacks of a type listed in self.env.disabled_qa will be filtered out. This allows one config file to be used at various QA standard levels.
Args:
node_class (str): Node's class.
algorithm_params (Dict[str, Any]): Node's kwargs.
callbacks (Optional[List[PipelineClass]]): list of callbacks.
Returns:
Algorithm: instanciated node.
"""
if callbacks is not None:
instanciated_callbacks = [self.instanciate_class(cb.class_name, cb.params) for cb in callbacks]
instanciated_callbacks = [cb for cb in instanciated_callbacks if type(cb) not in self.env.disabled_qa]
algorithm_params = {**algorithm_params, **{"callbacks": instanciated_callbacks}}
return self.instanciate_class(node_class, algorithm_params)
# open-iris/src/iris/nodes/geometry_refinement/smoothing.py
def _smooth(self, polygon: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:
"""Smooth a single contour.
Args:
polygon (np.ndarray): Contour to smooth.
center_xy (Tuple[float, float]): Contour's center.
Returns:
np.ndarray: Smoothed contour's vertices.
"""
arcs, num_gaps = self._cut_into_arcs(polygon, center_xy)
arcs = (
self._smooth_circular_shape(arcs[0], center_xy)
if num_gaps == 0
else np.vstack([self._smooth_arc(arc, center_xy) for arc in arcs if len(arc) >= 2])
)
return arcs
"""
from typing import Callable, List
import cv2
import numpy as np
from pydantic import NonNegativeFloat
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import GeometryMask, GeometryPolygons
from iris.io.errors import VectorizationError
from iris.utils.math import area
def filter_polygon_areas(
polygons: List[np.ndarray], rel_tr: NonNegativeFloat = 0.03, abs_tr: NonNegativeFloat = 0.0
) -> List[np.ndarray]:
"""Filter out polygons whose area is below either an absolute threshold or a fraction of the largest area.
Args:
polygons (List[np.ndarray]): List of polygons to filter.
rel_tr (NonNegativeFloat, optional): Relative threshold. Defaults to 0.03.
abs_tr (NonNegativeFloat, optional): Absolute threshold. Defaults to 0.0.
Returns:
List[np.ndarray]: Filtered polygons' list.
"""
areas = [area(polygon) if len(polygon) > 2 else 1.0 for polygon in polygons]
area_factors = np.array(areas) / np.max(areas)
filtered_polygons = [
polygon
for area, area_factor, polygon in zip(areas, area_factors, polygons)
if area > abs_tr and area_factor > rel_tr
]
<fim_suffix>
class ContouringAlgorithm(Algorithm):
"""Implementation of a vectorization process through contouring raster image."""
class Parameters(Algorithm.Parameters):
"""Parameters class of the ContouringAlgorithm class."""
contour_filters: List[Callable[[List[np.ndarray]], List[np.ndarray]]]
__parameters_type__ = Parameters
def __init__(
self,
contour_filters: List[Callable[[List[np.ndarray]], List[np.ndarray]]] = [filter_polygon_areas],
) -> None:
"""Assign parameters.
Args:
contour_filters (List[Callable[[List[np.ndarray]], List[np.ndarray]]], optional): List of filter functions used to filter out noise in polygons.
Defaults to [filter_polygon_areas].
"""
super().__init__(contour_filters=contour_filters)
def run(self, geometry_mask: GeometryMask) -> GeometryPolygons:
"""Contouring vectorization algorithm implementation.
Args:
geometry_mask (GeometryMask): Geometry segmentation map.
Raises:
VectorizationError: Raised if iris region not segmented or an error occur during iris region processing.
Returns:
GeometryPolygons: Geometry polygons points.
"""
if not np.any(geometry_mask.iris_mask):
raise VectorizationError("Geometry raster verification failed.")
geometry_contours = self._find_contours(geometry_mask)
return geometry_contours
def _find_contours(self, mask: GeometryMask) -> GeometryPolygons:
"""Find raw contours for different classes in raster.
Args:
mask (GeometryMask): Raster object.
Returns:
GeometryPolygons: Raw contours indicating polygons of different classes.
"""
eyeball_array = self._find_class_contours(mask.filled_eyeball_mask.astype(np.uint8))
iris_array = self._find_class_contours(mask.filled_iris_mask.astype(np.uint8))
pupil_array = self._find_class_contours(mask.pupil_mask.astype(np.uint8))
return GeometryPolygons(pupil_array=pupil_array, iris_array=iris_array, eyeball_array=eyeball_array)
def _find_class_contours(self, binary_mask: np.ndarray) -> np.ndarray:
"""Find contour between two different contours.
Args:
binary_mask (np.ndarray): Raster object.
Raises:
VectorizationError: Raised if number of contours found is different than 1.
Returns:
np.ndarray: Contour points array.
"""
contours, hierarchy = cv2.findContours(binary_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if hierarchy is None:
raise VectorizationError("_find_class_contours: No contour hierarchy found at all.")
parent_indices = np.flatnonzero(hierarchy[..., 3] == -1)
contours = [np.squeeze(contours[i]) for i in parent_indices]
contours = self._filter_contours(contours)
if len(contours) != 1:
raise VectorizationError("_find_class_contours: Number of contours must be equal to 1.")
return contours[0]
def _filter_contours(self, contours: List[np.ndarray]) -> List[np.ndarray]:
"""Filter contours based on predefined filters.
Args:
contours (List[np.ndarray]): Contours list.
Returns:
List[np.ndarray]: Filtered list of contours.
"""
for filter_func in self.params.contour_filters:
contours = filter_func(contours)
return contours
<fim_middle>return filtered_polygons | return filtered_polygons | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
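A standalone illustration of filter_polygon_areas with two invented squares, assuming area() from iris.utils.math returns the enclosed polygon area in pixels:
import numpy as np
big = np.array([[0, 0], [100, 0], [100, 100], [0, 100]])  # area 10000
tiny = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])          # area 1, i.e. 1e-4 of the largest area
kept = filter_polygon_areas([big, tiny], rel_tr=0.03, abs_tr=0.0)
print(len(kept))  # 1 -- only the big square survives the relative-area filter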
<filename>open-iris/src/iris/nodes/iris_response_refinement/fragile_bits_refinement.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/encoder/iris_encoder.py
def run(self, response: IrisFilterResponse) -> IrisTemplate:
"""Encode iris code and mask code.
Args:
response (IrisFilterResponse): Filter responses.
Returns:
IrisTemplate: Final iris template.
"""
iris_codes: List[np.ndarray] = []
mask_codes: List[np.ndarray] = []
for iris_response, mask_response in zip(response.iris_responses, response.mask_responses):
mask_code = mask_response >= self.params.mask_threshold
iris_code = np.stack([iris_response.real > 0, iris_response.imag > 0], axis=-1)
mask_code = np.stack([mask_code, mask_code], axis=-1)
iris_codes.append(iris_code)
mask_codes.append(mask_code)
return IrisTemplate(iris_codes=iris_codes, mask_codes=mask_codes)
# open-iris/src/iris/nodes/geometry_estimation/fusion_extrapolation.py
def run(self, input_polygons: GeometryPolygons, eye_center: EyeCenters) -> GeometryPolygons:
"""Perform extrapolation algorithm.
Args:
input_polygons (GeometryPolygons): Smoothed polygons.
eye_center (EyeCenters): Computed eye centers.
Returns:
GeometryPolygons: Extrapolated polygons
"""
xs, ys = input_polygons.iris_array[:, 0], input_polygons.iris_array[:, 1]
rhos, _ = cartesian2polar(xs, ys, eye_center.iris_x, eye_center.iris_y)
new_poly = self.params.circle_extrapolation(input_polygons, eye_center)
radius_std = rhos.std()
if radius_std > self.params.algorithm_switch_std_threshold:
ellipse_poly = self.params.ellipse_fit(input_polygons)
new_poly = GeometryPolygons(
pupil_array=new_poly.pupil_array,
iris_array=ellipse_poly.iris_array,
eyeball_array=input_polygons.eyeball_array,
)
return new_poly
# open-iris/src/iris/nodes/geometry_refinement/smoothing.py
def run(self, polygons: GeometryPolygons, eye_centers: EyeCenters) -> GeometryPolygons:
"""Perform smoothing refinement.
Args:
polygons (GeometryPolygons): Contours to refine.
eye_centers (EyeCenters): Eye center used when performing a coordinates mapping from cartesian space to polar space.
Returns:
GeometryPolygons: Smoothed contours.
"""
pupil_arcs = self._smooth(polygons.pupil_array, (eye_centers.pupil_x, eye_centers.pupil_y))
iris_arcs = self._smooth(polygons.iris_array, (eye_centers.iris_x, eye_centers.iris_y))
return GeometryPolygons(pupil_array=pupil_arcs, iris_array=iris_arcs, eyeball_array=polygons.eyeball_array)
"""
from typing import Literal, Tuple
import numpy as np
from pydantic import confloat
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import IrisFilterResponse
class FragileBitRefinement(Algorithm):
"""Refining mask by masking out fragile bits.
Algorithm:
For each bit, threshold the iris response by the given value_threshold and set the corresponding mask response to 0 if the iris response is below the threshold.
"""
class Parameters(Algorithm.Parameters):
"""RegularProbeSchema parameters."""
value_threshold: Tuple[confloat(ge=0), confloat(ge=0)]
fragile_type: Literal["cartesian", "polar"]
__parameters_type__ = Parameters
def __init__(
self,
value_threshold: Tuple[confloat(ge=0), confloat(ge=0)],
fragile_type: Literal["cartesian", "polar"] = "polar",
) -> None:
"""Create Fragile Bit Refinement object.
Args:
value_threshold (Tuple[confloat(ge=0), confloat(ge=0)]): Thresholding iris response values.
fragile_type (Literal["cartesian", "polar"], optional): The fragile bits can be
calculated in either cartesian or polar coordinates. In the former, the values
of value_threshold refer to the x and y axes; in the case of polar coordinates,
the values refer to radius and angle. Defaults to "polar".
"""
super().__init__(value_threshold=value_threshold, fragile_type=fragile_type)
def run(self, iris_filter_response: IrisFilterResponse) -> IrisFilterResponse:
"""Generate refined IrisFilterResponse.
Args:
iris_filter_response (IrisFilterResponse): Filter bank response.
Returns:
IrisFilterResponse: Filter bank response.
"""
fragile_masks = []
<fim_suffix>
return IrisFilterResponse(iris_responses=iris_filter_response.iris_responses, mask_responses=fragile_masks)
<fim_middle>for iris_response, iris_mask in zip(iris_filter_response.iris_responses, iris_filter_response.mask_responses):
if self.params.fragile_type == "cartesian":
mask_value_real = np.abs(np.real(iris_response)) >= self.params.value_threshold[0]
mask_value_imaginary = np.abs(np.imag(iris_response)) >= self.params.value_threshold[1]
mask_value = mask_value_real * mask_value_imaginary
if self.params.fragile_type == "polar":
iris_response_r = np.abs(iris_response)
iris_response_phi = np.angle(iris_response)
mask_value_r = iris_response_r >= self.params.value_threshold[0]
cos_mask = np.abs(np.cos(iris_response_phi)) <= np.abs(np.cos(self.params.value_threshold[1]))
sine_mask = np.abs(np.sin(iris_response_phi)) <= np.abs(np.cos(self.params.value_threshold[1]))
mask_value_phi = cos_mask * sine_mask
mask_value = mask_value_r * mask_value_phi
mask_value = mask_value * iris_mask
fragile_masks.append(mask_value) | for iris_response, iris_mask in zip(iris_filter_response.iris_responses, iris_filter_response.mask_responses):
if self.params.fragile_type == "cartesian":
mask_value_real = np.abs(np.real(iris_response)) >= self.params.value_threshold[0]
mask_value_imaginary = np.abs(np.imag(iris_response)) >= self.params.value_threshold[1]
mask_value = mask_value_real * mask_value_imaginary
if self.params.fragile_type == "polar":
iris_response_r = np.abs(iris_response)
iris_response_phi = np.angle(iris_response)
mask_value_r = iris_response_r >= self.params.value_threshold[0]
cos_mask = np.abs(np.cos(iris_response_phi)) <= np.abs(np.cos(self.params.value_threshold[1]))
sine_mask = np.abs(np.sin(iris_response_phi)) <= np.abs(np.cos(self.params.value_threshold[1]))
mask_value_phi = cos_mask * sine_mask
mask_value = mask_value_r * mask_value_phi
mask_value = mask_value * iris_mask
fragile_masks.append(mask_value) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
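A minimal sketch of the cartesian fragile-bit masking used in the FragileBitRefinement.run completion above, applied to a synthetic complex filter response; the response shape and threshold values are illustrative assumptions and only NumPy is required.
import numpy as np

rng = np.random.default_rng(0)
iris_response = rng.normal(size=(16, 256)) + 1j * rng.normal(size=(16, 256))  # synthetic filter response
iris_mask = np.ones((16, 256))  # assume every probed position is valid

value_threshold = (0.5, 0.5)  # assumed thresholds for |Re| and |Im|
mask_value_real = np.abs(np.real(iris_response)) >= value_threshold[0]
mask_value_imaginary = np.abs(np.imag(iris_response)) >= value_threshold[1]
fragile_mask = mask_value_real * mask_value_imaginary * iris_mask  # bits below either threshold are masked out
print(fragile_mask.mean())  # fraction of bits kept after refinement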
<filename>open-iris/src/iris/nodes/geometry_refinement/smoothing.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/io/dataclasses.py
def index_of(self, class_name: str) -> int:
"""Get class index based on its name.
Args:
class_name (str): Class name
Raises:
ValueError: Raised if `class_name` not found in `index2class` dictionary.
Returns:
int: Index of a class.
"""
for index, name in self.index2class.items():
if name == class_name:
return index
raise ValueError(f"Index for the `{class_name}` not found")
# open-iris/src/iris/callbacks/pipeline_trace.py
def __getitem__(self, result_name: str) -> Any:
"""Get result_name result.
Args:
result_name (str): Result name.
Raises:
PipelineCallTraceStorageError: Raised if result_name is not found.
Returns:
Any: Result object.
"""
return self.get(result_name)
# open-iris/src/iris/utils/math.py
def area(array: np.ndarray) -> float:
"""Shoelace formula for simple polygon area calculation.
WARNING: This formula only works for simple polygons, i.e. planar polygons without self-intersections or holes.
These conditions are not checked within this function.
Args:
array (np.ndarray): np array representing a polygon as a list of points, i.e. of shape (_, 2).
Raises:
ValueError: if the input array does not have shape (_, 2)
Returns:
float: Polygon area
References:
[1] https://en.wikipedia.org/wiki/Shoelace_formula
[2] https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
"""
if len(array.shape) != 2 or array.shape[1] != 2:
raise ValueError(f"Unable to determine the area of a polygon with shape {array.shape}. Expecting (_, 2).")
xs, ys = array.T
area = 0.5 * np.abs(np.dot(xs, np.roll(ys, 1)) - np.dot(ys, np.roll(xs, 1)))
return float(area)
"""
from typing import List, Tuple
import numpy as np
from pydantic import Field
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeCenters, GeometryPolygons
from iris.io.errors import GeometryRefinementError
from iris.utils import math
class Smoothing(Algorithm):
"""Implementation of contour smoothing algorithm.
Algorithm steps:
1) Map iris/pupil points to polar space based on estimated iris/pupil centers.
2) Smooth iris/pupil contour by applying 1D convolution with rolling median kernel approach.
3) Map points back to cartesian space from polar space.
"""
class Parameters(Algorithm.Parameters):
"""Smoothing parameters class."""
dphi: float = Field(..., gt=0.0, lt=360.0)
kernel_size: float = Field(..., gt=0.0, lt=360.0)
gap_threshold: float = Field(..., gt=0.0, lt=360.0)
__parameters_type__ = Parameters
def __init__(self, dphi: float = 1.0, kernel_size: float = 10.0, gap_threshold: float = 10.0) -> None:
"""Assign parameters.
Args:
dphi (float, optional): phi angle delta used to sample points while doing smoothing by interpolation. Defaults to 1.0.
kernel_size (float, optional): Rolling median kernel size expressed in radians. Final kernel size is computed as a quotient of kernel_size and dphi. Defaults to 10.0.
gap_threshold (float, optional): Gap threshold distance. Defaults to 10.0.
"""
super().__init__(dphi=dphi, kernel_size=kernel_size, gap_threshold=gap_threshold)
@property
def kernel_offset(self) -> int:
Kernel offset (distance from kernel center to border) property used when smoothing with rolling median. If the quotient is less than 1, a kernel size of 1 is returned.
Returns:
int: Kernel size.
"""
return max(1, int((np.radians(self.params.kernel_size) / np.radians(self.params.dphi))) // 2)
def run(self, polygons: GeometryPolygons, eye_centers: EyeCenters) -> GeometryPolygons:
"""Perform smoothing refinement.
Args:
polygons (GeometryPolygons): Contours to refine.
eye_centers (EyeCenters): Eye center used when performing a coordinates mapping from cartesian space to polar space.
Returns:
GeometryPolygons: Smoothed contours.
"""
pupil_arcs = self._smooth(polygons.pupil_array, (eye_centers.pupil_x, eye_centers.pupil_y))
iris_arcs = self._smooth(polygons.iris_array, (eye_centers.iris_x, eye_centers.iris_y))
return GeometryPolygons(pupil_array=pupil_arcs, iris_array=iris_arcs, eyeball_array=polygons.eyeball_array)
def _smooth(self, polygon: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:
"""Smooth a single contour.
Args:
polygon (np.ndarray): Contour to smooth.
center_xy (Tuple[float, float]): Contour's center.
Returns:
np.ndarray: Smoothed contour's vertices.
"""
arcs, num_gaps = self._cut_into_arcs(polygon, center_xy)
arcs = (
self._smooth_circular_shape(arcs[0], center_xy)
if num_gaps == 0
else np.vstack([self._smooth_arc(arc, center_xy) for arc in arcs if len(arc) >= 2])
)
return arcs
def _cut_into_arcs(self, polygon: np.ndarray, center_xy: Tuple[float, float]) -> Tuple[List[np.ndarray], int]:
"""Cut contour into arcs.
Args:
polygon (np.ndarray): Contour polygon.
center_xy (Tuple[float, float]): Polygon's center.
Returns:
Tuple[List[np.ndarray], int]: Tuple with: (list of arcs' vertex arrays, number of gaps detected in a contour).
"""
rho, phi = math.cartesian2polar(polygon[:, 0], polygon[:, 1], *center_xy)
phi, rho = self._sort_two_arrays(phi, rho)
differences = np.abs(phi - np.roll(phi, -1))
# True distance between first and last point
differences[-1] = 2 * np.pi - differences[-1]
gap_indices = np.argwhere(differences > np.radians(self.params.gap_threshold)).flatten()
if gap_indices.size < 2:
return [polygon], gap_indices.size
gap_indices += 1
phi, rho = np.split(phi, gap_indices), np.split(rho, gap_indices)
arcs = [
np.column_stack(math.polar2cartesian(rho_coords, phi_coords, *center_xy))
for rho_coords, phi_coords in zip(rho, phi)
]
# Connect arc which lies between 0 and 2π.
if len(arcs) == gap_indices.size + 1:
arcs[0] = np.vstack([arcs[0], arcs[-1]])
arcs = arcs[:-1]
return arcs, gap_indices.size
def _smooth_arc(self, vertices: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:
"""Smooth a single contour arc.
Args:
vertices (np.ndarray): Arc's vertices.
center_xy (Tuple[float, float]): Center of an entire contour.
Returns:
np.ndarray: Smoothed arc's vertices.
"""
rho, phi = math.cartesian2polar(vertices[:, 0], vertices[:, 1], *center_xy)
phi, rho = self._sort_two_arrays(phi, rho)
idx = self._find_start_index(phi)
offset = phi[idx]
relative_phi = (phi - offset) % (2 * np.pi)
smoothed_relative_phi, smoothed_rho = self._smooth_array(relative_phi, rho)
smoothed_phi = (smoothed_relative_phi + offset) % (2 * np.pi)
x_smoothed, y_smoothed = math.polar2cartesian(smoothed_rho, smoothed_phi, *center_xy)
return np.column_stack([x_smoothed, y_smoothed])
def _smooth_circular_shape(self, vertices: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:
"""Smooth arc in a form of a circular shape.
Args:
vertices (np.ndarray): Arc's vertices.
center_xy (Tuple[float, float]): Center of an entire contour.
Returns:
np.ndarray: Smoothed arc's vertices.
"""
rho, phi = math.cartesian2polar(vertices[:, 0], vertices[:, 1], *center_xy)
padded_phi = np.concatenate([phi - 2 * np.pi, phi, phi + 2 * np.pi])
padded_rho = np.concatenate([rho, rho, rho])
smoothed_phi, smoothed_rho = self._smooth_array(padded_phi, padded_rho)
mask = (smoothed_phi >= 0) & (smoothed_phi < 2 * np.pi)
rho_smoothed, phi_smoothed = smoothed_rho[mask], smoothed_phi[mask]
x_smoothed, y_smoothed = math.polar2cartesian(rho_smoothed, phi_smoothed, *center_xy)
return np.column_stack([x_smoothed, y_smoothed])
def _smooth_array(self, phis: np.ndarray, rhos: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Smooth coordinates expressed in polar space.
Args:
phis (np.ndarray): phi values.
rhos (np.ndarray): rho values.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with smoothed coordinates (phis, rhos).
"""
interpolated_phi = np.arange(min(phis), max(phis), np.radians(self.params.dphi))
interpolated_rho = np.interp(interpolated_phi, xp=phis, fp=rhos, period=2 * np.pi)
smoothed_rho = self._rolling_median(interpolated_rho, self.kernel_offset)
smoothed_phi = interpolated_phi[self.kernel_offset : -self.kernel_offset]
return smoothed_phi, smoothed_rho
def _sort_two_arrays(self, first_list: np.ndarray, second_list: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Sort both numpy arrays based on values from the first_list.
Args:
first_list (np.ndarray): First array.
second_list (np.ndarray): Second array.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with (sorted first array, sorted second array).
"""
zipped_lists = zip(first_list, second_list)
sorted_pairs = sorted(zipped_lists)
sorted_tuples = zip(*sorted_pairs)
first_list, second_list = [list(sorted_tuple) for sorted_tuple in sorted_tuples]
return np.array(first_list), np.array(second_list)
def _find_start_index(self, phi: np.ndarray) -> int:
"""Find the start index by checking the largest gap. phi needs to be sorted.
Args:
phi (np.ndarray): phi angle values.
Raises:
GeometryRefinementError: Raised if phi values are not sorted ascendingly.
Returns:
int: Index value.
"""
if not np.all((phi - np.roll(phi, 1))[1:] >= 0):
raise GeometryRefinementError("Smoothing._find_start_index phi must be sorted ascendingly!")
phi_tmp = np.concatenate(([phi[-1] - 2 * np.pi], phi, [phi[0] + 2 * np.pi]))
phi_tmp_left_neighbor = np.roll(phi_tmp, 1)
dphi = (phi_tmp - phi_tmp_left_neighbor)[1:-1]
largest_gap_index = np.argmax(dphi)
return int(largest_gap_index)
def _rolling_median(self, signal: np.ndarray, kernel_offset: int) -> np.ndarray:
"""Compute rolling median of a 1D signal.
Args:
signal (np.ndarray): Signal values.
kernel_offset (int): Kernel offset (distance from kernel center to border).
Raises:
GeometryRefinementError: Raised if signal is not 1D.
Returns:
np.ndarray: Rolling median result.
"""
if signal.ndim != 1:
raise GeometryRefinementError("Smoothing._rolling_median only works for 1d arrays.")
stacked_signals: List[np.ndarray] = []
<fim_suffix>
stacked_signals = np.stack(stacked_signals)
rolling_median = np.median(stacked_signals, axis=0)
rolling_median = rolling_median[kernel_offset:-kernel_offset]
return rolling_median
<fim_middle>for i in range(-kernel_offset, kernel_offset + 1):
stacked_signals.append(np.roll(signal, i)) | for i in range(-kernel_offset, kernel_offset + 1):
stacked_signals.append(np.roll(signal, i)) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
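A minimal sketch of the rolling-median trick completed in Smoothing._rolling_median above: shifted copies of the signal are stacked with np.roll and the median is taken across the stack; the toy signal and kernel offset are illustrative.
import numpy as np

signal = np.array([1.0, 9.0, 2.0, 8.0, 3.0, 7.0, 4.0, 6.0, 5.0])
kernel_offset = 1  # window size is 2 * kernel_offset + 1 = 3

stacked_signals = np.stack([np.roll(signal, i) for i in range(-kernel_offset, kernel_offset + 1)])
rolling_median = np.median(stacked_signals, axis=0)[kernel_offset:-kernel_offset]  # trim wrap-contaminated borders
print(rolling_median)  # [2. 8. 3. 7. 4. 6. 5.]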
<filename>open-iris/src/iris/nodes/eye_properties_estimation/bisectors_method.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/geometry_refinement/smoothing.py
def _smooth_circular_shape(self, vertices: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:
"""Smooth arc in a form of a circular shape.
Args:
vertices (np.ndarray): Arc's vertices.
center_xy (Tuple[float, float]): Center of an entire contour.
Returns:
np.ndarray: Smoothed arc's vertices.
"""
rho, phi = math.cartesian2polar(vertices[:, 0], vertices[:, 1], *center_xy)
padded_phi = np.concatenate([phi - 2 * np.pi, phi, phi + 2 * np.pi])
padded_rho = np.concatenate([rho, rho, rho])
smoothed_phi, smoothed_rho = self._smooth_array(padded_phi, padded_rho)
mask = (smoothed_phi >= 0) & (smoothed_phi < 2 * np.pi)
rho_smoothed, phi_smoothed = smoothed_rho[mask], smoothed_phi[mask]
x_smoothed, y_smoothed = math.polar2cartesian(rho_smoothed, phi_smoothed, *center_xy)
return np.column_stack([x_smoothed, y_smoothed])
# open-iris/src/iris/utils/math.py
def polygon_length(polygon: np.ndarray, max_point_distance: int = 20) -> float:
"""Compute the length of a polygon represented as a (_, 2)-dimensionnal numpy array.
One polygon can include several disjoint arcs, which should be identified as separate so that the distance
between them is not counted. If a polygon is made of two small arc separated by a large distance, then the large
distance between the two arcs will not be discounted in the polygon's length
WARNING: The input polygon is assumed to be non-looped, i.e. if the first and last point are not equal,
which is the case for all ou GeometryPolygons. The last implicit segment looping back from the
last to the first point is therefore not included in the computed polygon length.
Args:
polygon (np.ndarray): (_, 2) - shaped numpy array representing a polygon.
max_point_distance (int): Maximum distance between two points for them to be considered part of the same arc.
Returns:
float: length of the polygon, in pixels.
"""
if polygon.ndim != 2 or polygon.shape[1] != 2:
raise ValueError(f"This function expects a polygon, i.e. an array of shape (_, 2). Got {polygon.shape}")
inter_point_distances = np.linalg.norm(np.roll(polygon, 1, axis=0) - polygon, axis=1)
inter_point_distances = inter_point_distances[inter_point_distances < max_point_distance]
return inter_point_distances.sum()
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def check_intermediate_radiuses(cls: type, v: Collection[float]) -> Collection[float]:
"""Check intermediate_radiuses parameter.
Args:
cls (type): PerspectiveNormalization.Parameters class.
v (Collection[float]): Variable value to check.
Raises:
NormalizationError: Raised if the number of radiuses is invalid, or if the min value is less than 0.0 or the max value is greater than 1.0.
Returns:
Collection[float]: intermediate_radiuses value passed for further processing.
"""
if len(v) < 2:
raise NormalizationError(f"Invalid number of intermediate_radiuses: {len(v)}.")
if min(v) < 0.0:
raise NormalizationError(f"Invalid min value of intermediate_radiuses: {min(v)}.")
if max(v) > 1.0:
raise NormalizationError(f"Invalid max value of intermediate_radiuses: {max(v)}.")
return v
"""
from typing import Tuple
import numpy as np
from pydantic import Field
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeCenters, GeometryPolygons
from iris.io.errors import EyeCentersEstimationError
class BisectorsMethod(Algorithm):
"""Implementation of eye's center estimation algorithm using bisectors method for finding a circle center.
This algorithm samples a given number of bisectors from the pupil and iris polygons, and averages their intersection
to produce the polygon center. This method is robust against noise in the polygons, making it a good choice for
non-perfect shapes. It is also robust to polygons missing parts of the circle arc, making it a good choice for
partially-occluded shapes.
LIMITATIONS:
The iris and pupil can be approximated to circles, when the user is properly gazing at the camera.
This requires that the cases of off-gaze have already been filtered out.
"""
class Parameters(Algorithm.Parameters):
"""Default Parameters for BisectorsMethod algorithm."""
num_bisectors: int = Field(..., gt=0)
min_distance_between_sector_points: float = Field(..., gt=0.0, lt=1.0)
max_iterations: int = Field(..., gt=0)
__parameters_type__ = Parameters
def __init__(
self,
num_bisectors: int = 100,
min_distance_between_sector_points: float = 0.75,
max_iterations: int = 50,
) -> None:
"""Assign parameters.
Args:
num_bisectors (int, optional): Number of bisectors. Defaults to 100.
min_distance_between_sector_points (float, optional): Minimum distance between sectors expressed as a fractional value of a circular shape diameter. Defaults to 0.75.
max_iterations (int, optional): Max iterations for bisector search. Defaults to 50.
"""
super().__init__(
num_bisectors=num_bisectors,
min_distance_between_sector_points=min_distance_between_sector_points,
max_iterations=max_iterations,
)
def run(self, geometries: GeometryPolygons) -> EyeCenters:
"""Estimate eye's iris and pupil centers.
Args:
geometries (GeometryPolygons): Geometry polygons.
Returns:
EyeCenters: Eye's centers object.
"""
pupil_center_x, pupil_center_y = self._find_center_coords(geometries.pupil_array, geometries.pupil_diameter)
iris_center_x, iris_center_y = self._find_center_coords(geometries.iris_array, geometries.iris_diameter)
return EyeCenters(pupil_x=pupil_center_x, pupil_y=pupil_center_y, iris_x=iris_center_x, iris_y=iris_center_y)
def _find_center_coords(self, polygon: np.ndarray, diameter: float) -> Tuple[float, float]:
"""Find center coordinates of a polygon.
Args:
polygon (np.ndarray): np.ndarray.
diameter (float): diameter of the polygon.
Returns:
Tuple[float, float]: Tuple with the center location coordinates (x, y).
"""
min_distance_between_sector_points_in_px = self.params.min_distance_between_sector_points * diameter
first_bisectors_point, second_bisectors_point = self._calculate_perpendicular_bisectors(
polygon, min_distance_between_sector_points_in_px
)
return self._find_best_intersection(first_bisectors_point, second_bisectors_point)
def _calculate_perpendicular_bisectors(
self, polygon: np.ndarray, min_distance_between_sector_points_in_px: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Calculate the perpendicular bisector of self.params.num_bisectors randomly chosen points from a polygon's vertices.
A pair of points is used if their distance is larger than min_distance_between_sector_points_in_px.
Args:
polygon (np.ndarray): np.ndarray based on which we are searching the center of a circular shape.
min_distance_between_sector_points_in_px (float): Minimum distance between sector points.
Raises:
EyeCentersEstimationError: Raised if not able to find enough random pairs of points on the arc with a large enough distance!
Returns:
Tuple[np.ndarray, np.ndarray]: Calculated perpendicular bisectors.
"""
np.random.seed(142857)
bisectors_first_points = np.empty([0, 2])
bisectors_second_points = np.empty([0, 2])
<fim_suffix>
bisectors_first_points = bisectors_first_points[: self.params.num_bisectors]
bisectors_second_points = bisectors_second_points[: self.params.num_bisectors]
bisectors_center = (bisectors_first_points + bisectors_second_points) / 2
# Flip xs with ys and flip the sign of one of them to create a 90deg rotation
inv_bisectors_center_slope = np.fliplr(bisectors_second_points - bisectors_first_points)
inv_bisectors_center_slope[:, 1] = -inv_bisectors_center_slope[:, 1]
# Add perpendicular vector to center and normalize
norm = np.linalg.norm(inv_bisectors_center_slope, axis=1)
inv_bisectors_center_slope[:, 0] /= norm
inv_bisectors_center_slope[:, 1] /= norm
first_bisectors_point = bisectors_center - inv_bisectors_center_slope
second_bisectors_point = bisectors_center + inv_bisectors_center_slope
return first_bisectors_point, second_bisectors_point
def _find_best_intersection(self, fst_points: np.ndarray, sec_points: np.ndarray) -> Tuple[float, float]:
"""fst_points and sec_points are NxD arrays defining N lines. D is the dimension of the space.
This function returns the least squares intersection of the N lines from the system given by eq. 13 in
http://cal.cs.illinois.edu/~johannes/research/LS_line_intersect.pdf.
Args:
fst_points (np.ndarray): First bisectors points.
sec_points (np.ndarray): Second bisectors points.
Returns:
Tuple[float, float]: Best intersection point.
Reference:
[1] http://cal.cs.illinois.edu/~johannes/research/LS_line_intersect.pdf
"""
norm_bisectors = (sec_points - fst_points) / np.linalg.norm(sec_points - fst_points, axis=1)[:, np.newaxis]
# Generate the array of all projectors I - n*n.T
projections = np.eye(norm_bisectors.shape[1]) - norm_bisectors[:, :, np.newaxis] * norm_bisectors[:, np.newaxis]
# Generate R matrix and q vector
R = projections.sum(axis=0)
q = (projections @ fst_points[:, :, np.newaxis]).sum(axis=0)
# Solve the least squares problem for the intersection point p: Rp = q
p = np.linalg.lstsq(R, q, rcond=None)[0]
intersection_x, intersection_y = p
return intersection_x.item(), intersection_y.item()
<fim_middle>for _ in range(self.params.max_iterations):
random_indices = np.random.choice(len(polygon), size=(self.params.num_bisectors, 2))
first_drawn_points = polygon[random_indices[:, 0]]
second_drawn_points = polygon[random_indices[:, 1]]
norms = np.linalg.norm(first_drawn_points - second_drawn_points, axis=1)
mask = norms > min_distance_between_sector_points_in_px
bisectors_first_points = np.vstack([bisectors_first_points, first_drawn_points[mask]])
bisectors_second_points = np.vstack([bisectors_second_points, second_drawn_points[mask]])
if len(bisectors_first_points) >= self.params.num_bisectors:
break
else:
raise EyeCentersEstimationError(
"Not able to find enough random pairs of points on the arc with a large enough distance!"
) | for _ in range(self.params.max_iterations):
random_indices = np.random.choice(len(polygon), size=(self.params.num_bisectors, 2))
first_drawn_points = polygon[random_indices[:, 0]]
second_drawn_points = polygon[random_indices[:, 1]]
norms = np.linalg.norm(first_drawn_points - second_drawn_points, axis=1)
mask = norms > min_distance_between_sector_points_in_px
bisectors_first_points = np.vstack([bisectors_first_points, first_drawn_points[mask]])
bisectors_second_points = np.vstack([bisectors_second_points, second_drawn_points[mask]])
if len(bisectors_first_points) >= self.params.num_bisectors:
break
else:
raise EyeCentersEstimationError(
"Not able to find enough random pairs of points on the arc with a large enough distance!"
) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
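A minimal sketch of the bisectors idea completed above: chords are sampled from a synthetic, noiseless circle, their perpendicular bisectors are built, and the least-squares intersection recovers the center; the circle parameters and sample counts are illustrative.
import numpy as np

phi = np.linspace(0, 2 * np.pi, 100, endpoint=False)
polygon = np.column_stack([50 + 20 * np.cos(phi), 30 + 20 * np.sin(phi)])  # circle centered at (50, 30)

rng = np.random.default_rng(142857)
idx = rng.choice(len(polygon), size=(200, 2))
p1, p2 = polygon[idx[:, 0]], polygon[idx[:, 1]]
keep = np.linalg.norm(p1 - p2, axis=1) > 0.75 * 40  # drop chords shorter than 75% of the diameter
p1, p2 = p1[keep], p2[keep]

centers = (p1 + p2) / 2
normals = np.fliplr(p2 - p1) * np.array([1.0, -1.0])  # rotate chord direction by 90 degrees
normals /= np.linalg.norm(normals, axis=1, keepdims=True)

projections = np.eye(2) - normals[:, :, np.newaxis] * normals[:, np.newaxis]  # I - n*n.T per bisector
R = projections.sum(axis=0)
q = (projections @ centers[:, :, np.newaxis]).sum(axis=0)
center = np.linalg.lstsq(R, q, rcond=None)[0].ravel()
print(center)  # approximately [50. 30.]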
<filename>open-iris/src/iris/io/dataclasses.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/callbacks/pipeline_trace.py
def get(self, result_name: str) -> Any:
"""Get result_name result.
Args:
result_name (str): Result name.
Raises:
PipelineCallTraceStorageError: Raised if result_name is not found.
Returns:
Any: Result object.
"""
if result_name not in self._storage.keys():
raise PipelineCallTraceStorageError(f"Unknown result name: {result_name}")
return self._storage[result_name]
# open-iris/src/iris/io/validators.py
def is_not_zero_sum(cls: type, v: Any, field: fields.ModelField) -> Any:
"""Check that both inputs are not empty.
Args:
cls (type): Class type.
v (Any): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Raised if v sums to 0.
Returns:
Any: `v` sent for further processing.
"""
if np.sum(v) == 0:
raise ValueError(f"{cls.__name__}: {field.name} sum cannot be zero.")
return v
# open-iris/src/iris/orchestration/output_builders.py
def __safe_serialize(object: Optional[ImmutableModel]) -> Optional[Dict[str, Any]]:
"""Serialize an object.
Args:
object (Optional[ImmutableModel]): Object to be serialized.
Raises:
NotImplementedError: Raised if object is not serializable.
Returns:
Optional[Dict[str, Any]]: Serialized object.
"""
if object is None:
return None
elif isinstance(object, ImmutableModel):
return object.serialize()
elif isinstance(object, (list, tuple)):
return [__safe_serialize(sub_object) for sub_object in object]
else:
raise NotImplementedError(f"Object of type {type(object)} is not serializable.")
"""
from __future__ import annotations
from typing import Any, Dict, List, Literal, Tuple
import numpy as np
from pydantic import Field, NonNegativeInt, root_validator, validator
from iris.io import validators as v
from iris.io.class_configs import ImmutableModel
from iris.utils import math
class IRImage(ImmutableModel):
"""Data holder for input IR image."""
img_data: np.ndarray
eye_side: Literal["left", "right"]
@property
def height(self) -> int:
"""Return IR image's height.
Return:
int: image height.
"""
return self.img_data.shape[0]
@property
def width(self) -> int:
"""Return IR image's width.
Return:
int: image width.
"""
return self.img_data.shape[1]
def serialize(self) -> Dict[str, Any]:
"""Serialize IRImage object.
Returns:
Dict[str, Any]: Serialized object.
"""
return self.dict(by_alias=True)
@staticmethod
def deserialize(data: Dict[str, Any]) -> IRImage:
"""Deserialize IRImage object.
Args:
data (Dict[str, Any]): Serialized object to dict.
Returns:
IRImage: Deserialized object.
"""
return IRImage(**data)
class SegmentationMap(ImmutableModel):
"""Data holder for the segmentation models predictions."""
predictions: np.ndarray
index2class: Dict[NonNegativeInt, str]
_is_segmap_3_dimensions = validator("predictions", allow_reuse=True)(v.is_array_n_dimensions(3))
@root_validator(pre=True, allow_reuse=True)
def _check_segmap_shape_and_consistency(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check that the number of classes equals the depth of the segmentation map.
Args:
values (Dict[str, Any]): Dictionary with segmap and classes {param_name: data}.
Raises:
ValueError: Raised if there is a mismatch between the number of classes and the depth of the segmentation map.
Returns:
Dict[str, Any]: Unmodified values parameter passed for further processing.
"""
if values["predictions"].shape[2] != len(values["index2class"]):
segmap_depth, nb_classes = values["predictions"].shape, len(values["index2class"])
raise ValueError(
f"{cls.__name__}: mismatch between predictions shape {segmap_depth} and classes length {nb_classes}."
)
return values
@property
def height(self) -> int:
"""Return segmap's height.
Return:
int: segmap height.
"""
return self.predictions.shape[0]
@property
def width(self) -> int:
"""Return segmap's width.
Return:
int: segmap width.
"""
return self.predictions.shape[1]
@property
def nb_classes(self) -> int:
"""Return the number of classes of the segmentation map (i.e. nb channels).
Return:
int: number of classes in the segmentation map.
"""
return self.predictions.shape[2]
def __eq__(self, other: object) -> bool:
"""Check if two SegmentationMap objects are equal.
Args:
other (object): Second object to compare.
Returns:
bool: Comparison result.
"""
if not isinstance(other, SegmentationMap):
return False
return self.index2class == other.index2class and np.allclose(self.predictions, other.predictions)
def index_of(self, class_name: str) -> int:
"""Get class index based on its name.
Args:
class_name (str): Class name
Raises:
ValueError: Raised if `class_name` not found in `index2class` dictionary.
Returns:
int: Index of a class.
"""
<fim_suffix>
raise ValueError(f"Index for the `{class_name}` not found")
def serialize(self) -> Dict[str, Any]:
"""Serialize SegmentationMap object.
Returns:
Dict[str, Any]: Serialized object.
"""
return self.dict(by_alias=True)
@staticmethod
def deserialize(data: Dict[str, Any]) -> SegmentationMap:
"""Deserialize SegmentationMap object.
Args:
data (Dict[str, Any]): Serialized object to dict.
Returns:
SegmentationMap: Deserialized object.
"""
return SegmentationMap(**data)
class GeometryMask(ImmutableModel):
"""Data holder for the geometry raster."""
pupil_mask: np.ndarray
iris_mask: np.ndarray
eyeball_mask: np.ndarray
_is_mask_2D = validator("*", allow_reuse=True)(v.is_array_n_dimensions(2))
_is_binary = validator("*", allow_reuse=True)(v.is_binary)
@property
def filled_eyeball_mask(self) -> np.ndarray:
"""Fill eyeball mask.
Returns:
np.ndarray: Eyeball mask with filled iris/pupil "holes".
"""
binary_maps = np.zeros(self.eyeball_mask.shape[:2], dtype=np.uint8)
binary_maps += self.pupil_mask
binary_maps += self.iris_mask
binary_maps += self.eyeball_mask
return binary_maps.astype(bool)
@property
def filled_iris_mask(self) -> np.ndarray:
"""Fill iris mask.
Returns:
np.ndarray: Iris mask with filled pupil "holes".
"""
binary_maps = np.zeros(self.iris_mask.shape[:2], dtype=np.uint8)
binary_maps += self.pupil_mask
binary_maps += self.iris_mask
return binary_maps.astype(bool)
def serialize(self) -> Dict[str, Any]:
"""Serialize GeometryMask object.
Returns:
Dict[str, Any]: Serialized object.
"""
return self.dict(by_alias=True)
@staticmethod
def deserialize(data: Dict[str, Any]) -> GeometryMask:
"""Deserialize GeometryMask object.
Args:
data (Dict[str, Any]): Serialized object to dict.
Returns:
GeometryMask: Deserialized object.
"""
return GeometryMask(**data)
class NoiseMask(ImmutableModel):
"""Data holder for the refined geometry masks."""
mask: np.ndarray
_is_mask_2D = validator("mask", allow_reuse=True)(v.is_array_n_dimensions(2))
_is_binary = validator("*", allow_reuse=True)(v.is_binary)
def serialize(self) -> Dict[str, np.ndarray]:
"""Serialize NoiseMask object.
Returns:
Dict[str, np.ndarray]: Serialized object.
"""
return self.dict(by_alias=True)
@staticmethod
def deserialize(data: Dict[str, np.ndarray]) -> NoiseMask:
"""Deserialize NoiseMask object.
Args:
data (Dict[str, np.ndarray]): Serialized object to dict.
Returns:
NoiseMask: Deserialized object.
"""
return NoiseMask(**data)
class GeometryPolygons(ImmutableModel):
"""Data holder for the refined geometry polygons. Input np.ndarrays are mandatorily converted to np.float32 dtype for compatibility with some downstream tasks such as MomentsOfArea."""
pupil_array: np.ndarray
iris_array: np.ndarray
eyeball_array: np.ndarray
_is_list_of_points = validator("*", allow_reuse=True)(v.is_list_of_points)
_convert_dtype = validator("*", allow_reuse=True)(v.to_dtype_float32)
@property
def pupil_diameter(self) -> float:
"""Return pupil diameter.
Returns:
float: pupil diameter.
"""
return math.estimate_diameter(self.pupil_array)
@property
def iris_diameter(self) -> float:
"""Return iris diameter.
Returns:
float: iris diameter.
"""
return math.estimate_diameter(self.iris_array)
def serialize(self) -> Dict[str, np.ndarray]:
"""Serialize GeometryPolygons object.
Returns:
Dict[str, np.ndarray]: Serialized object.
"""
return {"pupil": self.pupil_array, "iris": self.iris_array, "eyeball": self.eyeball_array}
@staticmethod
def deserialize(data: Dict[str, np.ndarray]) -> GeometryPolygons:
"""Deserialize GeometryPolygons object.
Args:
data (Dict[str, np.ndarray]): Serialized object to dict.
Returns:
GeometryPolygons: Deserialized object.
"""
data = {"pupil_array": data["pupil"], "iris_array": data["iris"], "eyeball_array": data["eyeball"]}
return GeometryPolygons(**data)
class EyeOrientation(ImmutableModel):
"""Data holder for the eye orientation. The angle must be comprised between -pi/2 (included) and pi/2 (excluded)."""
angle: float = Field(..., ge=-np.pi / 2, lt=np.pi / 2)
def serialize(self) -> float:
"""Serialize EyeOrientation object.
Returns:
float: Serialized object.
"""
return self.angle
@staticmethod
def deserialize(data: float) -> EyeOrientation:
"""Deserialize EyeOrientation object.
Args:
data (float): Serialized object to float.
Returns:
EyeOrientation: Deserialized object.
"""
return EyeOrientation(angle=data)
class EyeCenters(ImmutableModel):
"""Data holder for eye's centers."""
pupil_x: float
pupil_y: float
iris_x: float
iris_y: float
@property
def center_distance(self) -> float:
"""Return distance between pupil and iris center.
Return:
float: center distance.
"""
return np.linalg.norm([self.iris_x - self.pupil_x, self.iris_y - self.pupil_y])
def serialize(self) -> Dict[str, Tuple[float]]:
"""Serialize EyeCenters object.
Returns:
Dict[str, Tuple[float]]: Serialized object.
"""
return {"iris_center": (self.iris_x, self.iris_y), "pupil_center": (self.pupil_x, self.pupil_y)}
@staticmethod
def deserialize(data: Dict[str, Tuple[float]]) -> EyeCenters:
"""Deserialize EyeCenters object.
Args:
data (Dict[str, Tuple[float]]): Serialized object to dict.
Returns:
EyeCenters: Deserialized object.
"""
data = {
"pupil_x": data["pupil_center"][0],
"pupil_y": data["pupil_center"][1],
"iris_x": data["iris_center"][0],
"iris_y": data["iris_center"][1],
}
return EyeCenters(**data)
class Offgaze(ImmutableModel):
"""Data holder for offgaze score."""
score: float = Field(..., ge=0.0, le=1.0)
def serialize(self) -> float:
"""Serialize Offgaze object.
Returns:
float: Serialized object.
"""
return self.score
@staticmethod
def deserialize(data: float) -> Offgaze:
"""Deserialize Offgaze object.
Args:
data (float): Serialized object to float.
Returns:
Offgaze: Deserialized object.
"""
return Offgaze(score=data)
class PupilToIrisProperty(ImmutableModel):
"""Data holder for pupil-ro-iris ratios."""
pupil_to_iris_diameter_ratio: float = Field(..., gt=0, lt=1)
pupil_to_iris_center_dist_ratio: float = Field(..., ge=0, lt=1)
def serialize(self) -> Dict[str, float]:
"""Serialize PupilToIrisProperty object.
Returns:
Dict[str, float]: Serialized object.
"""
return self.dict(by_alias=True)
@staticmethod
def deserialize(data: Dict[str, float]) -> PupilToIrisProperty:
"""Deserialize PupilToIrisProperty object.
Args:
data (Dict[str, float]): Serialized object to dict.
Returns:
PupilToIrisProperty: Deserialized object.
"""
return PupilToIrisProperty(**data)
class Landmarks(ImmutableModel):
"""Data holder for eye's landmarks."""
pupil_landmarks: np.ndarray
iris_landmarks: np.ndarray
eyeball_landmarks: np.ndarray
_is_list_of_points = validator("*", allow_reuse=True)(v.is_list_of_points)
def serialize(self) -> Dict[str, List[float]]:
"""Serialize Landmarks object.
Returns:
Dict[str, List[float]]: Serialized object.
"""
return {
"pupil": self.pupil_landmarks.tolist(),
"iris": self.iris_landmarks.tolist(),
"eyeball": self.eyeball_landmarks.tolist(),
}
@staticmethod
def deserialize(data: Dict[str, List[float]]) -> Landmarks:
"""Deserialize Landmarks object.
Args:
data (Dict[str, List[float]]): Serialized object to dict.
Returns:
Landmarks: Deserialized object.
"""
data = {
"pupil_landmarks": np.array(data["pupil"]),
"iris_landmarks": np.array(data["iris"]),
"eyeball_landmarks": np.array(data["eyeball"]),
}
return Landmarks(**data)
class BoundingBox(ImmutableModel):
"""Data holder for eye's bounding box."""
x_min: float
y_min: float
x_max: float
y_max: float
_is_valid_bbox = root_validator(pre=True, allow_reuse=True)(v.is_valid_bbox)
def serialize(self) -> Dict[str, float]:
"""Serialize BoundingBox object.
Returns:
Dict[str, float]: Serialized object.
"""
return self.dict(by_alias=True)
@staticmethod
def deserialize(data: Dict[str, float]) -> BoundingBox:
"""Deserialize BoundingBox object.
Args:
data (Dict[str, float]): Serialized object to dict.
Returns:
BoundingBox: Deserialized object.
"""
return BoundingBox(**data)
class NormalizedIris(ImmutableModel):
"""Data holder for the normalized iris images."""
normalized_image: np.ndarray
normalized_mask: np.ndarray
_is_array_2D = validator("*", allow_reuse=True)(v.is_array_n_dimensions(2))
_is_binary = validator("normalized_mask", allow_reuse=True)(v.is_binary)
_img_mask_shape_match = root_validator(pre=True, allow_reuse=True)(
v.are_shapes_equal("normalized_image", "normalized_mask")
)
def serialize(self) -> Dict[str, np.ndarray]:
"""Serialize NormalizedIris object.
Returns:
Dict[str, np.ndarray]: Serialized object.
"""
return self.dict(by_alias=True)
@staticmethod
def deserialize(data: Dict[str, np.ndarray]) -> NormalizedIris:
"""Deserialize NormalizedIris object.
Args:
data (Dict[str, np.ndarray]): Serialized object to dict.
Returns:
NormalizedIris: Deserialized object.
"""
return NormalizedIris(**data)
class IrisFilterResponse(ImmutableModel):
"""Data holder for filter bank response with associated mask."""
iris_responses: List[np.ndarray]
mask_responses: List[np.ndarray]
_responses_mask_shape_match = root_validator(pre=True, allow_reuse=True)(
v.are_all_shapes_equal("iris_responses", "mask_responses")
)
def serialize(self) -> Dict[str, List[np.ndarray]]:
"""Serialize IrisFilterResponse object.
Returns:
Dict[str, List[np.ndarray]]: Serialized object.
"""
return self.dict(by_alias=True)
@staticmethod
def deserialize(data: Dict[str, List[np.ndarray]]) -> IrisFilterResponse:
"""Deserialize IrisFilterResponse object.
Args:
data (Dict[str, List[np.ndarray]]): Serialized object to dict.
Returns:
IrisFilterResponse: Deserialized object.
"""
return IrisFilterResponse(**data)
class IrisTemplate(ImmutableModel):
"""Data holder for final iris template with mask."""
iris_codes: List[np.ndarray]
mask_codes: List[np.ndarray]
_responses_mask_shape_match = root_validator(pre=True, allow_reuse=True)(
v.are_all_shapes_equal("iris_codes", "mask_codes")
)
_is_binary = validator("*", allow_reuse=True, each_item=True)(v.is_binary)
def serialize(self) -> Dict[str, np.ndarray]:
"""Serialize IrisTemplate object.
Returns:
Dict[str, np.ndarray]: Serialized object.
"""
stacked_iris_codes = np.stack(self.iris_codes)
stacked_iris_codes = stacked_iris_codes.transpose(1, 2, 0, 3)
stacked_mask_codes = np.stack(self.mask_codes)
stacked_mask_codes = stacked_mask_codes.transpose(1, 2, 0, 3)
return {
"iris_codes": stacked_iris_codes,
"mask_codes": stacked_mask_codes,
}
class EyeOcclusion(ImmutableModel):
"""Data holder for the eye occlusion."""
visible_fraction: float = Field(..., ge=-0.0, le=1.0)
def serialize(self) -> float:
"""Serialize EyeOcclusion object.
Returns:
float: Serialized object.
"""
return self.visible_fraction
@staticmethod
def deserialize(data: float) -> EyeOcclusion:
"""Deserialize EyeOcclusion object.
Args:
data (float): Serialized object to float.
Returns:
EyeOcclusion: Deserialized object.
"""
return EyeOcclusion(visible_fraction=data)
<fim_middle>for index, name in self.index2class.items():
if name == class_name:
return index | for index, name in self.index2class.items():
if name == class_name:
return index | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
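A minimal sketch showing how two of the data holders defined above fit together, assuming the open-iris package is installed so that iris.io.dataclasses is importable; the circle coordinates are illustrative.
import numpy as np
from iris.io.dataclasses import EyeCenters, GeometryPolygons

def circle(radius: float, n_points: int = 36) -> np.ndarray:
    phi = np.linspace(0, 2 * np.pi, n_points, endpoint=False)
    return np.column_stack([100 + radius * np.cos(phi), 80 + radius * np.sin(phi)])

polygons = GeometryPolygons(pupil_array=circle(15), iris_array=circle(40), eyeball_array=circle(60))
print(polygons.pupil_diameter, polygons.iris_diameter)  # ~30.0 and ~80.0 (max pairwise distances)

centers = EyeCenters(pupil_x=100.0, pupil_y=80.0, iris_x=100.0, iris_y=80.0)
print(centers.center_distance)  # 0.0, pupil and iris centers coincide
print(centers.serialize())  # {'iris_center': (100.0, 80.0), 'pupil_center': (100.0, 80.0)}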
<filename>open-iris/src/iris/nodes/iris_response/conv_filter_bank.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/encoder/iris_encoder.py
def run(self, response: IrisFilterResponse) -> IrisTemplate:
"""Encode iris code and mask code.
Args:
response (IrisFilterResponse): Filter responses.
Returns:
IrisTemplate: Final iris template.
"""
iris_codes: List[np.ndarray] = []
mask_codes: List[np.ndarray] = []
for iris_response, mask_response in zip(response.iris_responses, response.mask_responses):
mask_code = mask_response >= self.params.mask_threshold
iris_code = np.stack([iris_response.real > 0, iris_response.imag > 0], axis=-1)
mask_code = np.stack([mask_code, mask_code], axis=-1)
iris_codes.append(iris_code)
mask_codes.append(mask_code)
return IrisTemplate(iris_codes=iris_codes, mask_codes=mask_codes)
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def _bbox_coords(self, norm_dst_points: np.ndarray) -> Tuple[int, int, int, int]:
"""Extract the bounding box of currently processed normalized image ROI.
Args:
norm_dst_points (np.ndarray): Normalized image ROI coordinates.
Returns:
Tuple[int, int, int, int]: Bounding box coordinates in form (xmin, ymin, xmax, ymax).
"""
xmin, ymin = norm_dst_points[0].astype(int)
xmax, ymax = norm_dst_points[-1].astype(int)
return (xmin, ymin, xmax, ymax)
# open-iris/src/iris/nodes/geometry_refinement/contour_interpolation.py
def _interpolate_polygon_points(self, polygon: np.ndarray, max_distance_between_points_px: float) -> np.ndarray:
"""Interpolate contours points, so that the distance between two is no greater than `self.params.max_distance_between_boundary_points` in pixel space.
Args:
polygon (np.ndarray): Contour polygons.
max_distance_between_points_px (float): `self.params.max_distance_between_boundary_points` expressed in pixel length relative to iris diameter.
Returns:
np.ndarray: Interpolated polygon points.
"""
previous_boundary = np.roll(polygon, shift=1, axis=0)
distances = np.linalg.norm(polygon - previous_boundary, axis=1)
num_points = np.ceil(distances / max_distance_between_points_px).astype(int)
x: List[np.ndarray] = []
y: List[np.ndarray] = []
for (x1, y1), (x2, y2), num_point in zip(previous_boundary, polygon, num_points):
x.append(np.linspace(x1, x2, num=num_point, endpoint=False))
y.append(np.linspace(y1, y2, num=num_point, endpoint=False))
new_boundary = np.stack([np.concatenate(x), np.concatenate(y)], axis=1)
_, indices = np.unique(new_boundary, axis=0, return_index=True)
new_boundary = new_boundary[np.sort(indices)]
return new_boundary
"""
from typing import List, Tuple
import numpy as np
from pydantic import root_validator, validator
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import IrisFilterResponse, NormalizedIris
from iris.io.validators import are_lengths_equal, is_not_empty
from iris.nodes.iris_response.image_filters.gabor_filters import GaborFilter
from iris.nodes.iris_response.image_filters.image_filter_interface import ImageFilter
from iris.nodes.iris_response.probe_schemas.probe_schema_interface import ProbeSchema
from iris.nodes.iris_response.probe_schemas.regular_probe_schema import RegularProbeSchema
def polar_img_padding(img: np.ndarray, p_rows: int, p_cols: int) -> np.ndarray:
"""Apply zero-padding vertically and rotate-padding horizontally to a normalized image in polar coordinates.
Args:
img (np.ndarray): normalized image in polar coordinates.
p_rows (int): padding size on top and bottom.
p_cols (int): padding size on left and right.
Returns:
np.ndarray: padded image.
"""
i_rows, i_cols = img.shape
padded_image = np.zeros((i_rows + 2 * p_rows, i_cols + 2 * p_cols))
padded_image[p_rows : i_rows + p_rows, p_cols : i_cols + p_cols] = img
padded_image[p_rows : i_rows + p_rows, 0:p_cols] = img[:, -p_cols:]
padded_image[p_rows : i_rows + p_rows, -p_cols:] = img[:, 0:p_cols]
return padded_image
class ConvFilterBank(Algorithm):
"""Apply filter bank.
Algorithm steps:
1) Obtain filters and corresponding probe schemas.
2) Apply convolution to a given normalized iris image and mask using the filters and corresponding probe schemas.
3) Generate the iris response and corresponding mask response.
"""
class Parameters(Algorithm.Parameters):
"""Default ConvFilterBank parameters."""
filters: List[ImageFilter]
probe_schemas: List[ProbeSchema]
# Validators
_are_lengths_equal = root_validator(pre=True, allow_reuse=True)(are_lengths_equal("probe_schemas", "filters"))
_is_not_empty = validator("*", allow_reuse=True)(is_not_empty)
__parameters_type__ = Parameters
def __init__(
self,
filters: List[ImageFilter] = [
GaborFilter(
kernel_size=(41, 21),
sigma_phi=7,
sigma_rho=6.13,
theta_degrees=90.0,
lambda_phi=28,
dc_correction=True,
to_fixpoints=True,
),
GaborFilter(
kernel_size=(17, 21),
sigma_phi=2,
sigma_rho=5.86,
theta_degrees=90.0,
lambda_phi=8,
dc_correction=True,
to_fixpoints=True,
),
],
probe_schemas: List[ProbeSchema] = [
RegularProbeSchema(n_rows=16, n_cols=256),
RegularProbeSchema(n_rows=16, n_cols=256),
],
) -> None:
"""Assign parameters.
Args:
filters (List[ImageFilter]): List of image filters.
probe_schemas (List[ProbeSchema]): List of corresponding probe schemas.
"""
super().__init__(filters=filters, probe_schemas=probe_schemas)
def run(self, normalization_output: NormalizedIris) -> IrisFilterResponse:
"""Apply filters to a normalized iris image.
Args:
normalization_output (NormalizedIris): Output of the normalization process.
Returns:
IrisFilterResponse: filter responses.
"""
iris_responses: List[np.ndarray] = []
mask_responses: List[np.ndarray] = []
<fim_suffix>
return IrisFilterResponse(iris_responses=iris_responses, mask_responses=mask_responses)
def _convolve(
self, img_filter: ImageFilter, probe_schema: ProbeSchema, normalization_output: NormalizedIris
) -> Tuple[np.ndarray, np.ndarray]:
"""Apply convolution to a given normalized iris image with the filter and probe schema.
Args:
img_filter (ImageFilter): filter used for convolution.
probe_schema (ProbeSchema): probe schema used for convolution.
normalization_output (NormalizedIris): Output of the normalization process.
Returns:
Tuple[np.ndarray, np.ndarray]: iris response and mask response.
"""
i_rows, i_cols = normalization_output.normalized_image.shape
k_rows, k_cols = img_filter.kernel_values.shape
p_rows = k_rows // 2
p_cols = k_cols // 2
iris_response = np.zeros((probe_schema.params.n_rows, probe_schema.params.n_cols), dtype=np.complex64)
mask_response = np.zeros((probe_schema.params.n_rows, probe_schema.params.n_cols))
padded_iris = polar_img_padding(normalization_output.normalized_image, 0, p_cols)
padded_mask = polar_img_padding(normalization_output.normalized_mask, 0, p_cols)
for i in range(probe_schema.params.n_rows):
for j in range(probe_schema.params.n_cols):
# Convert probe_schema position to integer pixel position.
pos = i * probe_schema.params.n_cols + j
r_probe = min(round(probe_schema.rhos[pos] * i_rows), i_rows - 1)
c_probe = min(round(probe_schema.phis[pos] * i_cols), i_cols - 1)
# Get patch from image centered at [i,j] probed pixel position.
rtop = max(0, r_probe - p_rows)
rbot = min(r_probe + p_rows + 1, i_rows - 1)
iris_patch = padded_iris[rtop:rbot, c_probe : c_probe + k_cols]
mask_patch = padded_mask[rtop:rbot, c_probe : c_probe + k_cols]
# Perform convolution at [i,j] probed pixel position.
ktop = p_rows - iris_patch.shape[0] // 2
iris_response[i][j] = (
(iris_patch * img_filter.kernel_values[ktop : ktop + iris_patch.shape[0], :]).sum()
/ iris_patch.shape[0]
/ k_cols
)
mask_response[i][j] = (
0 if iris_response[i][j] == 0 else (mask_patch.sum() / iris_patch.shape[0] / k_cols)
)
return iris_response, mask_response
<fim_middle>for i_filter, i_schema in zip(self.params.filters, self.params.probe_schemas):
iris_response, mask_response = self._convolve(i_filter, i_schema, normalization_output)
iris_responses.append(iris_response)
mask_responses.append(mask_response) | for i_filter, i_schema in zip(self.params.filters, self.params.probe_schemas):
iris_response, mask_response = self._convolve(i_filter, i_schema, normalization_output)
iris_responses.append(iris_response)
mask_responses.append(mask_response) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
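A minimal sketch of the rotate-padding performed by polar_img_padding above, shown on a tiny 3x4 normalized image so the horizontal wrap-around of the angular axis is easy to inspect; the array values are illustrative.
import numpy as np

img = np.arange(12, dtype=float).reshape(3, 4)  # 3 radial rows x 4 angular columns
p_rows, p_cols = 0, 1

i_rows, i_cols = img.shape
padded_image = np.zeros((i_rows + 2 * p_rows, i_cols + 2 * p_cols))
padded_image[p_rows : i_rows + p_rows, p_cols : i_cols + p_cols] = img
padded_image[p_rows : i_rows + p_rows, 0:p_cols] = img[:, -p_cols:]  # left columns wrap around from the right
padded_image[p_rows : i_rows + p_rows, -p_cols:] = img[:, 0:p_cols]  # right columns wrap around from the left
print(padded_image)  # first and last columns repeat the opposite angular edge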
<filename>open-iris/src/iris/callbacks/pipeline_trace.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/pipelines/iris_pipeline.py
def _init_pipeline_tracing(self) -> PipelineCallTraceStorage:
"""Instantiate mechanisms for intermediate results tracing.
Returns:
PipelineCallTraceStorage: Pipeline intermediate and final results storage.
"""
call_trace = self.env.call_trace_class(results_names=self.nodes.keys())
for algorithm_name, algorithm_object in self.nodes.items():
algorithm_object._callbacks.append(NodeResultsWriter(call_trace, algorithm_name))
return call_trace
# open-iris/src/iris/orchestration/output_builders.py
def build_debugging_output(call_trace: PipelineCallTraceStorage) -> Dict[str, Any]:
"""Build the output for debugging purposes.
Args:
call_trace (PipelineCallTraceStorage): Pipeline call results storage.
Returns:
Dict[str, Any]: Returns data to be stored in MongoDB.
"""
iris_template = __safe_serialize(call_trace["encoder"])
metadata = __get_metadata(call_trace=call_trace)
error = __get_error(call_trace=call_trace)
segmap = call_trace["segmentation"]
geometry_mask, noise_mask = (
call_trace["segmentation_binarization"] if call_trace["segmentation_binarization"] is None else (None, None)
)
extrapolated_polygons = call_trace["geometry_estimation"]
normalized_iris = call_trace["normalization"]
iris_response = call_trace["filter_bank"]
return {
"iris_template": iris_template,
"metadata": metadata,
"segmentation_map": __safe_serialize(segmap),
"segmentation_binarization": {
"geometry": __safe_serialize(geometry_mask),
"noise": __safe_serialize(noise_mask),
},
"extrapolated_polygons": __safe_serialize(extrapolated_polygons),
"normalized_iris": __safe_serialize(normalized_iris),
"iris_response": __safe_serialize(iris_response),
"error": error,
}
# open-iris/src/iris/io/dataclasses.py
def __eq__(self, other: object) -> bool:
"""Check if two SegmentationMap objects are equal.
Args:
other (object): Second object to compare.
Returns:
bool: Comparison result.
"""
if not isinstance(other, SegmentationMap):
return False
return self.index2class == other.index2class and np.allclose(self.predictions, other.predictions)
"""
from __future__ import annotations
from typing import Any, Dict, Iterable, List, Optional
from iris.callbacks.callback_interface import Callback
from iris.io.class_configs import Algorithm
from iris.orchestration.pipeline_dataclasses import PipelineNode
class PipelineCallTraceStorageError(Exception):
"""PipelineCallTraceStorage error class."""
pass
class PipelineCallTraceStorage:
"""A storage object for pipeline input, intermediate and final results."""
INPUT_KEY_NAME = "input"
ERROR_KEY_NAME = "error"
def __init__(self, results_names: Iterable[str]) -> None:
"""Assign parameters.
Args:
results_names (Iterable[str]): Result names used to create the list of available keys in the storage.
"""
self._storage = self._init_storage(results_names)
def __getitem__(self, result_name: str) -> Any:
"""Get result_name result.
Args:
result_name (str): Result name.
Raises:
PipelineCallTraceStorageError: Raised if result_name is not found.
Returns:
Any: Result object.
"""
return self.get(result_name)
def __len__(self) -> int:
"""Get storage capacity.
Returns:
int: Storage capacity
"""
return len(self._storage.keys())
def get(self, result_name: str) -> Any:
"""Get result_name result.
Args:
result_name (str): Result name.
Raises:
PipelineCallTraceStorageError: Raised if result_name is not found.
Returns:
Any: Result object.
"""
if result_name not in self._storage.keys():
raise PipelineCallTraceStorageError(f"Unknown result name: {result_name}")
return self._storage[result_name]
def get_input(self) -> Any:
"""Return pipeline input.
Returns:
Any: Input to pipeline.
"""
return self.get(PipelineCallTraceStorage.INPUT_KEY_NAME)
def get_error(self) -> Optional[Exception]:
"""Return stored error.
Returns:
Optional[Exception]: error.
"""
return self.get(PipelineCallTraceStorage.ERROR_KEY_NAME)
def write(self, result_name: str, result: Any) -> None:
"""Write a result to a storage saved under the name `result_name`.
Args:
result_name (str): Result name.
result (Any): Result reference to save.
"""
self._storage[result_name] = result
def write_input(self, in_value: Any) -> None:
"""Save `in_value` in storage.
Args:
in_value (Any): Input value.
"""
self._storage[PipelineCallTraceStorage.INPUT_KEY_NAME] = in_value
def write_error(self, error: Exception) -> None:
"""Save `error` in storage.
Args:
error (Exception): error to store.
"""
self._storage[PipelineCallTraceStorage.ERROR_KEY_NAME] = error
def clean(self) -> None:
"""Clean storage by setting all result references to None."""
<fim_suffix>
def _init_storage(self, results_names: Iterable[str]) -> Dict[str, None]:
"""Initialize storage (dict) with proper names and None values as results.
Args:
results_names (Iterable[str]): Result names.
Returns:
Dict[str, None]: Storage dictionary.
"""
storage = {name: None for name in results_names}
storage[PipelineCallTraceStorage.INPUT_KEY_NAME] = None
storage[PipelineCallTraceStorage.ERROR_KEY_NAME] = None
return storage
@staticmethod
def initialise(nodes: Dict[str, Algorithm], pipeline_nodes: List[PipelineNode]) -> PipelineCallTraceStorage:
"""Instantiate mechanisms for intermediate results tracing.
Args:
nodes (Dict[str, Algorithm]): Mapping between nodes names and the corresponding instanciated nodes.
pipeline_nodes (List[PipelineNode]): List of nodes as declared in the input config. Not used in this function.
Returns:
PipelineCallTraceStorage: Pipeline intermediate and final results storage.
"""
call_trace = PipelineCallTraceStorage(results_names=nodes.keys())
for algorithm_name, algorithm_object in nodes.items():
algorithm_object._callbacks.append(NodeResultsWriter(call_trace, algorithm_name))
return call_trace
class NodeResultsWriter(Callback):
"""A node call results writer Callback class."""
def __init__(self, trace_storage_reference: PipelineCallTraceStorage, result_name: str) -> None:
"""Assign parameters.
Args:
trace_storage_reference (PipelineCallTraceStorage): Storage object reference to write.
result_name (str): Result name under which result should be written.
"""
self._trace_storage_reference = trace_storage_reference
self._result_name = result_name
def on_execute_end(self, result: Any) -> None:
"""Write on node execution end.
Args:
result (Any): Result of node call.
"""
self._trace_storage_reference.write(self._result_name, result)
<fim_middle>for result_name in self._storage.keys():
self._storage[result_name] = None | for result_name in self._storage.keys():
self._storage[result_name] = None | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
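A minimal sketch of using PipelineCallTraceStorage directly, assuming the open-iris package is installed so that iris.callbacks.pipeline_trace is importable; the result names and placeholder values are illustrative.
from iris.callbacks.pipeline_trace import PipelineCallTraceStorage

call_trace = PipelineCallTraceStorage(results_names=["segmentation", "encoder"])
call_trace.write_input("ir_image_placeholder")
call_trace.write("segmentation", {"predictions": "segmap_placeholder"})

print(call_trace.get_input())      # "ir_image_placeholder"
print(call_trace["segmentation"])  # {'predictions': 'segmap_placeholder'}
print(len(call_trace))             # 4 slots: two result names plus "input" and "error"
call_trace.clean()                 # reset every stored reference to None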
<filename>open-iris/src/iris/nodes/geometry_refinement/contour_interpolation.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/utils/math.py
def estimate_diameter(polygon: np.ndarray) -> float:
"""Estimates the diameter of an arbitrary arc by evaluating the maximum distance between any two points on the arc.
Args:
polygon (np.ndarray): Polygon points.
Returns:
float: Estimated diameter length.
Reference:
[1] https://sparrow.dev/pairwise-distance-in-numpy/
"""
return float(np.linalg.norm(polygon[:, None, :] - polygon[None, :, :], axis=-1).max())
# open-iris/src/iris/nodes/geometry_refinement/contour_points_filter.py
def __init__(self, min_distance_to_noise_and_eyeball: float = 0.005) -> None:
"""Assign parameters.
Args:
min_distance_to_noise_and_eyeball (float, optional): Minimum distance to eyeball or noise expressed as a fraction of iris diameter length. Defaults to 0.005.
"""
super().__init__(min_distance_to_noise_and_eyeball=min_distance_to_noise_and_eyeball)
# open-iris/src/iris/nodes/iris_response/image_filters/gabor_filters.py
def rotate(x: np.ndarray, y: np.ndarray, angle: float) -> Tuple[np.ndarray, np.ndarray]:
"""Rotate a given array of relative positions (x,y) by a given angle.
Args:
x (np.ndarray): x position.
y (np.ndarray): y position.
angle (float): angle for rotation (in degrees).
Returns:
Tuple[np.ndarray, np.ndarray]: rotated x, y positions.
"""
cos_theta = np.cos(angle * np.pi / 180)
sin_theta = np.sin(angle * np.pi / 180)
rotx = x * cos_theta + y * sin_theta
roty = -x * sin_theta + y * cos_theta
return rotx, roty
"""
from typing import List
import numpy as np
from pydantic import Field
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import GeometryPolygons
class ContourInterpolation(Algorithm):
"""Implementation of contour interpolation algorithm conditioned by given NoiseMask.
Algorithm performs linear interpolation of points between vectorized, predicted points such that the maximum distance between two consecutive points in a polygon isn't greater than
a fraction of an iris diameter length specified as the `max_distance_between_boundary_points` parameter.
"""
class Parameters(Algorithm.Parameters):
"""Parameters class for ContourInterpolation objects."""
max_distance_between_boundary_points: float = Field(..., gt=0.0, lt=1.0)
__parameters_type__ = Parameters
def __init__(self, max_distance_between_boundary_points: float = 0.01) -> None:
"""Assign parameters.
Args:
max_distance_between_boundary_points (float, optional): Maximum distance between boundary contour points expressed as a fraction of an iris diameter length. Defaults to 0.01.
"""
super().__init__(max_distance_between_boundary_points=max_distance_between_boundary_points)
def run(self, polygons: GeometryPolygons) -> GeometryPolygons:
"""Refine polygons by interpolating contour points.
Args:
polygons (GeometryPolygons): Polygons to refine.
Returns:
GeometryPolygons: Refined polygons.
"""
max_boundary_dist_in_px = self.params.max_distance_between_boundary_points * polygons.iris_diameter
refined_pupil_array = self._interpolate_polygon_points(polygons.pupil_array, max_boundary_dist_in_px)
refined_iris_array = self._interpolate_polygon_points(polygons.iris_array, max_boundary_dist_in_px)
refined_eyeball_array = self._interpolate_polygon_points(polygons.eyeball_array, max_boundary_dist_in_px)
return GeometryPolygons(
pupil_array=refined_pupil_array,
iris_array=refined_iris_array,
eyeball_array=refined_eyeball_array,
)
def _interpolate_polygon_points(self, polygon: np.ndarray, max_distance_between_points_px: float) -> np.ndarray:
"""Interpolate contours points, so that the distance between two is no greater than `self.params.max_distance_between_boundary_points` in pixel space.
Args:
polygon (np.ndarray): Contour polygons.
max_distance_between_points_px (float): `self.params.max_distance_between_boundary_points` expressed in pixel length relative to iris diameter.
Returns:
np.ndarray: Interpolated polygon points.
"""
previous_boundary = np.roll(polygon, shift=1, axis=0)
distances = np.linalg.norm(polygon - previous_boundary, axis=1)
num_points = np.ceil(distances / max_distance_between_points_px).astype(int)
x: List[np.ndarray] = []
y: List[np.ndarray] = []
<fim_suffix>
new_boundary = np.stack([np.concatenate(x), np.concatenate(y)], axis=1)
_, indices = np.unique(new_boundary, axis=0, return_index=True)
new_boundary = new_boundary[np.sort(indices)]
return new_boundary
<fim_middle>for (x1, y1), (x2, y2), num_point in zip(previous_boundary, polygon, num_points):
x.append(np.linspace(x1, x2, num=num_point, endpoint=False))
y.append(np.linspace(y1, y2, num=num_point, endpoint=False)) | for (x1, y1), (x2, y2), num_point in zip(previous_boundary, polygon, num_points):
x.append(np.linspace(x1, x2, num=num_point, endpoint=False))
y.append(np.linspace(y1, y2, num=num_point, endpoint=False)) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
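The completion above densifies a polygon segment by segment. Below is a minimal standalone sketch of the same idea, using a made-up square polygon and maximum distance rather than real iris contours.
# Segment-wise boundary interpolation, analogous to the completed FOR block above.
import numpy as np

polygon = np.array([[0.0, 0.0], [10.0, 0.0], [10.0, 10.0], [0.0, 10.0]])  # toy square
max_dist = 3.0                                                            # toy max segment length

previous = np.roll(polygon, shift=1, axis=0)             # segment start points
distances = np.linalg.norm(polygon - previous, axis=1)   # segment lengths
num_points = np.ceil(distances / max_dist).astype(int)   # points per segment

xs, ys = [], []
for (x1, y1), (x2, y2), n in zip(previous, polygon, num_points):
    # endpoint=False avoids duplicating the shared vertex of adjacent segments
    xs.append(np.linspace(x1, x2, num=n, endpoint=False))
    ys.append(np.linspace(y1, y2, num=n, endpoint=False))

dense_boundary = np.stack([np.concatenate(xs), np.concatenate(ys)], axis=1)
print(dense_boundary.shape)  # (16, 2) for this toy square: 4 points per edge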
<filename>open-iris/src/iris/nodes/normalization/perspective_normalization.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def _generate_correspondences(self, pupil_points: np.ndarray, iris_points: np.ndarray) -> np.ndarray:
"""Generate corresponding positions in original image.
Args:
pupil_points (np.ndarray): Pupil bounding points. NumPy array of shape (num_points x 2).
iris_points (np.ndarray): Iris bounding points. NumPy array of shape (num_points x 2).
Returns:
np.ndarray: generated corresponding points.
"""
pupil_diameter = math.estimate_diameter(pupil_points)
iris_diameter = math.estimate_diameter(iris_points)
p2i_ratio = pupil_diameter / iris_diameter
if p2i_ratio <= 0 or p2i_ratio >= 1:
raise NormalizationError(f"Invalid pupil to iris ratio, not in the range (0,1): {p2i_ratio}.")
src_points = np.array(
[
pupil_points + x * (iris_points - pupil_points)
for x in self.params.intermediate_radiuses[round(100 * (p2i_ratio))]
]
)
return src_points
# open-iris/src/iris/nodes/normalization/common.py
def correct_orientation(
pupil_points: np.ndarray, iris_points: np.ndarray, eye_orientation: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Correct orientation by changing the starting angle in pupil and iris points' arrays.
Args:
pupil_points (np.ndarray): Pupil boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
eye_orientation (float): Eye orientation angle in radians.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with rotated based on eye_orientation angle boundary points (pupil_points, iris_points).
"""
orientation_angle = np.degrees(eye_orientation)
num_rotations = -round(orientation_angle * len(pupil_points) / 360.0)
pupil_points = np.roll(pupil_points, num_rotations, axis=0)
iris_points = np.roll(iris_points, num_rotations, axis=0)
return pupil_points, iris_points
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def _normalize_all(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize all points of an image using bilinear.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): original input image points.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask.
"""
src_shape = src_points.shape[0:2]
src_points = np.vstack(src_points)
normalized_image = np.array(
[interpolate_pixel_intensity(original_image, pixel_coords=image_xy) for image_xy in src_points]
)
normalized_image = np.reshape(normalized_image, src_shape)
normalized_mask = np.array(
[self.get_pixel_binary(iris_mask, image_xy[0], image_xy[1]) for image_xy in src_points]
)
normalized_mask = np.reshape(normalized_mask, src_shape)
return normalized_image / 255.0, normalized_mask
"""
from typing import Collection, List, Tuple
import cv2
import numpy as np
from pydantic import Field, validator
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeOrientation, GeometryPolygons, IRImage, NoiseMask, NormalizedIris
from iris.io.errors import NormalizationError
from iris.nodes.normalization.common import correct_orientation, generate_iris_mask, interpolate_pixel_intensity
class PerspectiveNormalization(Algorithm):
"""Implementation of a normalization algorithm which uses perspective transformation to map image pixels.
Algorithm steps:
1) Create a grid of trapezoids around the iris in the original image based on the following algorithm parameters: res_in_phi, res_in_r, intermediate_radiuses.
2) Create a grid of rectangles in the normalized image, one corresponding to each trapezoid.
3) For each corresponding trapezoid-rectangle pair, compute a perspective matrix that estimates where each normalized image pixel lies in the original image.
4) Map each normalized image pixel to an original image pixel based on the estimated perspective matrix and perform bilinear interpolation if necessary.
"""
class Parameters(Algorithm.Parameters):
"""Parameters class for PerspectiveNormalization."""
res_in_phi: int = Field(..., gt=0)
res_in_r: int = Field(..., gt=0)
skip_boundary_points: int = Field(..., gt=0)
intermediate_radiuses: Collection[float]
oversat_threshold: int = Field(..., gt=0)
@validator("intermediate_radiuses")
def check_intermediate_radiuses(cls: type, v: Collection[float]) -> Collection[float]:
"""Check intermediate_radiuses parameter.
Args:
cls (type): PerspectiveNormalization.Parameters class.
v (Collection[float]): Variable value to check.
Raises:
NormalizationError: Raised if the number of radiuses is invalid, the min value is less than 0.0, or the max value is greater than 1.0.
Returns:
Collection[float]: intermediate_radiuses value passed for further processing.
"""
if len(v) < 2:
raise NormalizationError(f"Invalid number of intermediate_radiuses: {len(v)}.")
if min(v) < 0.0:
raise NormalizationError(f"Invalid min value of intermediate_radiuses: {min(v)}.")
if max(v) > 1.0:
raise NormalizationError(f"Invalid max value of intermediate_radiuses: {max(v)}.")
return v
__parameters_type__ = Parameters
def __init__(
self,
res_in_phi: int = 512,
res_in_r: int = 128,
skip_boundary_points: int = 1,
intermediate_radiuses: Collection[float] = np.linspace(0.0, 1.0, 8),
oversat_threshold: int = 254,
) -> None:
"""Assign parameters.
Args:
res_in_phi (int): Normalized image phi resolution. Defaults to 512.
res_in_r (int): Normalized image r resolution. Defaults to 128.
skip_boundary_points (int, optional): Take every nth point from estimated boundaries when generating correspondences.
Defaults to 1.
intermediate_radiuses (t.Iterable[float], optional): Intermediate rings radiuses used to generate additional points for estimating transformations.
Defaults to np.linspace(0.0, 1.0, 8).
oversat_threshold (int, optional): threshold for masking over-saturated pixels. Defaults to 254.
"""
super().__init__(
res_in_phi=res_in_phi,
res_in_r=res_in_r,
skip_boundary_points=skip_boundary_points,
intermediate_radiuses=intermediate_radiuses,
oversat_threshold=oversat_threshold,
)
def run(
self,
image: IRImage,
noise_mask: NoiseMask,
extrapolated_contours: GeometryPolygons,
eye_orientation: EyeOrientation,
) -> NormalizedIris:
"""Normalize iris using perspective transformation estimated for every region of an image separately.
Args:
image (IRImage): Input image to normalize.
noise_mask (NoiseMask): Noise mask.
extrapolated_contours (GeometryPolygons): Extrapolated contours.
eye_orientation (EyeOrientation): Eye orientation angle.
Returns:
NormalizedIris: NormalizedIris object containing normalized image and iris mask.
"""
if len(extrapolated_contours.pupil_array) != len(extrapolated_contours.iris_array):
raise NormalizationError("Extrapolated amount of iris and pupil points must be the same.")
pupil_points, iris_points = correct_orientation(
extrapolated_contours.pupil_array,
extrapolated_contours.iris_array,
eye_orientation.angle,
)
iris_mask = generate_iris_mask(extrapolated_contours, noise_mask.mask)
iris_mask[image.img_data >= self.params.oversat_threshold] = False
src_points, dst_points = self._generate_correspondences(pupil_points, iris_points)
normalized_iris = NormalizedIris(
normalized_image=np.zeros((self.params.res_in_r, self.params.res_in_phi), dtype=np.float32),
normalized_mask=np.zeros((self.params.res_in_r, self.params.res_in_phi), dtype=bool),
)
for angle_point_idx in range(src_points.shape[1] - 1):
for ring_idx in range(src_points.shape[0] - 1):
current_src, current_dst = self._correspondence_rois_coords(
angle_idx=angle_point_idx,
ring_idx=ring_idx,
src_points=src_points,
dst_points=dst_points,
)
xmin, ymin, xmax, ymax = self._bbox_coords(current_dst)
normalized_image_roi, normalized_mask_roi = self._normalize_roi(
original_image=image.img_data,
iris_mask=iris_mask,
src_points=current_src.astype(np.float32),
dst_points=current_dst.astype(np.float32),
normalize_roi_output_shape=(ymax - ymin, xmax - xmin),
)
normalized_iris.normalized_image[ymin:ymax, xmin:xmax] = normalized_image_roi
normalized_iris.normalized_mask[ymin:ymax, xmin:xmax] = normalized_mask_roi
return normalized_iris
def _generate_correspondences(
self, pupil_points: np.ndarray, iris_points: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""Generate correspondences between points in original image and normalized image.
Args:
pupil_points (np.ndarray): Pupil bounding points. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris bounding points. NumPy array of shape (num_points = 512, xy_coords = 2).
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with generated correspondences.
"""
pupil_points = pupil_points[:: self.params.skip_boundary_points]
iris_points = iris_points[:: self.params.skip_boundary_points]
src_points = []
<fim_suffix>
src_points = np.array(src_points)
num_rings, num_ring_points = src_points.shape[:2]
dst_xs, dst_ys = np.meshgrid(
np.linspace(0, self.params.res_in_phi, num_ring_points).astype(int),
np.linspace(0, self.params.res_in_r, num_rings).astype(int),
)
dst_points = np.array([dst_xs, dst_ys]).transpose((1, 2, 0))
return src_points, dst_points
def _normalize_roi(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
dst_points: np.ndarray,
normalize_roi_output_shape: Tuple[float, float],
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize a single ROI of an image.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): ROI's original input image points.
dst_points (np.ndarray): ROI's normalized image points.
normalize_roi_output_shape (t.Tuple[float, float]): Output shape of normalized ROI.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask ROIs.
"""
xmin, ymin, xmax, ymax = self._bbox_coords(dst_points)
normalize_image_xs = np.arange(xmin, xmax)
normalize_image_ys = np.arange(ymin, ymax)
normalize_image_points = np.meshgrid(normalize_image_xs, normalize_image_ys)
normalize_image_points = self.cartesian2homogeneous(normalize_image_points)
perspective_mat = cv2.getPerspectiveTransform(dst_points, src_points)
mapped_points = np.matmul(perspective_mat, normalize_image_points)
mapped_points = self.homogeneous2cartesian(mapped_points)
normalized_image_roi = np.zeros(normalize_roi_output_shape, dtype=np.float32)
normalized_mask_roi = np.zeros(normalize_roi_output_shape, dtype=bool)
for image_xy, normalized_xy in zip(mapped_points.T, normalize_image_points.T[..., :2]):
norm_x, norm_y = normalized_xy.astype(int)
shifted_y, shifted_x = norm_y - ymin, norm_x - xmin
normalized_image_roi[shifted_y, shifted_x] = interpolate_pixel_intensity(
original_image, pixel_coords=image_xy
)
try:
img_x, img_y = map(int, image_xy)
normalized_mask_roi[shifted_y, shifted_x] = iris_mask[img_y, img_x]
except IndexError:
normalized_mask_roi[shifted_y, shifted_x] = False
return normalized_image_roi / 255.0, normalized_mask_roi
def _bbox_coords(self, norm_dst_points: np.ndarray) -> Tuple[int, int, int, int]:
"""Extract the bounding box of currently processed normalized image ROI.
Args:
norm_dst_points (np.ndarray): Normalized image ROI coordinates.
Returns:
Tuple[int, int, int, int]: Bounding box coordinates in form (xmin, ymin, xmax, ymax).
"""
xmin, ymin = norm_dst_points[0].astype(int)
xmax, ymax = norm_dst_points[-1].astype(int)
return (xmin, ymin, xmax, ymax)
def _correspondence_rois_coords(
self,
angle_idx: int,
ring_idx: int,
src_points: np.ndarray,
dst_points: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Generate a single correspondence ROIs between original image and normalized one based on angle index and ring index.
Args:
angle_idx (int): Boundary point angle index.
ring_idx (int): Intermediate ring index.
src_points (np.ndarray): All mapping points from an original image.
NumPy array of shape (
num_intermediate_rings = self.intermediate_radiuses,
num_boundary_points = 512 // self.skip_boundary_points,
xy_coords = 2
).
dst_points (np.ndarray): All mapping points from a normalized image.
NumPy array of shape (
num_intermediate_rings = self.intermediate_radiuses,
num_boundary_points = 512 // self.skip_boundary_points,
xy_coords = 2
).
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with ROIs extracted from src_points and dst_points.
"""
src_roi = src_points[ring_idx : ring_idx + 2, angle_idx : angle_idx + 2]
dst_roi = dst_points[ring_idx : ring_idx + 2, angle_idx : angle_idx + 2]
return src_roi.reshape(4, 2), dst_roi.reshape(4, 2)
@staticmethod
def cartesian2homogeneous(points: List[np.ndarray]) -> np.ndarray:
"""Convert points in cartesian coordinates to homogeneous coordinates.
Args:
points (List[np.ndarray]): Points in cartesian coordinates. Array should be in format: [[x values], [y values]].
Returns:
np.ndarray: Points in homogeneous coordinates. Returned array will have format: [[x values], [y values], [1 ... 1]].
"""
x_coords, y_coords = points
x_coords = x_coords.reshape(-1, 1)
y_coords = y_coords.reshape(-1, 1)
homogeneous_coords = np.hstack([x_coords, y_coords, np.ones((len(x_coords), 1))])
return homogeneous_coords.T
@staticmethod
def homogeneous2cartesian(points: np.ndarray) -> np.ndarray:
"""Convert points in homogeneous coordinates to cartesian coordinates.
Args:
points (np.ndarray): Points in homogeneous coordinates. Array should be in format: [[x values], [y values], [perspective scale values]].
Returns:
np.ndarray: Points in cartesian coordinates. Returned array will have format: [[x values], [y values]].
"""
points /= points[-1]
points = points[:2]
return points
<fim_middle>for radius in self.params.intermediate_radiuses:
ring = pupil_points + radius * (iris_points - pupil_points)
ring = np.vstack([ring, ring[0]])
src_points.append(ring) | for radius in self.params.intermediate_radiuses:
ring = pupil_points + radius * (iris_points - pupil_points)
ring = np.vstack([ring, ring[0]])
src_points.append(ring) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
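The completion above builds intermediate rings; the surrounding file then maps normalized pixels back to the original image through homogeneous coordinates. Below is a hedged, toy sketch of that cartesian/homogeneous round trip; all point values are made up for illustration.
# Rectangle-to-trapezoid perspective mapping via homogeneous coordinates,
# mirroring the _normalize_roi logic in the example above.
import cv2
import numpy as np

dst_rect = np.array([[0, 0], [4, 0], [0, 4], [4, 4]], dtype=np.float32)          # normalized-image ROI corners
src_trap = np.array([[10, 10], [20, 12], [8, 20], [22, 24]], dtype=np.float32)   # original-image ROI corners

perspective_mat = cv2.getPerspectiveTransform(dst_rect, src_trap)

# cartesian2homogeneous: [[x], [y]] -> [[x], [y], [1]]
homogeneous = np.vstack([dst_rect.T, np.ones(len(dst_rect))])
mapped = perspective_mat @ homogeneous

# homogeneous2cartesian: divide by the last row, keep x and y
mapped_cartesian = (mapped / mapped[-1])[:2]
print(np.allclose(mapped_cartesian.T, src_trap))  # True: corners map onto the trapezoid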
<filename>open-iris/src/iris/nodes/normalization/perspective_normalization.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def run(
self,
image: IRImage,
noise_mask: NoiseMask,
extrapolated_contours: GeometryPolygons,
eye_orientation: EyeOrientation,
) -> NormalizedIris:
"""Normalize iris using nonlinear transformation when sampling points from cartisian to polar coordinates.
Args:
image (IRImage): Input image to normalize.
noise_mask (NoiseMask): Noise mask.
extrapolated_contours (GeometryPolygons): Extrapolated contours.
eye_orientation (EyeOrientation): Eye orientation angle.
Returns:
NormalizedIris: NormalizedIris object containing normalized image and iris mask.
"""
if len(extrapolated_contours.pupil_array) != len(extrapolated_contours.iris_array):
raise NormalizationError("Extrapolated amount of iris and pupil points must be the same.")
pupil_points, iris_points = correct_orientation(
extrapolated_contours.pupil_array,
extrapolated_contours.iris_array,
eye_orientation.angle,
)
iris_mask = generate_iris_mask(extrapolated_contours, noise_mask.mask)
iris_mask[image.img_data >= self.params.oversat_threshold] = False
src_points = self._generate_correspondences(pupil_points, iris_points)
normalized_image, normalized_mask = self._normalize_all(
original_image=image.img_data, iris_mask=iris_mask, src_points=src_points
)
normalized_iris = NormalizedIris(
normalized_image=normalized_image,
normalized_mask=normalized_mask,
)
return normalized_iris
# open-iris/src/iris/nodes/eye_properties_estimation/occlusion_calculator.py
def run(
self,
extrapolated_polygons: GeometryPolygons,
noise_mask: NoiseMask,
eye_orientation: EyeOrientation,
eye_centers: EyeCenters,
) -> EyeOcclusion:
"""Compute the iris visible fraction.
Args:
extrapolated_polygons (GeometryPolygons): Extrapolated polygons contours.
noise_mask (NoiseMask): Noise mask.
eye_orientation (EyeOrientation): Eye orientation angle.
eye_centers (EyeCenters): Eye centers.
Returns:
EyeOcclusion: Visible iris fraction.
"""
if self.params.quantile_angle == 0.0:
return EyeOcclusion(visible_fraction=0.0)
xs2mask, ys2mask = self._get_quantile_points(extrapolated_polygons.iris_array, eye_orientation, eye_centers)
img_h, img_w = noise_mask.mask.shape
iris_mask_quantile = common.contour_to_mask(np.column_stack([xs2mask, ys2mask]), mask_shape=(img_w, img_h))
pupil_mask = common.contour_to_mask(extrapolated_polygons.pupil_array, mask_shape=(img_w, img_h))
eyeball_mask = common.contour_to_mask(extrapolated_polygons.eyeball_array, mask_shape=(img_w, img_h))
visible_iris_mask = iris_mask_quantile & ~pupil_mask & eyeball_mask & ~noise_mask.mask
extrapolated_iris_mask = iris_mask_quantile & ~pupil_mask
if extrapolated_iris_mask.sum() == 0:
return EyeOcclusion(visible_fraction=0.0)
visible_fraction = visible_iris_mask.sum() / extrapolated_iris_mask.sum()
return EyeOcclusion(visible_fraction=visible_fraction)
# open-iris/src/iris/nodes/validators/cross_object_validators.py
def _check_correct_percentage(self, polygon: np.ndarray, min_allowed_percentage: float, ir_image: IRImage) -> bool:
"""Check percentage of points withing image based on minimal specified threshold.
Args:
polygon (np.ndarray): polygon to verify.
min_allowed_percentage (float): minimal allowed percentage of points that must be within an image.
ir_image (IRImage): ir image object.
Returns:
bool: Check result.
"""
num_points_inside_image: float = np.sum(
np.all(np.logical_and((0, 0) <= polygon, polygon <= (ir_image.width, ir_image.height)), axis=1)
)
percentage_points_inside_image = num_points_inside_image / len(polygon)
return percentage_points_inside_image >= min_allowed_percentage
"""
from typing import Collection, List, Tuple
import cv2
import numpy as np
from pydantic import Field, validator
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeOrientation, GeometryPolygons, IRImage, NoiseMask, NormalizedIris
from iris.io.errors import NormalizationError
from iris.nodes.normalization.common import correct_orientation, generate_iris_mask, interpolate_pixel_intensity
class PerspectiveNormalization(Algorithm):
"""Implementation of a normalization algorithm which uses perspective transformation to map image pixels.
Algorithm steps:
1) Create a grid of trapezoids around the iris in the original image based on the following algorithm parameters: res_in_phi, res_in_r, intermediate_radiuses.
2) Create a grid of rectangles in the normalized image, one corresponding to each trapezoid.
3) For each corresponding trapezoid-rectangle pair, compute a perspective matrix that estimates where each normalized image pixel lies in the original image.
4) Map each normalized image pixel to an original image pixel based on the estimated perspective matrix and perform bilinear interpolation if necessary.
"""
class Parameters(Algorithm.Parameters):
"""Parameters class for PerspectiveNormalization."""
res_in_phi: int = Field(..., gt=0)
res_in_r: int = Field(..., gt=0)
skip_boundary_points: int = Field(..., gt=0)
intermediate_radiuses: Collection[float]
oversat_threshold: int = Field(..., gt=0)
@validator("intermediate_radiuses")
def check_intermediate_radiuses(cls: type, v: Collection[float]) -> Collection[float]:
"""Check intermediate_radiuses parameter.
Args:
cls (type): PerspectiveNormalization.Parameters class.
v (Collection[float]): Variable value to check.
Raises:
NormalizationError: Raised if the number of radiuses is invalid, the min value is less than 0.0, or the max value is greater than 1.0.
Returns:
Collection[float]: intermediate_radiuses value passed for further processing.
"""
if len(v) < 2:
raise NormalizationError(f"Invalid number of intermediate_radiuses: {len(v)}.")
if min(v) < 0.0:
raise NormalizationError(f"Invalid min value of intermediate_radiuses: {min(v)}.")
if max(v) > 1.0:
raise NormalizationError(f"Invalid max value of intermediate_radiuses: {max(v)}.")
return v
__parameters_type__ = Parameters
def __init__(
self,
res_in_phi: int = 512,
res_in_r: int = 128,
skip_boundary_points: int = 1,
intermediate_radiuses: Collection[float] = np.linspace(0.0, 1.0, 8),
oversat_threshold: int = 254,
) -> None:
"""Assign parameters.
Args:
res_in_phi (int): Normalized image phi resolution. Defaults to 512.
res_in_r (int): Normalized image r resolution. Defaults to 128.
skip_boundary_points (int, optional): Take every nth point from estimated boundaries when generating correspondences.
Defaults to 1.
intermediate_radiuses (t.Iterable[float], optional): Intermediate rings radiuses used to generate additional points for estimating transformations.
Defaults to np.linspace(0.0, 1.0, 8).
oversat_threshold (int, optional): threshold for masking over-saturated pixels. Defaults to 254.
"""
super().__init__(
res_in_phi=res_in_phi,
res_in_r=res_in_r,
skip_boundary_points=skip_boundary_points,
intermediate_radiuses=intermediate_radiuses,
oversat_threshold=oversat_threshold,
)
def run(
self,
image: IRImage,
noise_mask: NoiseMask,
extrapolated_contours: GeometryPolygons,
eye_orientation: EyeOrientation,
) -> NormalizedIris:
"""Normalize iris using perspective transformation estimated for every region of an image separately.
Args:
image (IRImage): Input image to normalize.
noise_mask (NoiseMask): Noise mask.
extrapolated_contours (GeometryPolygons): Extrapolated contours.
eye_orientation (EyeOrientation): Eye orientation angle.
Returns:
NormalizedIris: NormalizedIris object containing normalized image and iris mask.
"""
if len(extrapolated_contours.pupil_array) != len(extrapolated_contours.iris_array):
raise NormalizationError("Extrapolated amount of iris and pupil points must be the same.")
pupil_points, iris_points = correct_orientation(
extrapolated_contours.pupil_array,
extrapolated_contours.iris_array,
eye_orientation.angle,
)
iris_mask = generate_iris_mask(extrapolated_contours, noise_mask.mask)
iris_mask[image.img_data >= self.params.oversat_threshold] = False
src_points, dst_points = self._generate_correspondences(pupil_points, iris_points)
normalized_iris = NormalizedIris(
normalized_image=np.zeros((self.params.res_in_r, self.params.res_in_phi), dtype=np.float32),
normalized_mask=np.zeros((self.params.res_in_r, self.params.res_in_phi), dtype=bool),
)
<fim_suffix>
return normalized_iris
def _generate_correspondences(
self, pupil_points: np.ndarray, iris_points: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""Generate correspondences between points in original image and normalized image.
Args:
pupil_points (np.ndarray): Pupil bounding points. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris bounding points. NumPy array of shape (num_points = 512, xy_coords = 2).
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with generated correspondences.
"""
pupil_points = pupil_points[:: self.params.skip_boundary_points]
iris_points = iris_points[:: self.params.skip_boundary_points]
src_points = []
for radius in self.params.intermediate_radiuses:
ring = pupil_points + radius * (iris_points - pupil_points)
ring = np.vstack([ring, ring[0]])
src_points.append(ring)
src_points = np.array(src_points)
num_rings, num_ring_points = src_points.shape[:2]
dst_xs, dst_ys = np.meshgrid(
np.linspace(0, self.params.res_in_phi, num_ring_points).astype(int),
np.linspace(0, self.params.res_in_r, num_rings).astype(int),
)
dst_points = np.array([dst_xs, dst_ys]).transpose((1, 2, 0))
return src_points, dst_points
def _normalize_roi(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
dst_points: np.ndarray,
normalize_roi_output_shape: Tuple[float, float],
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize a single ROI of an image.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): ROI's original input image points.
dst_points (np.ndarray): ROI's normalized image points.
normalize_roi_output_shape (t.Tuple[float, float]): Output shape of normalized ROI.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask ROIs.
"""
xmin, ymin, xmax, ymax = self._bbox_coords(dst_points)
normalize_image_xs = np.arange(xmin, xmax)
normalize_image_ys = np.arange(ymin, ymax)
normalize_image_points = np.meshgrid(normalize_image_xs, normalize_image_ys)
normalize_image_points = self.cartesian2homogeneous(normalize_image_points)
perspective_mat = cv2.getPerspectiveTransform(dst_points, src_points)
mapped_points = np.matmul(perspective_mat, normalize_image_points)
mapped_points = self.homogeneous2cartesian(mapped_points)
normalized_image_roi = np.zeros(normalize_roi_output_shape, dtype=np.float32)
normalized_mask_roi = np.zeros(normalize_roi_output_shape, dtype=bool)
for image_xy, normalized_xy in zip(mapped_points.T, normalize_image_points.T[..., :2]):
norm_x, norm_y = normalized_xy.astype(int)
shifted_y, shifted_x = norm_y - ymin, norm_x - xmin
normalized_image_roi[shifted_y, shifted_x] = interpolate_pixel_intensity(
original_image, pixel_coords=image_xy
)
try:
img_x, img_y = map(int, image_xy)
normalized_mask_roi[shifted_y, shifted_x] = iris_mask[img_y, img_x]
except IndexError:
normalized_mask_roi[shifted_y, shifted_x] = False
return normalized_image_roi / 255.0, normalized_mask_roi
def _bbox_coords(self, norm_dst_points: np.ndarray) -> Tuple[int, int, int, int]:
"""Extract the bounding box of currently processed normalized image ROI.
Args:
norm_dst_points (np.ndarray): Normalized image ROI coordinates.
Returns:
Tuple[int, int, int, int]: Bounding box coordinates in form (xmin, ymin, xmax, ymax).
"""
xmin, ymin = norm_dst_points[0].astype(int)
xmax, ymax = norm_dst_points[-1].astype(int)
return (xmin, ymin, xmax, ymax)
def _correspondence_rois_coords(
self,
angle_idx: int,
ring_idx: int,
src_points: np.ndarray,
dst_points: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Generate a single correspondence ROIs between original image and normalized one based on angle index and ring index.
Args:
angle_idx (int): Boundary point angle index.
ring_idx (int): Intermediate ring index.
src_points (np.ndarray): All mapping points from an original image.
NumPy array of shape (
num_intermediate_rings = self.intermediate_radiuses,
num_boundary_points = 512 // self.skip_boundary_points,
xy_coords = 2
).
dst_points (np.ndarray): All mapping points from a normalized image.
NumPy array of shape (
num_intermediate_rings = self.intermediate_radiuses,
num_boundary_points = 512 // self.skip_boundary_points,
xy_coords = 2
).
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with ROIs extracted from src_points and dst_points.
"""
src_roi = src_points[ring_idx : ring_idx + 2, angle_idx : angle_idx + 2]
dst_roi = dst_points[ring_idx : ring_idx + 2, angle_idx : angle_idx + 2]
return src_roi.reshape(4, 2), dst_roi.reshape(4, 2)
@staticmethod
def cartesian2homogeneous(points: List[np.ndarray]) -> np.ndarray:
"""Convert points in cartesian coordinates to homogeneous coordinates.
Args:
points (List[np.ndarray]): Points in cartesian coordinates. Array should be in format: [[x values], [y values]].
Returns:
np.ndarray: Points in homogeneous coordinates. Returned array will have format: [[x values], [y values], [1 ... 1]].
"""
x_coords, y_coords = points
x_coords = x_coords.reshape(-1, 1)
y_coords = y_coords.reshape(-1, 1)
homogeneous_coords = np.hstack([x_coords, y_coords, np.ones((len(x_coords), 1))])
return homogeneous_coords.T
@staticmethod
def homogeneous2cartesian(points: np.ndarray) -> np.ndarray:
"""Convert points in homogeneous coordinates to cartesian coordinates.
Args:
points (np.ndarray): Points in homogeneous coordinates. Array should be in format: [[x values], [y values], [perspective scale values]].
Returns:
np.ndarray: Points in cartesian coordinates. Returned array will have format: [[x values], [y values]].
"""
points /= points[-1]
points = points[:2]
return points
<fim_middle>for angle_point_idx in range(src_points.shape[1] - 1):
for ring_idx in range(src_points.shape[0] - 1):
current_src, current_dst = self._correspondence_rois_coords(
angle_idx=angle_point_idx,
ring_idx=ring_idx,
src_points=src_points,
dst_points=dst_points,
)
xmin, ymin, xmax, ymax = self._bbox_coords(current_dst)
normalized_image_roi, normalized_mask_roi = self._normalize_roi(
original_image=image.img_data,
iris_mask=iris_mask,
src_points=current_src.astype(np.float32),
dst_points=current_dst.astype(np.float32),
normalize_roi_output_shape=(ymax - ymin, xmax - xmin),
)
normalized_iris.normalized_image[ymin:ymax, xmin:xmax] = normalized_image_roi
normalized_iris.normalized_mask[ymin:ymax, xmin:xmax] = normalized_mask_roi | for angle_point_idx in range(src_points.shape[1] - 1):
for ring_idx in range(src_points.shape[0] - 1):
current_src, current_dst = self._correspondence_rois_coords(
angle_idx=angle_point_idx,
ring_idx=ring_idx,
src_points=src_points,
dst_points=dst_points,
)
xmin, ymin, xmax, ymax = self._bbox_coords(current_dst)
normalized_image_roi, normalized_mask_roi = self._normalize_roi(
original_image=image.img_data,
iris_mask=iris_mask,
src_points=current_src.astype(np.float32),
dst_points=current_dst.astype(np.float32),
normalize_roi_output_shape=(ymax - ymin, xmax - xmin),
)
normalized_iris.normalized_image[ymin:ymax, xmin:xmax] = normalized_image_roi
normalized_iris.normalized_mask[ymin:ymax, xmin:xmax] = normalized_mask_roi | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>open-iris/src/iris/nodes/encoder/iris_encoder.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/iris_response/conv_filter_bank.py
def run(self, normalization_output: NormalizedIris) -> IrisFilterResponse:
"""Apply filters to a normalized iris image.
Args:
normalization_output (NormalizedIris): Output of the normalization process.
Returns:
IrisFilterResponse: filter responses.
"""
iris_responses: List[np.ndarray] = []
mask_responses: List[np.ndarray] = []
for i_filter, i_schema in zip(self.params.filters, self.params.probe_schemas):
iris_response, mask_response = self._convolve(i_filter, i_schema, normalization_output)
iris_responses.append(iris_response)
mask_responses.append(mask_response)
return IrisFilterResponse(iris_responses=iris_responses, mask_responses=mask_responses)
# open-iris/src/iris/nodes/vectorization/contouring.py
def _filter_contours(self, contours: List[np.ndarray]) -> List[np.ndarray]:
"""Filter contours based on predefined filters.
Args:
contours (List[np.ndarray]): Contours list.
Returns:
List[np.ndarray]: Filtered list of contours.
"""
for filter_func in self.params.contour_filters:
contours = filter_func(contours)
return contours
# open-iris/src/iris/nodes/iris_response_refinement/fragile_bits_refinement.py
def run(self, iris_filter_response: IrisFilterResponse) -> IrisFilterResponse:
"""Generate refined IrisFilterResponse.
Args:
iris_filter_response (IrisFilterResponse): Filter bank response.
Returns:
IrisFilterResponse: Filter bank response.
"""
fragile_masks = []
for iris_response, iris_mask in zip(iris_filter_response.iris_responses, iris_filter_response.mask_responses):
if self.params.fragile_type == "cartesian":
mask_value_real = np.abs(np.real(iris_response)) >= self.params.value_threshold[0]
mask_value_imaginary = np.abs(np.imag(iris_response)) >= self.params.value_threshold[1]
mask_value = mask_value_real * mask_value_imaginary
if self.params.fragile_type == "polar":
iris_response_r = np.abs(iris_response)
iris_response_phi = np.angle(iris_response)
mask_value_r = iris_response_r >= self.params.value_threshold[0]
cos_mask = np.abs(np.cos(iris_response_phi)) <= np.abs(np.cos(self.params.value_threshold[1]))
sine_mask = np.abs(np.sin(iris_response_phi)) <= np.abs(np.cos(self.params.value_threshold[1]))
mask_value_phi = cos_mask * sine_mask
mask_value = mask_value_r * mask_value_phi
mask_value = mask_value * iris_mask
fragile_masks.append(mask_value)
return IrisFilterResponse(iris_responses=iris_filter_response.iris_responses, mask_responses=fragile_masks)
"""
from typing import List
import numpy as np
from pydantic import Field
from iris.callbacks.callback_interface import Callback
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import IrisFilterResponse, IrisTemplate
class IrisEncoder(Algorithm):
"""Binarize IrisFilterResponse to generate iris code using Daugman's method.
Algorithm steps:
1) Binarize iris response by comparing real and imaginary parts to zero.
2) Binarize mask response by comparing its values to a given parameter: mask_threshold.
Reference:
[1] https://www.robots.ox.ac.uk/~az/lectures/est/iris.pdf.
"""
class Parameters(Algorithm.Parameters):
"""IrisEncoder parameters."""
mask_threshold: float = Field(..., ge=0.0, le=1.0)
__parameters_type__ = Parameters
def __init__(self, mask_threshold: float = 0.9, callbacks: List[Callback] = []) -> None:
"""Assign parameters.
Args:
mask_threshold (float): threshold to binarize mask_responses, in the range of [0,1]. Defaults to 0.9.
callbacks (List[Callback]): callbacks list. Defaults to [].
"""
super().__init__(mask_threshold=mask_threshold, callbacks=callbacks)
def run(self, response: IrisFilterResponse) -> IrisTemplate:
"""Encode iris code and mask code.
Args:
response (IrisFilterResponse): Filter responses.
Returns:
IrisTemplate: Final iris template.
"""
iris_codes: List[np.ndarray] = []
mask_codes: List[np.ndarray] = []
<fim_suffix>
return IrisTemplate(iris_codes=iris_codes, mask_codes=mask_codes)
<fim_middle>for iris_response, mask_response in zip(response.iris_responses, response.mask_responses):
mask_code = mask_response >= self.params.mask_threshold
iris_code = np.stack([iris_response.real > 0, iris_response.imag > 0], axis=-1)
mask_code = np.stack([mask_code, mask_code], axis=-1)
iris_codes.append(iris_code)
mask_codes.append(mask_code) | for iris_response, mask_response in zip(response.iris_responses, response.mask_responses):
mask_code = mask_response >= self.params.mask_threshold
iris_code = np.stack([iris_response.real > 0, iris_response.imag > 0], axis=-1)
mask_code = np.stack([mask_code, mask_code], axis=-1)
iris_codes.append(iris_code)
mask_codes.append(mask_code) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
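The completed loop above is the core of Daugman-style binarization. Below is a minimal sketch of the same step on synthetic toy data, assuming nothing beyond NumPy.
# Binarize a complex filter response into a 2-bit iris code plus mask code.
import numpy as np

rng = np.random.default_rng(0)
iris_response = rng.standard_normal((4, 8)) + 1j * rng.standard_normal((4, 8))  # toy complex filter response
mask_response = rng.uniform(0.0, 1.0, size=(4, 8))                              # toy mask response
mask_threshold = 0.9

iris_code = np.stack([iris_response.real > 0, iris_response.imag > 0], axis=-1)
mask_bit = mask_response >= mask_threshold
mask_code = np.stack([mask_bit, mask_bit], axis=-1)

print(iris_code.shape, iris_code.dtype)  # (4, 8, 2) bool
print(mask_code.mean())                  # fraction of code bits kept by the mask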
<filename>open-iris/src/iris/nodes/iris_response/image_filters/gabor_filters.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/iris_response/image_filters/image_filter_interface.py
def compute_kernel_values(self) -> np.ndarray:
"""Compute values of filter kernel.
Returns:
np.ndarray: Computed kernel values.
"""
pass
# open-iris/src/iris/nodes/geometry_refinement/smoothing.py
def _find_start_index(self, phi: np.ndarray) -> int:
"""Find the start index by checking the largest gap. phi needs to be sorted.
Args:
phi (np.ndarray): phi angle values.
Raises:
GeometryRefinementError: Raised if phi values are not sorted ascendingly.
Returns:
int: Index value.
"""
if not np.all((phi - np.roll(phi, 1))[1:] >= 0):
raise GeometryRefinementError("Smoothing._find_start_index phi must be sorted ascendingly!")
phi_tmp = np.concatenate(([phi[-1] - 2 * np.pi], phi, [phi[0] + 2 * np.pi]))
phi_tmp_left_neighbor = np.roll(phi_tmp, 1)
dphi = (phi_tmp - phi_tmp_left_neighbor)[1:-1]
largest_gap_index = np.argmax(dphi)
return int(largest_gap_index)
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def cartesian2homogeneous(points: List[np.ndarray]) -> np.ndarray:
"""Convert points in cartesian coordinates to homogeneous coordinates.
Args:
points (List[np.ndarray]): Points in cartesian coordinates. Array should be in format: [[x values], [y values]].
Returns:
np.ndarray: Points in homogeneous coordinates. Returned array will have format: [[x values], [y values], [1 ... 1]].
"""
x_coords, y_coords = points
x_coords = x_coords.reshape(-1, 1)
y_coords = y_coords.reshape(-1, 1)
homogeneous_coords = np.hstack([x_coords, y_coords, np.ones((len(x_coords), 1))])
return homogeneous_coords.T
"""
from typing import Any, Dict, Tuple
import numpy as np
from pydantic import Field, conint, root_validator, validator
import iris.io.validators as pydantic_v
from iris.io.errors import ImageFilterError
from iris.nodes.iris_response.image_filters.image_filter_interface import ImageFilter
def upper_bound_Gabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bounds of Gabor filter parameters such as sigma_phi, sigma_rho and lambda_phi for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: Raised if 1) sigma_phi is greater than kernel_size[0], 2) sigma_rho is greater than kernel_size[1], 3) lambda_phi is greater than kernel_size[0].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, sigma_phi, sigma_rho, lambda_phi = (
values["kernel_size"],
values["sigma_phi"],
values["sigma_rho"],
values["lambda_phi"],
)
if sigma_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: sigma_phi can not be greater than kernel_size[0].")
if sigma_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: sigma_rho can not be greater than kernel_size[1].")
if lambda_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: lambda_phi can not be greater than kernel_size[0].")
return values
def upper_bound_LogGabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bound of LogGabor filter parameter lambda_rho for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: lambda_rho can not be greater than kernel_size[1].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, lambda_rho = values["kernel_size"], values["lambda_rho"]
if lambda_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: lambda_rho can not be greater than kernel_size[1].")
return values
def get_xy_mesh(kernel_size: Tuple[int, int]) -> Tuple[np.ndarray, np.ndarray]:
"""Get (x,y) meshgrids for a given kernel size.
Args:
kernel_size (Tuple[int, int]): Kernel width and height.
Returns:
Tuple[np.ndarray, np.ndarray]: meshgrid of (x, y) positions.
"""
ksize_phi_half = kernel_size[0] // 2
ksize_rho_half = kernel_size[1] // 2
y, x = np.meshgrid(
np.arange(-ksize_phi_half, ksize_phi_half + 1),
np.arange(-ksize_rho_half, ksize_rho_half + 1),
indexing="xy",
sparse=True,
)
return x, y
def get_radius(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""Get radius to the image center for a given array of relative positions (x,y).
Args:
x (np.ndarray): x position relative to the image center.
y (np.ndarray): y position relative to the image center.
Returns:
np.ndarray: radius to the image center.
"""
radius = np.sqrt(x**2 + y**2)
return radius
def rotate(x: np.ndarray, y: np.ndarray, angle: float) -> Tuple[np.ndarray, np.ndarray]:
"""Rotate a given array of relative positions (x,y) by a given angle.
Args:
x (np.ndarray): x position.
y (np.ndarray): y position.
angle (float): angle for rotation (in degrees).
Returns:
Tuple[np.ndarray, np.ndarray]: rotated x, y positions.
"""
cos_theta = np.cos(angle * np.pi / 180)
sin_theta = np.sin(angle * np.pi / 180)
rotx = x * cos_theta + y * sin_theta
roty = -x * sin_theta + y * cos_theta
return rotx, roty
def normalize_kernel_values(kernel_values: np.ndarray) -> np.ndarray:
"""Normalize the kernel values so that the square sum is 1.
Args:
kernel_values (np.ndarray): Kernel values (complex numbers).
Returns:
np.ndarray: normalized Kernel values.
"""
norm_real = np.linalg.norm(kernel_values.real, ord="fro")
if norm_real > 0:
kernel_values.real /= norm_real
norm_imag = np.linalg.norm(kernel_values.imag, ord="fro")
if norm_imag > 0:
kernel_values.imag /= norm_imag
return kernel_values
def convert_to_fixpoint_kernelvalues(kernel_values: np.ndarray) -> np.ndarray:
"""Convert the kernel values (both real and imaginary) to fix points.
Args:
kernel_values (np.ndarray): Kernel values.
Returns:
np.ndarray: fix-point Kernel values.
"""
if np.iscomplexobj(kernel_values):
kernel_values.real = np.round(kernel_values.real * 2**15)
kernel_values.imag = np.round(kernel_values.imag * 2**15)
else:
kernel_values = np.round(kernel_values * 2**15)
return kernel_values
class GaborFilter(ImageFilter):
"""Implementation of a 2D Gabor filter.
Reference:
[1] https://inc.ucsd.edu/mplab/75/media//gabor.pdf.
"""
class Parameters(ImageFilter.Parameters):
"""GaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., ge=1)
sigma_rho: float = Field(..., ge=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_phi: float = Field(..., ge=2)
dc_correction: bool
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_Gabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_phi: float,
dc_correction: bool = True,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): phi standard deviation.
sigma_rho (float): rho standard deviation.
theta_degrees (float): orientation of kernel in degrees.
lambda_phi (float): wavelength of the sinusoidal factor, lower value = thinner strip.
dc_correction (bool, optional): whether to enable DC correction. Defaults to True.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_phi=lambda_phi,
dc_correction=dc_correction,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D Gabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
rotx, roty = rotate(x, y, self.params.theta_degrees)
# calculate carrier and envelope
carrier = 1j * 2 * np.pi / self.params.lambda_phi * rotx
envelope = -(rotx**2 / self.params.sigma_phi**2 + roty**2 / self.params.sigma_rho**2) / 2
# calculate kernel values
kernel_values = np.exp(envelope + carrier)
kernel_values /= 2 * np.pi * self.params.sigma_phi * self.params.sigma_rho
# apply DC correction
<fim_suffix>
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
class LogGaborFilter(ImageFilter):
"""Implementation of a 2D LogGabor filter.
Reference:
[1] https://en.wikipedia.org/wiki/Log_Gabor_filter.
"""
class Parameters(ImageFilter.Parameters):
"""LogGaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., gt=0, le=np.pi)
sigma_rho: float = Field(..., gt=0.1, le=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_rho: float = Field(..., gt=2)
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_LogGabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_rho: float,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): bandwidth in phi (frequency domain).
sigma_rho (float): bandwidth in rho (frequency domain).
theta_degrees (float): orientation of filter in degrees.
lambda_rho (float): wavelength in rho.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_rho=lambda_rho,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D LogGabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
radius = get_radius(x, y)
# remove 0 radius value in the center
ksize_phi_half = self.params.kernel_size[0] // 2
ksize_rho_half = self.params.kernel_size[1] // 2
radius[ksize_rho_half][ksize_phi_half] = 1
# get angular distance
[rotx, roty] = rotate(x, y, self.params.theta_degrees)
dtheta = np.arctan2(roty, rotx)
# calculate envelope and orientation
envelope = np.exp(
-0.5
* np.log2(radius * self.params.lambda_rho / self.params.kernel_size[1]) ** 2
/ self.params.sigma_rho**2
)
envelope[ksize_rho_half][ksize_phi_half] = 0
orientation = np.exp(-0.5 * dtheta**2 / self.params.sigma_phi**2)
# calculate kernel values
kernel_values = envelope * orientation
kernel_values = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kernel_values)))
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
<fim_middle>if self.params.dc_correction:
# Step 1: calculate mean value of Gabor Wavelet
g_mean = np.mean(np.real(kernel_values), axis=-1)
# Step 2: define gaussian offset
correction_term_mean = np.mean(envelope, axis=-1)
# Step 3: subtract gaussian
kernel_values = kernel_values - (g_mean / correction_term_mean)[:, np.newaxis] * envelope | if self.params.dc_correction:
# Step 1: calculate mean value of Gabor Wavelet
g_mean = np.mean(np.real(kernel_values), axis=-1)
# Step 2: define gaussian offset
correction_term_mean = np.mean(envelope, axis=-1)
# Step 3: subtract gaussian
kernel_values = kernel_values - (g_mean / correction_term_mean)[:, np.newaxis] * envelope | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
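The completed IF block above applies DC correction to a Gabor kernel. Below is a minimal sketch of the kernel construction itself (complex carrier times Gaussian envelope) with small made-up parameters, omitting the DC-correction and fixpoint-conversion steps.
# Build and normalize a small 2D Gabor kernel.
import numpy as np

coords = np.arange(-4, 5)                 # 9x9 toy kernel grid
x, y = np.meshgrid(coords, coords)
sigma_phi, sigma_rho = 2.0, 2.0           # made-up standard deviations
theta_deg, lambda_phi = 45.0, 4.0         # made-up orientation and wavelength

cos_t, sin_t = np.cos(np.radians(theta_deg)), np.sin(np.radians(theta_deg))
rotx, roty = x * cos_t + y * sin_t, -x * sin_t + y * cos_t

carrier = 1j * 2 * np.pi / lambda_phi * rotx
envelope = -(rotx**2 / sigma_phi**2 + roty**2 / sigma_rho**2) / 2
kernel = np.exp(envelope + carrier) / (2 * np.pi * sigma_phi * sigma_rho)

# normalize so the real and imaginary parts each have unit Frobenius norm
kernel = kernel.real / np.linalg.norm(kernel.real, "fro") + 1j * kernel.imag / np.linalg.norm(kernel.imag, "fro")
print(kernel.shape, np.linalg.norm(kernel.real, "fro"), np.linalg.norm(kernel.imag, "fro"))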
<filename>open-iris/src/iris/nodes/normalization/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def get_pixel_binary(image: np.ndarray, pixel_x: float, pixel_y: float) -> bool:
"""Get the boolean value of a pixel from a binary image.
Args:
image (np.ndarray): Binary image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
bool: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return False
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def _normalize_all(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize all points of an image using bilinear.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): original input image points.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask.
"""
src_shape = src_points.shape[0:2]
src_points = np.vstack(src_points)
normalized_image = np.array(
[interpolate_pixel_intensity(original_image, pixel_coords=image_xy) for image_xy in src_points]
)
normalized_image = np.reshape(normalized_image, src_shape)
normalized_mask = np.array(
[self.get_pixel_binary(iris_mask, image_xy[0], image_xy[1]) for image_xy in src_points]
)
normalized_mask = np.reshape(normalized_mask, src_shape)
return normalized_image / 255.0, normalized_mask
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def _normalize_roi(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
dst_points: np.ndarray,
normalize_roi_output_shape: Tuple[float, float],
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize a single ROI of an image.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): ROI's original input image points.
dst_points (np.ndarray): ROI's normalized image points.
normalize_roi_output_shape (t.Tuple[float, float]): Output shape of normalized ROI.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask ROIs.
"""
xmin, ymin, xmax, ymax = self._bbox_coords(dst_points)
normalize_image_xs = np.arange(xmin, xmax)
normalize_image_ys = np.arange(ymin, ymax)
normalize_image_points = np.meshgrid(normalize_image_xs, normalize_image_ys)
normalize_image_points = self.cartesian2homogeneous(normalize_image_points)
perspective_mat = cv2.getPerspectiveTransform(dst_points, src_points)
mapped_points = np.matmul(perspective_mat, normalize_image_points)
mapped_points = self.homogeneous2cartesian(mapped_points)
normalized_image_roi = np.zeros(normalize_roi_output_shape, dtype=np.float32)
normalized_mask_roi = np.zeros(normalize_roi_output_shape, dtype=bool)
for image_xy, normalized_xy in zip(mapped_points.T, normalize_image_points.T[..., :2]):
norm_x, norm_y = normalized_xy.astype(int)
shifted_y, shifted_x = norm_y - ymin, norm_x - xmin
normalized_image_roi[shifted_y, shifted_x] = interpolate_pixel_intensity(
original_image, pixel_coords=image_xy
)
try:
img_x, img_y = map(int, image_xy)
normalized_mask_roi[shifted_y, shifted_x] = iris_mask[img_y, img_x]
except IndexError:
normalized_mask_roi[shifted_y, shifted_x] = False
return normalized_image_roi / 255.0, normalized_mask_roi
"""
from typing import Tuple
import numpy as np
from pydantic import NonNegativeInt
from iris.io.dataclasses import GeometryPolygons
from iris.utils import common
def generate_iris_mask(extrapolated_contours: GeometryPolygons, noise_mask: np.ndarray) -> np.ndarray:
"""Generate iris mask by first finding the intersection region between extrapolated iris contours and eyeball contours. Then remove from the outputted mask those pixels for which noise_mask is equal to True.
Args:
extrapolated_contours (GeometryPolygons): Iris polygon vertices.
noise_mask (np.ndarray): Noise mask.
Returns:
np.ndarray: Iris mask.
"""
img_h, img_w = noise_mask.shape[:2]
iris_mask = common.contour_to_mask(extrapolated_contours.iris_array, (img_w, img_h))
eyeball_mask = common.contour_to_mask(extrapolated_contours.eyeball_array, (img_w, img_h))
iris_mask = iris_mask & eyeball_mask
iris_mask = ~(iris_mask & noise_mask) & iris_mask
return iris_mask
def correct_orientation(
pupil_points: np.ndarray, iris_points: np.ndarray, eye_orientation: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Correct orientation by changing the starting angle in pupil and iris points' arrays.
Args:
pupil_points (np.ndarray): Pupil boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
eye_orientation (float): Eye orientation angle in radians.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with rotated based on eye_orientation angle boundary points (pupil_points, iris_points).
"""
orientation_angle = np.degrees(eye_orientation)
num_rotations = -round(orientation_angle * len(pupil_points) / 360.0)
pupil_points = np.roll(pupil_points, num_rotations, axis=0)
iris_points = np.roll(iris_points, num_rotations, axis=0)
return pupil_points, iris_points
def getgrids(res_in_r: NonNegativeInt, p2i_ratio: NonNegativeInt) -> np.ndarray:
"""Generate radius grids for nonlinear normalization based on p2i_ratio (pupil_to_iris ratio).
Args:
res_in_r (NonNegativeInt): Normalized image r resolution.
p2i_ratio (NonNegativeInt): pupil_to_iris ratio, range in [0,100]
Returns:
np.ndarray: nonlinear sampling grids for normalization
"""
p = [np.square(x) for x in np.arange(28, max(74 - p2i_ratio, p2i_ratio - 14), 1)]
q = p - p[0]
q = q / q[-1]
grids = np.interp(np.linspace(0, 1.0, res_in_r + 1), np.linspace(0, 1.0, len(q)), q)
return grids[0:-1] + np.diff(grids) / 2
def interpolate_pixel_intensity(image: np.ndarray, pixel_coords: Tuple[float, float]) -> float:
"""Perform bilinear interpolation to estimate pixel intensity in a given location.
Args:
image (np.ndarray): Original, not normalized image.
pixel_coords (Tuple[float, float]): Pixel coordinates.
Returns:
float: Interpolated pixel intensity.
Reference:
[1] https://en.wikipedia.org/wiki/Bilinear_interpolation
"""
def get_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return 0.0
def get_interpolation_points_coords(
image: np.ndarray, pixel_x: float, pixel_y: float
) -> Tuple[float, float, float, float]:
"""Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
"""
xmin, ymin = np.floor(pixel_x), np.floor(pixel_y)
xmax, ymax = np.ceil(pixel_x), np.ceil(pixel_y)
img_h, img_w = image.shape[:2]
<fim_suffix>
if xmin == xmax and xmax == img_w - 1:
xmin -= 1
if ymin == ymax and not ymax == img_h - 1:
ymax += 1
if ymin == ymax and ymax == img_h - 1:
ymin -= 1
return xmin, ymin, xmax, ymax
pixel_x, pixel_y = pixel_coords
xmin, ymin, xmax, ymax = get_interpolation_points_coords(image, pixel_x=pixel_x, pixel_y=pixel_y)
lower_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymax)
lower_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymax)
upper_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymin)
upper_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymin)
xs_differences = np.array([xmax - pixel_x, pixel_x - xmin])
neighboring_pixel_intensities = np.array(
[
[lower_left_pixel_intensity, upper_left_pixel_intensity],
[lower_right_pixel_intensity, upper_right_pixel_intensity],
]
)
ys_differences = np.array([[pixel_y - ymin], [ymax - pixel_y]])
pixel_intensity = np.matmul(np.matmul(xs_differences, neighboring_pixel_intensities), ys_differences)
return pixel_intensity.item()
<fim_middle>if xmin == xmax and not xmax == img_w - 1:
xmax += 1 | if xmin == xmax and not xmax == img_w - 1:
xmax += 1 | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
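The bilinear formula used by interpolate_pixel_intensity above can be checked in isolation. The following is a minimal sketch with plain NumPy, not the repository API; its corner handling simply clamps to the image border instead of reproducing the exact xmin/xmax adjustments of get_interpolation_points_coords.

import numpy as np

def bilinear(image: np.ndarray, x: float, y: float) -> float:
    # Corners of the interpolation cell, clamped to the image grid.
    y0, x0 = int(np.floor(y)), int(np.floor(x))
    y1, x1 = min(y0 + 1, image.shape[0] - 1), min(x0 + 1, image.shape[1] - 1)
    dy, dx = y - y0, x - x0
    top = (1 - dx) * image[y0, x0] + dx * image[y0, x1]
    bottom = (1 - dx) * image[y1, x0] + dx * image[y1, x1]
    return float((1 - dy) * top + dy * bottom)

patch = np.array([[10.0, 20.0], [30.0, 40.0]])
print(bilinear(patch, 0.5, 0.5))  # 25.0 -- the mean of the four corner pixels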
<filename>open-iris/src/iris/nodes/iris_response/probe_schemas/regular_probe_schema.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/io/validators.py
def are_all_positive(cls: type, v: Any, field: fields.ModelField) -> Any:
"""Check that all values are positive.
Args:
cls (type): Class type.
v (Any): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
        ValueError: Raised if not all values are positive.
Returns:
Any: `v` sent for further processing.
"""
if isinstance(v, Iterable):
if not np.array([value >= 0 for value in v]).all():
raise ValueError(f"{cls.__name__}: all {field.name} must be positive. Received {v}")
elif v < 0.0:
raise ValueError(f"{cls.__name__}: {field.name} must be positive. Received {v}")
return v
# open-iris/src/iris/orchestration/validators.py
def pipeline_metadata_version_check(cls: type, v: str, field: fields.ModelField) -> str:
"""Check if the version provided in the input config matches the current iris.__version__."""
if v != __version__:
raise IRISPipelineError(
f"Wrong config version. Cannot initialise IRISPipeline version {__version__} on a config file "
f"version {v}"
)
return v
# open-iris/src/iris/nodes/aggregation/noise_mask_union.py
def run(self, elements: List[NoiseMask]) -> NoiseMask:
"""Compute the union of a list of NoiseMask.
Args:
elements (List[NoiseMask]): input NoiseMasks.
Raises:
            ValueError: Raised if not all NoiseMask.mask have the same shape.
Returns:
NoiseMask: aggregated NoiseMasks
"""
if not all([mask.mask.shape == elements[0].mask.shape for mask in elements]):
raise ValueError(
f"Every NoiseMask.mask must have the same shape to be aggregated. "
f"Received {[mask.mask.shape for mask in elements]}"
)
noise_union = np.sum([mask.mask for mask in elements], axis=0) > 0
return NoiseMask(mask=noise_union)
"""
from typing import List, Literal, Optional, Tuple, Union
import numpy as np
from pydantic import Field, PositiveInt, confloat, fields, validator
from iris.io.errors import ProbeSchemaError
from iris.nodes.iris_response.probe_schemas.probe_schema_interface import ProbeSchema
class RegularProbeSchema(ProbeSchema):
"""Probe Schema for a regular Grid."""
class RegularProbeSchemaParameters(ProbeSchema.ProbeSchemaParameters):
"""RegularProbeSchema parameters."""
n_rows: int = Field(..., gt=1)
n_cols: int = Field(..., gt=1)
boundary_rho: List[confloat(ge=0.0, lt=1)]
boundary_phi: Union[
Literal["periodic-symmetric", "periodic-left"],
List[confloat(ge=0.0, lt=1)],
]
image_shape: Optional[List[PositiveInt]]
@validator("boundary_rho", "boundary_phi")
def check_overlap(
cls: type,
v: Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]],
field: fields.ModelField,
) -> Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]:
"""Validate offsets to avoid overlap.
Args:
cls (type): Class type.
v (Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
            ProbeSchemaError: Raised if the left and right offsets overlap, i.e. sum to 1 or more.
Returns:
Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]: The value for boundary_rho or boundary_phi respectively
"""
if isinstance(v, List):
if (v[0] + v[1]) >= 1:
raise ProbeSchemaError(
f"Offset for {field.name} on left and right corner must be a sum smaller 1, otherwise, offsets overlap."
)
return v
__parameters_type__ = RegularProbeSchemaParameters
def __init__(
self,
n_rows: int,
n_cols: int,
boundary_rho: List[float] = [0, 0.0625],
boundary_phi: Union[
Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]
] = "periodic-left",
image_shape: Optional[List[PositiveInt]] = None,
) -> None:
"""Assign parameters.
Args:
n_rows (int): Number of rows used, represents the number of different rho
values
n_cols (int): Number of columns used, represents the number of different
phi values
            boundary_rho (List[float], optional): List with two values f1 and f2. The sampling goes from 0+f1 to 1-f2.
boundary_phi (Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]], optional): Boundary conditions for the probing
can either be periodic or non-periodic, if they are periodic, the distance
from one column to the next must be the same also for the boundaries.
Else, no conditions for the boundaries are required. Options are:
- 'periodic-symmetric': the first and the last column are placed with an offset to the
borders, that is half of the spacing of the two columns
- 'periodic-left': the first column is at the border of the bottom of the image, while
the last column is one spacing apart from the top of the image
            - list with two values: in this case an offset of value f1 and f2 is set on both ends, i.e.
                the sampling no longer goes from 0 to 1 ('no-offset') but instead from 0+f1 to 1-f2
            Defaults to "periodic-left".
image_shape (list, optional): list containing the desired image dimensions. If provided, the function will throw
a warning if interpolation happens, i.e. if a kernel would be placed in between two pixels. Defaults to None.
"""
super().__init__(
n_rows=n_rows,
n_cols=n_cols,
boundary_rho=boundary_rho,
boundary_phi=boundary_phi,
image_shape=image_shape,
)
def generate_schema(self) -> Tuple[np.ndarray, np.ndarray]:
"""Generate rhos and phis.
Return:
Tuple[np.ndarray, np.ndarray]: the rhos and phis.
"""
rho = np.linspace(
0 + self.params.boundary_rho[0], 1 - self.params.boundary_rho[1], self.params.n_rows, endpoint=True
)
if self.params.boundary_phi == "periodic-symmetric":
phi = np.linspace(0, 1, self.params.n_cols, endpoint=False)
phi = phi + (phi[1] - phi[0]) / 2
if self.params.boundary_phi == "periodic-left":
phi = np.linspace(0, 1, self.params.n_cols, endpoint=False)
<fim_suffix>
phis, rhos = np.meshgrid(phi, rho)
rhos = rhos.flatten()
phis = phis.flatten()
# if image_shape provided: verify that values lie on pixel values
if self.params.image_shape is not None:
rhos_pixel_values = rhos * self.params.image_shape[0]
phis_pixel_values = phis * self.params.image_shape[1]
rho_pixel_values = np.logical_or(
np.less_equal(rhos_pixel_values % 1, 10 ** (-10)),
np.less_equal(1 - 10 ** (-10), rhos_pixel_values % 1),
).all()
phi_pixel_values = np.logical_or(
np.less_equal(phis_pixel_values % 1, 10 ** (-10)),
np.less_equal(1 - 10 ** (-10), phis_pixel_values % 1),
).all()
if not rho_pixel_values:
raise ProbeSchemaError(
f"Choice for n_rows {self.params.n_rows} leads to interpolation errors, please change input variables"
)
if not phi_pixel_values:
raise ProbeSchemaError(f"Choice for n_cols {self.params.n_cols} leads to interpolation errors")
return rhos, phis
@staticmethod
def find_suitable_n_rows(
row_min: int,
row_max: int,
length: int,
boundary_condition: Union[
Literal["periodic-symmetric", "periodic-left"],
List[float],
] = "periodic_symmetric",
) -> List[int]:
"""Find proper spacing of rows/columns for given boundary conditions (i.e. image size, offset. etc).
Args:
row_min (int): Starting value for row count
row_max (int): End value for row count
length (int): Pixels in the respective dimension
boundary_condition (Union[Literal["periodic-symmetric", "periodic-left"], List[float]], optional): Boundary conditions for the probing
can either be periodic or non-periodic, if they are periodic, the distance
from one row to the next must be the same also for the boundaries.
Else, no conditions for the boundaries are required. Options are:
- 'periodic-symmetric': the first and the last row are placed with an offset to the
borders, that is half of the spacing of the two rows
- 'periodic-left': the first row is at the border of the bottom of the image, while
the last row is one spacing apart from the top of the image
            - list with two values: in this case an offset of value f1 and f2 is set on both ends, i.e.
                the sampling no longer goes from 0 to 1 ('no-offset') but instead from 0+f1 to 1-f2
            Defaults to "periodic-symmetric".
        Returns:
            List[int]: List of all numbers of rows that do not lead to interpolation errors
"""
suitable_values: List[int] = []
# loop through all values and validate whether they are suitable
for counter in range(row_min, row_max + 1):
if boundary_condition == "periodic-symmetric":
values = np.linspace(0, 1, counter, endpoint=False)
values = values + (values[1] - values[0]) / 2
if boundary_condition == "periodic-left":
values = np.linspace(0, 1, counter, endpoint=False)
if isinstance(boundary_condition, List):
values = np.linspace(0 + boundary_condition[0], 1 - boundary_condition[1], counter, endpoint=True)
pixel_values = values * length
pixel_values_modulo = pixel_values % 1
no_interpolation = np.less_equal(pixel_values_modulo, 10 ** (-10))
no_interpolation = np.logical_or(no_interpolation, np.less_equal(1 - 10 ** (-10), pixel_values_modulo))
no_interpolation = no_interpolation.all()
if no_interpolation:
suitable_values.append(counter)
return suitable_values
<fim_middle>if isinstance(self.params.boundary_phi, List):
phi = np.linspace(
0 + self.params.boundary_phi[0], 1 - self.params.boundary_phi[1], self.params.n_cols, endpoint=True
) | if isinstance(self.params.boundary_phi, List):
phi = np.linspace(
0 + self.params.boundary_phi[0], 1 - self.params.boundary_phi[1], self.params.n_cols, endpoint=True
) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
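A quick way to see the difference between the two periodic phi modes described in the docstrings above; a minimal sketch with plain NumPy, where n_cols = 4 is an arbitrary illustrative choice.

import numpy as np

n_cols = 4
periodic_left = np.linspace(0, 1, n_cols, endpoint=False)
# 'periodic-symmetric' shifts every column by half a spacing away from the border.
periodic_symmetric = periodic_left + (periodic_left[1] - periodic_left[0]) / 2
print(periodic_left)       # [0.   0.25 0.5  0.75]
print(periodic_symmetric)  # [0.125 0.375 0.625 0.875]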
<filename>open-iris/src/iris/nodes/geometry_refinement/smoothing.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/geometry_estimation/linear_extrapolation.py
def _estimate(self, vertices: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:
"""Estimate a circle fit for a single contour.
Args:
vertices (np.ndarray): Contour's vertices.
center_xy (Tuple[float, float]): Contour's center position.
Returns:
np.ndarray: Estimated polygon.
"""
rhos, phis = math.cartesian2polar(vertices[:, 0], vertices[:, 1], *center_xy)
padded_rhos = np.concatenate([rhos, rhos, rhos])
padded_phis = np.concatenate([phis - 2 * np.pi, phis, phis + 2 * np.pi])
interpolated_phis = np.arange(padded_phis.min(), padded_phis.max(), np.radians(self.params.dphi))
interpolated_rhos = np.interp(interpolated_phis, xp=padded_phis, fp=padded_rhos, period=2 * np.pi)
mask = (interpolated_phis >= 0) & (interpolated_phis < 2 * np.pi)
interpolated_phis, interpolated_rhos = interpolated_phis[mask], interpolated_rhos[mask]
xs, ys = math.polar2cartesian(interpolated_rhos, interpolated_phis, *center_xy)
estimated_vertices = np.column_stack([xs, ys])
return estimated_vertices
# open-iris/src/iris/nodes/geometry_refinement/contour_interpolation.py
def _interpolate_polygon_points(self, polygon: np.ndarray, max_distance_between_points_px: float) -> np.ndarray:
"""Interpolate contours points, so that the distance between two is no greater than `self.params.max_distance_between_boundary_points` in pixel space.
Args:
polygon (np.ndarray): Contour polygons.
max_distance_between_points_px (float): `self.params.max_distance_between_boundary_points` expressed in pixel length relative to iris diameter.
Returns:
np.ndarray: Interpolated polygon points.
"""
previous_boundary = np.roll(polygon, shift=1, axis=0)
distances = np.linalg.norm(polygon - previous_boundary, axis=1)
num_points = np.ceil(distances / max_distance_between_points_px).astype(int)
x: List[np.ndarray] = []
y: List[np.ndarray] = []
for (x1, y1), (x2, y2), num_point in zip(previous_boundary, polygon, num_points):
x.append(np.linspace(x1, x2, num=num_point, endpoint=False))
y.append(np.linspace(y1, y2, num=num_point, endpoint=False))
new_boundary = np.stack([np.concatenate(x), np.concatenate(y)], axis=1)
_, indices = np.unique(new_boundary, axis=0, return_index=True)
new_boundary = new_boundary[np.sort(indices)]
return new_boundary
# open-iris/src/iris/utils/math.py
def cartesian2polar(xs: np.ndarray, ys: np.ndarray, center_x: float, center_y: float) -> Tuple[np.ndarray, np.ndarray]:
"""Convert xs and ys cartesian coordinates to polar coordinates.
Args:
xs (np.ndarray): x values.
ys (np.ndarray): y values.
center_x (float): center's x.
center_y (float): center's y.
Returns:
Tuple[np.ndarray, np.ndarray]: Converted coordinates (rhos, phis).
"""
x_rel: np.ndarray = xs - center_x
y_rel: np.ndarray = ys - center_y
C = np.vectorize(complex)(x_rel, y_rel)
rho = np.abs(C)
phi = np.angle(C) % (2 * np.pi)
return rho, phi
"""
from typing import List, Tuple
import numpy as np
from pydantic import Field
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeCenters, GeometryPolygons
from iris.io.errors import GeometryRefinementError
from iris.utils import math
class Smoothing(Algorithm):
"""Implementation of contour smoothing algorithm.
Algorithm steps:
1) Map iris/pupil points to polar space based on estimated iris/pupil centers.
2) Smooth iris/pupil contour by applying 1D convolution with rolling median kernel approach.
3) Map points back to cartesian space from polar space.
"""
class Parameters(Algorithm.Parameters):
"""Smoothing parameters class."""
dphi: float = Field(..., gt=0.0, lt=360.0)
kernel_size: float = Field(..., gt=0.0, lt=360.0)
gap_threshold: float = Field(..., gt=0.0, lt=360.0)
__parameters_type__ = Parameters
def __init__(self, dphi: float = 1.0, kernel_size: float = 10.0, gap_threshold: float = 10.0) -> None:
"""Assign parameters.
Args:
dphi (float, optional): phi angle delta used to sample points while doing smoothing by interpolation. Defaults to 1.0.
kernel_size (float, optional): Rolling median kernel size expressed in radians. Final kernel size is computed as a quotient of kernel_size and dphi. Defaults to 10.0.
            gap_threshold (float, optional): Gap threshold distance. Defaults to 10.0.
"""
super().__init__(dphi=dphi, kernel_size=kernel_size, gap_threshold=gap_threshold)
@property
def kernel_offset(self) -> int:
"""Kernel offset (distance from kernel center to border) property used when smoothing with rolling median. If a quotient is less then 1 then kernel size equal to 1 is returned.
Returns:
int: Kernel size.
"""
return max(1, int((np.radians(self.params.kernel_size) / np.radians(self.params.dphi))) // 2)
def run(self, polygons: GeometryPolygons, eye_centers: EyeCenters) -> GeometryPolygons:
"""Perform smoothing refinement.
Args:
polygons (GeometryPolygons): Contours to refine.
eye_centers (EyeCenters): Eye center used when performing a coordinates mapping from cartesian space to polar space.
Returns:
GeometryPolygons: Smoothed contours.
"""
pupil_arcs = self._smooth(polygons.pupil_array, (eye_centers.pupil_x, eye_centers.pupil_y))
iris_arcs = self._smooth(polygons.iris_array, (eye_centers.iris_x, eye_centers.iris_y))
return GeometryPolygons(pupil_array=pupil_arcs, iris_array=iris_arcs, eyeball_array=polygons.eyeball_array)
def _smooth(self, polygon: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:
"""Smooth a single contour.
Args:
polygon (np.ndarray): Contour to smooth.
center_xy (Tuple[float, float]): Contour's center.
Returns:
np.ndarray: Smoothed contour's vertices.
"""
arcs, num_gaps = self._cut_into_arcs(polygon, center_xy)
arcs = (
self._smooth_circular_shape(arcs[0], center_xy)
if num_gaps == 0
else np.vstack([self._smooth_arc(arc, center_xy) for arc in arcs if len(arc) >= 2])
)
return arcs
def _cut_into_arcs(self, polygon: np.ndarray, center_xy: Tuple[float, float]) -> Tuple[List[np.ndarray], int]:
"""Cut contour into arcs.
Args:
polygon (np.ndarray): Contour polygon.
center_xy (Tuple[float, float]): Polygon's center.
Returns:
Tuple[List[np.ndarray], int]: Tuple with: (list of list of vertices, number of gaps detected in a contour).
"""
rho, phi = math.cartesian2polar(polygon[:, 0], polygon[:, 1], *center_xy)
phi, rho = self._sort_two_arrays(phi, rho)
differences = np.abs(phi - np.roll(phi, -1))
# True distance between first and last point
differences[-1] = 2 * np.pi - differences[-1]
gap_indices = np.argwhere(differences > np.radians(self.params.gap_threshold)).flatten()
<fim_suffix>
gap_indices += 1
phi, rho = np.split(phi, gap_indices), np.split(rho, gap_indices)
arcs = [
np.column_stack(math.polar2cartesian(rho_coords, phi_coords, *center_xy))
for rho_coords, phi_coords in zip(rho, phi)
]
# Connect arc which lies between 0 and 2π.
if len(arcs) == gap_indices.size + 1:
arcs[0] = np.vstack([arcs[0], arcs[-1]])
arcs = arcs[:-1]
return arcs, gap_indices.size
def _smooth_arc(self, vertices: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:
"""Smooth a single contour arc.
Args:
vertices (np.ndarray): Arc's vertices.
center_xy (Tuple[float, float]): Center of an entire contour.
Returns:
np.ndarray: Smoothed arc's vertices.
"""
rho, phi = math.cartesian2polar(vertices[:, 0], vertices[:, 1], *center_xy)
phi, rho = self._sort_two_arrays(phi, rho)
idx = self._find_start_index(phi)
offset = phi[idx]
relative_phi = (phi - offset) % (2 * np.pi)
smoothed_relative_phi, smoothed_rho = self._smooth_array(relative_phi, rho)
smoothed_phi = (smoothed_relative_phi + offset) % (2 * np.pi)
x_smoothed, y_smoothed = math.polar2cartesian(smoothed_rho, smoothed_phi, *center_xy)
return np.column_stack([x_smoothed, y_smoothed])
def _smooth_circular_shape(self, vertices: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:
"""Smooth arc in a form of a circular shape.
Args:
vertices (np.ndarray): Arc's vertices.
center_xy (Tuple[float, float]): Center of an entire contour.
Returns:
np.ndarray: Smoothed arc's vertices.
"""
rho, phi = math.cartesian2polar(vertices[:, 0], vertices[:, 1], *center_xy)
padded_phi = np.concatenate([phi - 2 * np.pi, phi, phi + 2 * np.pi])
padded_rho = np.concatenate([rho, rho, rho])
smoothed_phi, smoothed_rho = self._smooth_array(padded_phi, padded_rho)
mask = (smoothed_phi >= 0) & (smoothed_phi < 2 * np.pi)
rho_smoothed, phi_smoothed = smoothed_rho[mask], smoothed_phi[mask]
x_smoothed, y_smoothed = math.polar2cartesian(rho_smoothed, phi_smoothed, *center_xy)
return np.column_stack([x_smoothed, y_smoothed])
def _smooth_array(self, phis: np.ndarray, rhos: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Smooth coordinates expressed in polar space.
Args:
phis (np.ndarray): phi values.
rhos (np.ndarray): rho values.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with smoothed coordinates (phis, rhos).
"""
interpolated_phi = np.arange(min(phis), max(phis), np.radians(self.params.dphi))
interpolated_rho = np.interp(interpolated_phi, xp=phis, fp=rhos, period=2 * np.pi)
smoothed_rho = self._rolling_median(interpolated_rho, self.kernel_offset)
smoothed_phi = interpolated_phi[self.kernel_offset : -self.kernel_offset]
return smoothed_phi, smoothed_rho
def _sort_two_arrays(self, first_list: np.ndarray, second_list: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Sort both numpy arrays based on values from the first_list.
Args:
first_list (np.ndarray): First array.
second_list (np.ndarray): Second array.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with (sorted first array, sorted second array).
"""
zipped_lists = zip(first_list, second_list)
sorted_pairs = sorted(zipped_lists)
sorted_tuples = zip(*sorted_pairs)
first_list, second_list = [list(sorted_tuple) for sorted_tuple in sorted_tuples]
return np.array(first_list), np.array(second_list)
def _find_start_index(self, phi: np.ndarray) -> int:
"""Find the start index by checking the largest gap. phi needs to be sorted.
Args:
phi (np.ndarray): phi angle values.
Raises:
GeometryRefinementError: Raised if phi values are not sorted ascendingly.
Returns:
int: Index value.
"""
if not np.all((phi - np.roll(phi, 1))[1:] >= 0):
raise GeometryRefinementError("Smoothing._find_start_index phi must be sorted ascendingly!")
phi_tmp = np.concatenate(([phi[-1] - 2 * np.pi], phi, [phi[0] + 2 * np.pi]))
phi_tmp_left_neighbor = np.roll(phi_tmp, 1)
dphi = (phi_tmp - phi_tmp_left_neighbor)[1:-1]
largest_gap_index = np.argmax(dphi)
return int(largest_gap_index)
def _rolling_median(self, signal: np.ndarray, kernel_offset: int) -> np.ndarray:
"""Compute rolling median of a 1D signal.
Args:
signal (np.ndarray): Signal values.
            kernel_offset (int): Kernel offset (distance from kernel center to border).
Raises:
GeometryRefinementError: Raised if signal is not 1D.
Returns:
np.ndarray: Rolling median result.
"""
if signal.ndim != 1:
raise GeometryRefinementError("Smoothing._rolling_median only works for 1d arrays.")
stacked_signals: List[np.ndarray] = []
for i in range(-kernel_offset, kernel_offset + 1):
stacked_signals.append(np.roll(signal, i))
stacked_signals = np.stack(stacked_signals)
rolling_median = np.median(stacked_signals, axis=0)
rolling_median = rolling_median[kernel_offset:-kernel_offset]
return rolling_median
<fim_middle>if gap_indices.size < 2:
return [polygon], gap_indices.size | if gap_indices.size < 2:
return [polygon], gap_indices.size | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
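The rolling-median smoothing step above can be reproduced standalone. A minimal sketch; kernel_offset = 1 and the signal values are arbitrary illustrative choices.

import numpy as np

signal = np.array([1.0, 9.0, 2.0, 8.0, 3.0])
kernel_offset = 1
# Stack shifted copies of the signal, take the per-position median,
# then trim kernel_offset samples from both ends, as _rolling_median does.
stacked = np.stack([np.roll(signal, i) for i in range(-kernel_offset, kernel_offset + 1)])
print(np.median(stacked, axis=0)[kernel_offset:-kernel_offset])  # [2. 8. 3.]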
<filename>open-iris/src/iris/io/dataclasses.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/callbacks/pipeline_trace.py
def get(self, result_name: str) -> Any:
"""Get result_name result.
Args:
result_name (str): Result name.
Raises:
PipelineCallTraceStorageError: Raised if result_name is not found.
Returns:
Any: Result object.
"""
if result_name not in self._storage.keys():
raise PipelineCallTraceStorageError(f"Unknown result name: {result_name}")
return self._storage[result_name]
# open-iris/src/iris/io/validators.py
def is_not_zero_sum(cls: type, v: Any, field: fields.ModelField) -> Any:
"""Check that both inputs are not empty.
Args:
cls (type): Class type.
v (Any): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
        ValueError: Raised if the sum of v is zero.
Returns:
Any: `v` sent for further processing.
"""
if np.sum(v) == 0:
raise ValueError(f"{cls.__name__}: {field.name} sum cannot be zero.")
return v
# open-iris/src/iris/orchestration/output_builders.py
def __safe_serialize(object: Optional[ImmutableModel]) -> Optional[Dict[str, Any]]:
"""Serialize an object.
Args:
object (Optional[ImmutableModel]): Object to be serialized.
Raises:
NotImplementedError: Raised if object is not serializable.
Returns:
Optional[Dict[str, Any]]: Serialized object.
"""
if object is None:
return None
elif isinstance(object, ImmutableModel):
return object.serialize()
elif isinstance(object, (list, tuple)):
return [__safe_serialize(sub_object) for sub_object in object]
else:
raise NotImplementedError(f"Object of type {type(object)} is not serializable.")
"""
from __future__ import annotations
from typing import Any, Dict, List, Literal, Tuple
import numpy as np
from pydantic import Field, NonNegativeInt, root_validator, validator
from iris.io import validators as v
from iris.io.class_configs import ImmutableModel
from iris.utils import math
class IRImage(ImmutableModel):
"""Data holder for input IR image."""
img_data: np.ndarray
eye_side: Literal["left", "right"]
@property
def height(self) -> int:
"""Return IR image's height.
Return:
int: image height.
"""
return self.img_data.shape[0]
@property
def width(self) -> int:
"""Return IR image's width.
Return:
int: image width.
"""
return self.img_data.shape[1]
def serialize(self) -> Dict[str, Any]:
"""Serialize IRImage object.
Returns:
Dict[str, Any]: Serialized object.
"""
return self.dict(by_alias=True)
@staticmethod
def deserialize(data: Dict[str, Any]) -> IRImage:
"""Deserialize IRImage object.
Args:
data (Dict[str, Any]): Serialized object to dict.
Returns:
IRImage: Deserialized object.
"""
return IRImage(**data)
class SegmentationMap(ImmutableModel):
"""Data holder for the segmentation models predictions."""
predictions: np.ndarray
index2class: Dict[NonNegativeInt, str]
_is_segmap_3_dimensions = validator("predictions", allow_reuse=True)(v.is_array_n_dimensions(3))
@root_validator(pre=True, allow_reuse=True)
def _check_segmap_shape_and_consistency(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check that the number of classes equals the depth of the segmentation map.
Args:
values (Dict[str, Any]): Dictionary with segmap and classes {param_name: data}.
Raises:
            ValueError: Raised if the number of classes does not match the depth of the segmentation map.
Returns:
Dict[str, Any]: Unmodified values parameter passed for further processing.
"""
if values["predictions"].shape[2] != len(values["index2class"]):
segmap_depth, nb_classes = values["predictions"].shape, len(values["index2class"])
raise ValueError(
f"{cls.__name__}: mismatch between predictions shape {segmap_depth} and classes length {nb_classes}."
)
return values
@property
def height(self) -> int:
"""Return segmap's height.
Return:
int: segmap height.
"""
return self.predictions.shape[0]
@property
def width(self) -> int:
"""Return segmap's width.
Return:
int: segmap width.
"""
return self.predictions.shape[1]
@property
def nb_classes(self) -> int:
"""Return the number of classes of the segmentation map (i.e. nb channels).
Return:
int: number of classes in the segmentation map.
"""
return self.predictions.shape[2]
def __eq__(self, other: object) -> bool:
"""Check if two SegmentationMap objects are equal.
Args:
other (object): Second object to compare.
Returns:
bool: Comparison result.
"""
if not isinstance(other, SegmentationMap):
return False
return self.index2class == other.index2class and np.allclose(self.predictions, other.predictions)
def index_of(self, class_name: str) -> int:
"""Get class index based on its name.
Args:
            class_name (str): Class name.
        Raises:
            ValueError: Raised if `class_name` not found in `index2class` dictionary.
        Returns:
            int: Index of the class.
"""
for index, name in self.index2class.items():
<fim_suffix>
raise ValueError(f"Index for the `{class_name}` not found")
def serialize(self) -> Dict[str, Any]:
"""Serialize SegmentationMap object.
Returns:
Dict[str, Any]: Serialized object.
"""
return self.dict(by_alias=True)
@staticmethod
def deserialize(data: Dict[str, Any]) -> SegmentationMap:
"""Deserialize SegmentationMap object.
Args:
data (Dict[str, Any]): Serialized object to dict.
Returns:
SegmentationMap: Deserialized object.
"""
return SegmentationMap(**data)
class GeometryMask(ImmutableModel):
"""Data holder for the geometry raster."""
pupil_mask: np.ndarray
iris_mask: np.ndarray
eyeball_mask: np.ndarray
_is_mask_2D = validator("*", allow_reuse=True)(v.is_array_n_dimensions(2))
_is_binary = validator("*", allow_reuse=True)(v.is_binary)
@property
def filled_eyeball_mask(self) -> np.ndarray:
"""Fill eyeball mask.
Returns:
np.ndarray: Eyeball mask with filled iris/pupil "holes".
"""
binary_maps = np.zeros(self.eyeball_mask.shape[:2], dtype=np.uint8)
binary_maps += self.pupil_mask
binary_maps += self.iris_mask
binary_maps += self.eyeball_mask
return binary_maps.astype(bool)
@property
def filled_iris_mask(self) -> np.ndarray:
"""Fill iris mask.
Returns:
np.ndarray: Iris mask with filled pupil "holes".
"""
binary_maps = np.zeros(self.iris_mask.shape[:2], dtype=np.uint8)
binary_maps += self.pupil_mask
binary_maps += self.iris_mask
return binary_maps.astype(bool)
def serialize(self) -> Dict[str, Any]:
"""Serialize GeometryMask object.
Returns:
Dict[str, Any]: Serialized object.
"""
return self.dict(by_alias=True)
@staticmethod
def deserialize(data: Dict[str, Any]) -> GeometryMask:
"""Deserialize GeometryMask object.
Args:
data (Dict[str, Any]): Serialized object to dict.
Returns:
GeometryMask: Deserialized object.
"""
return GeometryMask(**data)
class NoiseMask(ImmutableModel):
"""Data holder for the refined geometry masks."""
mask: np.ndarray
_is_mask_2D = validator("mask", allow_reuse=True)(v.is_array_n_dimensions(2))
_is_binary = validator("*", allow_reuse=True)(v.is_binary)
def serialize(self) -> Dict[str, np.ndarray]:
"""Serialize NoiseMask object.
Returns:
Dict[str, np.ndarray]: Serialized object.
"""
return self.dict(by_alias=True)
@staticmethod
def deserialize(data: Dict[str, np.ndarray]) -> NoiseMask:
"""Deserialize NoiseMask object.
Args:
data (Dict[str, np.ndarray]): Serialized object to dict.
Returns:
NoiseMask: Deserialized object.
"""
return NoiseMask(**data)
class GeometryPolygons(ImmutableModel):
"""Data holder for the refined geometry polygons. Input np.ndarrays are mandatorily converted to np.float32 dtype for compatibility with some downstream tasks such as MomentsOfArea."""
pupil_array: np.ndarray
iris_array: np.ndarray
eyeball_array: np.ndarray
_is_list_of_points = validator("*", allow_reuse=True)(v.is_list_of_points)
_convert_dtype = validator("*", allow_reuse=True)(v.to_dtype_float32)
@property
def pupil_diameter(self) -> float:
"""Return pupil diameter.
Returns:
float: pupil diameter.
"""
return math.estimate_diameter(self.pupil_array)
@property
def iris_diameter(self) -> float:
"""Return iris diameter.
Returns:
float: iris diameter.
"""
return math.estimate_diameter(self.iris_array)
def serialize(self) -> Dict[str, np.ndarray]:
"""Serialize GeometryPolygons object.
Returns:
Dict[str, np.ndarray]: Serialized object.
"""
return {"pupil": self.pupil_array, "iris": self.iris_array, "eyeball": self.eyeball_array}
@staticmethod
def deserialize(data: Dict[str, np.ndarray]) -> GeometryPolygons:
"""Deserialize GeometryPolygons object.
Args:
data (Dict[str, np.ndarray]): Serialized object to dict.
Returns:
GeometryPolygons: Deserialized object.
"""
data = {"pupil_array": data["pupil"], "iris_array": data["iris"], "eyeball_array": data["eyeball"]}
return GeometryPolygons(**data)
class EyeOrientation(ImmutableModel):
"""Data holder for the eye orientation. The angle must be comprised between -pi/2 (included) and pi/2 (excluded)."""
angle: float = Field(..., ge=-np.pi / 2, lt=np.pi / 2)
def serialize(self) -> float:
"""Serialize EyeOrientation object.
Returns:
float: Serialized object.
"""
return self.angle
@staticmethod
def deserialize(data: float) -> EyeOrientation:
"""Deserialize EyeOrientation object.
Args:
data (float): Serialized object to float.
Returns:
EyeOrientation: Deserialized object.
"""
return EyeOrientation(angle=data)
class EyeCenters(ImmutableModel):
"""Data holder for eye's centers."""
pupil_x: float
pupil_y: float
iris_x: float
iris_y: float
@property
def center_distance(self) -> float:
"""Return distance between pupil and iris center.
Return:
float: center distance.
"""
return np.linalg.norm([self.iris_x - self.pupil_x, self.iris_y - self.pupil_y])
def serialize(self) -> Dict[str, Tuple[float]]:
"""Serialize EyeCenters object.
Returns:
Dict[str, Tuple[float]]: Serialized object.
"""
return {"iris_center": (self.iris_x, self.iris_y), "pupil_center": (self.pupil_x, self.pupil_y)}
@staticmethod
def deserialize(data: Dict[str, Tuple[float]]) -> EyeCenters:
"""Deserialize EyeCenters object.
Args:
data (Dict[str, Tuple[float]]): Serialized object to dict.
Returns:
EyeCenters: Deserialized object.
"""
data = {
"pupil_x": data["pupil_center"][0],
"pupil_y": data["pupil_center"][1],
"iris_x": data["iris_center"][0],
"iris_y": data["iris_center"][1],
}
return EyeCenters(**data)
class Offgaze(ImmutableModel):
"""Data holder for offgaze score."""
score: float = Field(..., ge=0.0, le=1.0)
def serialize(self) -> float:
"""Serialize Offgaze object.
Returns:
float: Serialized object.
"""
return self.score
@staticmethod
def deserialize(data: float) -> Offgaze:
"""Deserialize Offgaze object.
Args:
data (float): Serialized object to float.
Returns:
Offgaze: Deserialized object.
"""
return Offgaze(score=data)
class PupilToIrisProperty(ImmutableModel):
"""Data holder for pupil-ro-iris ratios."""
pupil_to_iris_diameter_ratio: float = Field(..., gt=0, lt=1)
pupil_to_iris_center_dist_ratio: float = Field(..., ge=0, lt=1)
def serialize(self) -> Dict[str, float]:
"""Serialize PupilToIrisProperty object.
Returns:
Dict[str, float]: Serialized object.
"""
return self.dict(by_alias=True)
@staticmethod
def deserialize(data: Dict[str, float]) -> PupilToIrisProperty:
"""Deserialize PupilToIrisProperty object.
Args:
data (Dict[str, float]): Serialized object to dict.
Returns:
PupilToIrisProperty: Deserialized object.
"""
return PupilToIrisProperty(**data)
class Landmarks(ImmutableModel):
"""Data holder for eye's landmarks."""
pupil_landmarks: np.ndarray
iris_landmarks: np.ndarray
eyeball_landmarks: np.ndarray
_is_list_of_points = validator("*", allow_reuse=True)(v.is_list_of_points)
def serialize(self) -> Dict[str, List[float]]:
"""Serialize Landmarks object.
Returns:
Dict[str, List[float]]: Serialized object.
"""
return {
"pupil": self.pupil_landmarks.tolist(),
"iris": self.iris_landmarks.tolist(),
"eyeball": self.eyeball_landmarks.tolist(),
}
@staticmethod
def deserialize(data: Dict[str, List[float]]) -> Landmarks:
"""Deserialize Landmarks object.
Args:
data (Dict[str, List[float]]): Serialized object to dict.
Returns:
Landmarks: Deserialized object.
"""
data = {
"pupil_landmarks": np.array(data["pupil"]),
"iris_landmarks": np.array(data["iris"]),
"eyeball_landmarks": np.array(data["eyeball"]),
}
return Landmarks(**data)
class BoundingBox(ImmutableModel):
"""Data holder for eye's bounding box."""
x_min: float
y_min: float
x_max: float
y_max: float
_is_valid_bbox = root_validator(pre=True, allow_reuse=True)(v.is_valid_bbox)
def serialize(self) -> Dict[str, float]:
"""Serialize BoundingBox object.
Returns:
Dict[str, float]: Serialized object.
"""
return self.dict(by_alias=True)
@staticmethod
def deserialize(data: Dict[str, float]) -> BoundingBox:
"""Deserialize BoundingBox object.
Args:
data (Dict[str, float]): Serialized object to dict.
Returns:
BoundingBox: Deserialized object.
"""
return BoundingBox(**data)
class NormalizedIris(ImmutableModel):
"""Data holder for the normalized iris images."""
normalized_image: np.ndarray
normalized_mask: np.ndarray
_is_array_2D = validator("*", allow_reuse=True)(v.is_array_n_dimensions(2))
_is_binary = validator("normalized_mask", allow_reuse=True)(v.is_binary)
_img_mask_shape_match = root_validator(pre=True, allow_reuse=True)(
v.are_shapes_equal("normalized_image", "normalized_mask")
)
def serialize(self) -> Dict[str, np.ndarray]:
"""Serialize NormalizedIris object.
Returns:
Dict[str, np.ndarray]: Serialized object.
"""
return self.dict(by_alias=True)
@staticmethod
def deserialize(data: Dict[str, np.ndarray]) -> NormalizedIris:
"""Deserialize NormalizedIris object.
Args:
data (Dict[str, np.ndarray]): Serialized object to dict.
Returns:
NormalizedIris: Deserialized object.
"""
return NormalizedIris(**data)
class IrisFilterResponse(ImmutableModel):
"""Data holder for filter bank response with associated mask."""
iris_responses: List[np.ndarray]
mask_responses: List[np.ndarray]
_responses_mask_shape_match = root_validator(pre=True, allow_reuse=True)(
v.are_all_shapes_equal("iris_responses", "mask_responses")
)
def serialize(self) -> Dict[str, List[np.ndarray]]:
"""Serialize IrisFilterResponse object.
Returns:
Dict[str, List[np.ndarray]]: Serialized object.
"""
return self.dict(by_alias=True)
@staticmethod
def deserialize(data: Dict[str, List[np.ndarray]]) -> IrisFilterResponse:
"""Deserialize IrisFilterResponse object.
Args:
data (Dict[str, List[np.ndarray]]): Serialized object to dict.
Returns:
IrisFilterResponse: Deserialized object.
"""
return IrisFilterResponse(**data)
class IrisTemplate(ImmutableModel):
"""Data holder for final iris template with mask."""
iris_codes: List[np.ndarray]
mask_codes: List[np.ndarray]
_responses_mask_shape_match = root_validator(pre=True, allow_reuse=True)(
v.are_all_shapes_equal("iris_codes", "mask_codes")
)
_is_binary = validator("*", allow_reuse=True, each_item=True)(v.is_binary)
def serialize(self) -> Dict[str, np.ndarray]:
"""Serialize IrisTemplate object.
Returns:
Dict[str, np.ndarray]: Serialized object.
"""
stacked_iris_codes = np.stack(self.iris_codes)
stacked_iris_codes = stacked_iris_codes.transpose(1, 2, 0, 3)
stacked_mask_codes = np.stack(self.mask_codes)
stacked_mask_codes = stacked_mask_codes.transpose(1, 2, 0, 3)
return {
"iris_codes": stacked_iris_codes,
"mask_codes": stacked_mask_codes,
}
class EyeOcclusion(ImmutableModel):
"""Data holder for the eye occlusion."""
visible_fraction: float = Field(..., ge=-0.0, le=1.0)
def serialize(self) -> float:
"""Serialize EyeOcclusion object.
Returns:
float: Serialized object.
"""
return self.visible_fraction
@staticmethod
def deserialize(data: float) -> EyeOcclusion:
"""Deserialize EyeOcclusion object.
Args:
data (float): Serialized object to float.
Returns:
EyeOcclusion: Deserialized object.
"""
return EyeOcclusion(visible_fraction=data)
<fim_middle>if name == class_name:
return index | if name == class_name:
return index | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
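A short usage sketch of the serialize/deserialize pattern shared by these data holders, using EyeCenters as an example. It assumes the open-iris package is importable as iris; the coordinate values are made up.

import numpy as np
from iris.io.dataclasses import EyeCenters  # assumes open-iris is installed

centers = EyeCenters(pupil_x=320.0, pupil_y=240.0, iris_x=322.0, iris_y=238.0)
data = centers.serialize()            # {"iris_center": (...), "pupil_center": (...)}
restored = EyeCenters.deserialize(data)
# The round trip preserves the derived pupil-to-iris center distance.
print(np.isclose(restored.center_distance, centers.center_distance))  # True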
<filename>open-iris/src/iris/nodes/normalization/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/eye_properties_estimation/bisectors_method.py
def _find_center_coords(self, polygon: np.ndarray, diameter: float) -> Tuple[float, float]:
"""Find center coordinates of a polygon.
Args:
polygon (np.ndarray): np.ndarray.
diameter (float): diameter of the polygon.
Returns:
Tuple[float, float]: Tuple with the center location coordinates (x, y).
"""
min_distance_between_sector_points_in_px = self.params.min_distance_between_sector_points * diameter
first_bisectors_point, second_bisectors_point = self._calculate_perpendicular_bisectors(
polygon, min_distance_between_sector_points_in_px
)
return self._find_best_intersection(first_bisectors_point, second_bisectors_point)
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def _normalize_all(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize all points of an image using bilinear.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): original input image points.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask.
"""
src_shape = src_points.shape[0:2]
src_points = np.vstack(src_points)
normalized_image = np.array(
[interpolate_pixel_intensity(original_image, pixel_coords=image_xy) for image_xy in src_points]
)
normalized_image = np.reshape(normalized_image, src_shape)
normalized_mask = np.array(
[self.get_pixel_binary(iris_mask, image_xy[0], image_xy[1]) for image_xy in src_points]
)
normalized_mask = np.reshape(normalized_mask, src_shape)
return normalized_image / 255.0, normalized_mask
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def _bbox_coords(self, norm_dst_points: np.ndarray) -> Tuple[int, int, int, int]:
"""Extract the bounding box of currently processed normalized image ROI.
Args:
norm_dst_points (np.ndarray): Normalized image ROI coordinates.
Returns:
Tuple[int, int, int, int]: Bounding box coordinates in form (xmin, ymin, xmax, ymax).
"""
xmin, ymin = norm_dst_points[0].astype(int)
xmax, ymax = norm_dst_points[-1].astype(int)
return (xmin, ymin, xmax, ymax)
"""
from typing import Tuple
import numpy as np
from pydantic import NonNegativeInt
from iris.io.dataclasses import GeometryPolygons
from iris.utils import common
def generate_iris_mask(extrapolated_contours: GeometryPolygons, noise_mask: np.ndarray) -> np.ndarray:
"""Generate iris mask by first finding the intersection region between extrapolated iris contours and eyeball contours. Then remove from the outputted mask those pixels for which noise_mask is equal to True.
Args:
extrapolated_contours (GeometryPolygons): Iris polygon vertices.
noise_mask (np.ndarray): Noise mask.
Returns:
np.ndarray: Iris mask.
"""
img_h, img_w = noise_mask.shape[:2]
iris_mask = common.contour_to_mask(extrapolated_contours.iris_array, (img_w, img_h))
eyeball_mask = common.contour_to_mask(extrapolated_contours.eyeball_array, (img_w, img_h))
iris_mask = iris_mask & eyeball_mask
iris_mask = ~(iris_mask & noise_mask) & iris_mask
return iris_mask
def correct_orientation(
pupil_points: np.ndarray, iris_points: np.ndarray, eye_orientation: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Correct orientation by changing the starting angle in pupil and iris points' arrays.
Args:
pupil_points (np.ndarray): Pupil boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
eye_orientation (float): Eye orientation angle in radians.
Returns:
        Tuple[np.ndarray, np.ndarray]: Tuple with boundary points (pupil_points, iris_points) rotated according to the eye_orientation angle.
"""
orientation_angle = np.degrees(eye_orientation)
num_rotations = -round(orientation_angle * len(pupil_points) / 360.0)
pupil_points = np.roll(pupil_points, num_rotations, axis=0)
iris_points = np.roll(iris_points, num_rotations, axis=0)
return pupil_points, iris_points
def getgrids(res_in_r: NonNegativeInt, p2i_ratio: NonNegativeInt) -> np.ndarray:
"""Generate radius grids for nonlinear normalization based on p2i_ratio (pupil_to_iris ratio).
Args:
res_in_r (NonNegativeInt): Normalized image r resolution.
p2i_ratio (NonNegativeInt): pupil_to_iris ratio, range in [0,100]
Returns:
np.ndarray: nonlinear sampling grids for normalization
"""
p = [np.square(x) for x in np.arange(28, max(74 - p2i_ratio, p2i_ratio - 14), 1)]
q = p - p[0]
q = q / q[-1]
grids = np.interp(np.linspace(0, 1.0, res_in_r + 1), np.linspace(0, 1.0, len(q)), q)
return grids[0:-1] + np.diff(grids) / 2
def interpolate_pixel_intensity(image: np.ndarray, pixel_coords: Tuple[float, float]) -> float:
"""Perform bilinear interpolation to estimate pixel intensity in a given location.
Args:
image (np.ndarray): Original, not normalized image.
pixel_coords (Tuple[float, float]): Pixel coordinates.
Returns:
float: Interpolated pixel intensity.
Reference:
[1] https://en.wikipedia.org/wiki/Bilinear_interpolation
"""
def get_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return 0.0
def get_interpolation_points_coords(
image: np.ndarray, pixel_x: float, pixel_y: float
) -> Tuple[float, float, float, float]:
"""Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
"""
xmin, ymin = np.floor(pixel_x), np.floor(pixel_y)
xmax, ymax = np.ceil(pixel_x), np.ceil(pixel_y)
img_h, img_w = image.shape[:2]
<fim_suffix>
if xmin == xmax and xmax == img_w - 1:
xmin -= 1
if ymin == ymax and not ymax == img_h - 1:
ymax += 1
if ymin == ymax and ymax == img_h - 1:
ymin -= 1
return xmin, ymin, xmax, ymax
pixel_x, pixel_y = pixel_coords
xmin, ymin, xmax, ymax = get_interpolation_points_coords(image, pixel_x=pixel_x, pixel_y=pixel_y)
lower_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymax)
lower_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymax)
upper_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymin)
upper_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymin)
xs_differences = np.array([xmax - pixel_x, pixel_x - xmin])
neighboring_pixel_intensities = np.array(
[
[lower_left_pixel_intensity, upper_left_pixel_intensity],
[lower_right_pixel_intensity, upper_right_pixel_intensity],
]
)
ys_differences = np.array([[pixel_y - ymin], [ymax - pixel_y]])
pixel_intensity = np.matmul(np.matmul(xs_differences, neighboring_pixel_intensities), ys_differences)
return pixel_intensity.item()
<fim_middle>if xmin == xmax and not xmax == img_w - 1:
xmax += 1 | if xmin == xmax and not xmax == img_w - 1:
xmax += 1 | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
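The orientation correction above boils down to an np.roll of the boundary arrays. A minimal sketch using 8 boundary points instead of 512; the angle and point values are illustrative only.

import numpy as np

boundary = np.arange(16, dtype=float).reshape(8, 2)   # 8 fake boundary points
eye_orientation = np.radians(45.0)                    # 45 degrees
num_rotations = -round(np.degrees(eye_orientation) * len(boundary) / 360.0)
rolled = np.roll(boundary, num_rotations, axis=0)
print(num_rotations, rolled[0])  # -1 [2. 3.] -> the former second point now leads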
<filename>open-iris/src/iris/nodes/iris_response/probe_schemas/regular_probe_schema.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/io/validators.py
def are_all_positive(cls: type, v: Any, field: fields.ModelField) -> Any:
"""Check that all values are positive.
Args:
cls (type): Class type.
v (Any): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
        ValueError: Raised if not all values are positive.
Returns:
Any: `v` sent for further processing.
"""
if isinstance(v, Iterable):
if not np.array([value >= 0 for value in v]).all():
raise ValueError(f"{cls.__name__}: all {field.name} must be positive. Received {v}")
elif v < 0.0:
raise ValueError(f"{cls.__name__}: {field.name} must be positive. Received {v}")
return v
# open-iris/src/iris/orchestration/validators.py
def pipeline_metadata_version_check(cls: type, v: str, field: fields.ModelField) -> str:
"""Check if the version provided in the input config matches the current iris.__version__."""
if v != __version__:
raise IRISPipelineError(
f"Wrong config version. Cannot initialise IRISPipeline version {__version__} on a config file "
f"version {v}"
)
return v
# open-iris/src/iris/nodes/aggregation/noise_mask_union.py
def run(self, elements: List[NoiseMask]) -> NoiseMask:
"""Compute the union of a list of NoiseMask.
Args:
elements (List[NoiseMask]): input NoiseMasks.
Raises:
            ValueError: Raised if not all NoiseMask.mask have the same shape.
Returns:
NoiseMask: aggregated NoiseMasks
"""
if not all([mask.mask.shape == elements[0].mask.shape for mask in elements]):
raise ValueError(
f"Every NoiseMask.mask must have the same shape to be aggregated. "
f"Received {[mask.mask.shape for mask in elements]}"
)
noise_union = np.sum([mask.mask for mask in elements], axis=0) > 0
return NoiseMask(mask=noise_union)
"""
from typing import List, Literal, Optional, Tuple, Union
import numpy as np
from pydantic import Field, PositiveInt, confloat, fields, validator
from iris.io.errors import ProbeSchemaError
from iris.nodes.iris_response.probe_schemas.probe_schema_interface import ProbeSchema
class RegularProbeSchema(ProbeSchema):
"""Probe Schema for a regular Grid."""
class RegularProbeSchemaParameters(ProbeSchema.ProbeSchemaParameters):
"""RegularProbeSchema parameters."""
n_rows: int = Field(..., gt=1)
n_cols: int = Field(..., gt=1)
boundary_rho: List[confloat(ge=0.0, lt=1)]
boundary_phi: Union[
Literal["periodic-symmetric", "periodic-left"],
List[confloat(ge=0.0, lt=1)],
]
image_shape: Optional[List[PositiveInt]]
@validator("boundary_rho", "boundary_phi")
def check_overlap(
cls: type,
v: Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]],
field: fields.ModelField,
) -> Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]:
"""Validate offsets to avoid overlap.
Args:
cls (type): Class type.
v (Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
            ProbeSchemaError: Raised if the left and right offsets overlap, i.e. sum to 1 or more.
Returns:
Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]: The value for boundary_rho or boundary_phi respectively
"""
if isinstance(v, List):
if (v[0] + v[1]) >= 1:
raise ProbeSchemaError(
f"Offset for {field.name} on left and right corner must be a sum smaller 1, otherwise, offsets overlap."
)
return v
__parameters_type__ = RegularProbeSchemaParameters
def __init__(
self,
n_rows: int,
n_cols: int,
boundary_rho: List[float] = [0, 0.0625],
boundary_phi: Union[
Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]
] = "periodic-left",
image_shape: Optional[List[PositiveInt]] = None,
) -> None:
"""Assign parameters.
Args:
n_rows (int): Number of rows used, represents the number of different rho
values
n_cols (int): Number of columns used, represents the number of different
phi values
            boundary_rho (List[float], optional): List with two values f1 and f2. The sampling goes from 0+f1 to 1-f2.
boundary_phi (Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]], optional): Boundary conditions for the probing
can either be periodic or non-periodic, if they are periodic, the distance
from one column to the next must be the same also for the boundaries.
Else, no conditions for the boundaries are required. Options are:
- 'periodic-symmetric': the first and the last column are placed with an offset to the
borders, that is half of the spacing of the two columns
- 'periodic-left': the first column is at the border of the bottom of the image, while
the last column is one spacing apart from the top of the image
            - list with two values: in this case an offset of value f1 and f2 is set on both ends, i.e.
                the sampling no longer goes from 0 to 1 ('no-offset') but instead from 0+f1 to 1-f2
            Defaults to "periodic-left".
image_shape (list, optional): list containing the desired image dimensions. If provided, the function will throw
a warning if interpolation happens, i.e. if a kernel would be placed in between two pixels. Defaults to None.
"""
super().__init__(
n_rows=n_rows,
n_cols=n_cols,
boundary_rho=boundary_rho,
boundary_phi=boundary_phi,
image_shape=image_shape,
)
def generate_schema(self) -> Tuple[np.ndarray, np.ndarray]:
"""Generate rhos and phis.
Return:
Tuple[np.ndarray, np.ndarray]: the rhos and phis.
"""
rho = np.linspace(
0 + self.params.boundary_rho[0], 1 - self.params.boundary_rho[1], self.params.n_rows, endpoint=True
)
if self.params.boundary_phi == "periodic-symmetric":
phi = np.linspace(0, 1, self.params.n_cols, endpoint=False)
phi = phi + (phi[1] - phi[0]) / 2
<fim_suffix>
if isinstance(self.params.boundary_phi, List):
phi = np.linspace(
0 + self.params.boundary_phi[0], 1 - self.params.boundary_phi[1], self.params.n_cols, endpoint=True
)
phis, rhos = np.meshgrid(phi, rho)
rhos = rhos.flatten()
phis = phis.flatten()
# if image_shape provided: verify that values lie on pixel values
if self.params.image_shape is not None:
rhos_pixel_values = rhos * self.params.image_shape[0]
phis_pixel_values = phis * self.params.image_shape[1]
rho_pixel_values = np.logical_or(
np.less_equal(rhos_pixel_values % 1, 10 ** (-10)),
np.less_equal(1 - 10 ** (-10), rhos_pixel_values % 1),
).all()
phi_pixel_values = np.logical_or(
np.less_equal(phis_pixel_values % 1, 10 ** (-10)),
np.less_equal(1 - 10 ** (-10), phis_pixel_values % 1),
).all()
if not rho_pixel_values:
raise ProbeSchemaError(
f"Choice for n_rows {self.params.n_rows} leads to interpolation errors, please change input variables"
)
if not phi_pixel_values:
raise ProbeSchemaError(f"Choice for n_cols {self.params.n_cols} leads to interpolation errors")
return rhos, phis
@staticmethod
def find_suitable_n_rows(
row_min: int,
row_max: int,
length: int,
boundary_condition: Union[
Literal["periodic-symmetric", "periodic-left"],
List[float],
] = "periodic_symmetric",
) -> List[int]:
"""Find proper spacing of rows/columns for given boundary conditions (i.e. image size, offset. etc).
Args:
row_min (int): Starting value for row count
row_max (int): End value for row count
length (int): Pixels in the respective dimension
boundary_condition (Union[Literal["periodic-symmetric", "periodic-left"], List[float]], optional): Boundary conditions for the probing
can either be periodic or non-periodic, if they are periodic, the distance
from one row to the next must be the same also for the boundaries.
Else, no conditions for the boundaries are required. Options are:
- 'periodic-symmetric': the first and the last row are placed with an offset to the
borders, that is half of the spacing of the two rows
- 'periodic-left': the first row is at the border of the bottom of the image, while
the last row is one spacing apart from the top of the image
            - list with two values: in this case an offset of value f1 and f2 is set on both ends, i.e.
                the sampling no longer goes from 0 to 1 ('no-offset') but instead from 0+f1 to 1-f2
            Defaults to "periodic-symmetric".
        Returns:
            List[int]: List of all numbers of rows that do not lead to interpolation errors
"""
suitable_values: List[int] = []
# loop through all values and validate whether they are suitable
for counter in range(row_min, row_max + 1):
if boundary_condition == "periodic-symmetric":
values = np.linspace(0, 1, counter, endpoint=False)
values = values + (values[1] - values[0]) / 2
if boundary_condition == "periodic-left":
values = np.linspace(0, 1, counter, endpoint=False)
if isinstance(boundary_condition, List):
values = np.linspace(0 + boundary_condition[0], 1 - boundary_condition[1], counter, endpoint=True)
pixel_values = values * length
pixel_values_modulo = pixel_values % 1
no_interpolation = np.less_equal(pixel_values_modulo, 10 ** (-10))
no_interpolation = np.logical_or(no_interpolation, np.less_equal(1 - 10 ** (-10), pixel_values_modulo))
no_interpolation = no_interpolation.all()
if no_interpolation:
suitable_values.append(counter)
return suitable_values
<fim_middle>if self.params.boundary_phi == "periodic-left":
phi = np.linspace(0, 1, self.params.n_cols, endpoint=False) | if self.params.boundary_phi == "periodic-left":
phi = np.linspace(0, 1, self.params.n_cols, endpoint=False) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
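The pixel-alignment test behind find_suitable_n_rows above, sketched with a simplified tolerance (np.isclose instead of the 1e-10 threshold used in the code); length = 100 and the candidate counts are arbitrary.

import numpy as np

length = 100
for counter in (4, 5, 7):
    values = np.linspace(0, 1, counter, endpoint=False)   # 'periodic-left' spacing
    remainder = (values * length) % 1
    on_pixels = bool(np.all(np.isclose(remainder, 0) | np.isclose(remainder, 1)))
    print(counter, on_pixels)  # 4 True, 5 True, 7 False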
<filename>open-iris/src/iris/io/validators.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/io/dataclasses.py
def _check_segmap_shape_and_consistency(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check that the number of classes equals the depth of the segmentation map.
Args:
values (Dict[str, Any]): Dictionary with segmap and classes {param_name: data}.
Raises:
        ValueError: Raised if the number of classes does not match the depth of the segmentation map.
Returns:
Dict[str, Any]: Unmodified values parameter passed for further processing.
"""
if values["predictions"].shape[2] != len(values["index2class"]):
segmap_depth, nb_classes = values["predictions"].shape, len(values["index2class"])
raise ValueError(
f"{cls.__name__}: mismatch between predictions shape {segmap_depth} and classes length {nb_classes}."
)
return values
# open-iris/src/iris/orchestration/validators.py
def pipeline_metadata_version_check(cls: type, v: str, field: fields.ModelField) -> str:
"""Check if the version provided in the input config matches the current iris.__version__."""
if v != __version__:
raise IRISPipelineError(
f"Wrong config version. Cannot initialise IRISPipeline version {__version__} on a config file "
f"version {v}"
)
return v
# open-iris/src/iris/orchestration/output_builders.py
def __get_error(call_trace: PipelineCallTraceStorage) -> Optional[Dict[str, Any]]:
"""Produce error output from a call_trace.
Args:
call_trace (PipelineCallTraceStorage): Pipeline call trace.
Returns:
Optional[Dict[str, Any]]: Optional error dictionary if such occured.
"""
exception = call_trace.get_error()
error = None
if isinstance(exception, Exception):
error = {
"error_type": type(exception).__name__,
"message": str(exception),
"traceback": "".join(traceback.format_tb(exception.__traceback__)),
}
return error
"""
from typing import Any, Callable, Dict, Iterable, List
import numpy as np
from pydantic import fields
# ----- validators -----
def is_odd(cls: type, v: int, field: fields.ModelField) -> int:
"""Check that kernel size are odd numbers.
Args:
cls (type): Class type.
v (int): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Exception raised if number isn't odd.
Returns:
int: `v` sent for further processing.
"""
if (v % 2) == 0:
raise ValueError(f"{cls.__name__}: {field.name} must be odd numbers.")
return v
def is_binary(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
"""Check if array has only boolean values, i.e. is binary.
Args:
cls (type): Class type.
v (np.ndarray): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Exception raised if array doesn't contain bool datatypes.
Returns:
np.ndarray: `v` sent for further processing.
"""
if v.dtype != np.dtype("bool"):
raise ValueError(f"{cls.__name__}: {field.name} must be binary. got dtype {v.dtype}")
return v
def is_list_of_points(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
"""Check if np.ndarray has shape (_, 2).
Args:
cls (type): Class type.
v (np.ndarray): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Exception raised if array doesn't contain 2D points.
Returns:
np.ndarray: `v` sent for further processing.
"""
if len(v.shape) != 2 or v.shape[1] != 2:
raise ValueError(f"{cls.__name__}: {field.name} must have shape (_, 2).")
return v
def is_not_empty(cls: type, v: List[Any], field: fields.ModelField) -> List[Any]:
"""Check that both inputs are not empty.
Args:
cls (type): Class type.
v (List[Any]): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Exception raised if list is empty.
Returns:
List[Any]: `v` sent for further processing.
"""
if len(v) == 0:
raise ValueError(f"{cls.__name__}: {field.name} list cannot be empty.")
return v
def is_not_zero_sum(cls: type, v: Any, field: fields.ModelField) -> Any:
"""Check that both inputs are not empty.
Args:
cls (type): Class type.
v (Any): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Raised if v sums to 0.
Returns:
Any: `v` sent for further processing.
"""
if np.sum(v) == 0:
raise ValueError(f"{cls.__name__}: {field.name} sum cannot be zero.")
return v
def are_all_positive(cls: type, v: Any, field: fields.ModelField) -> Any:
"""Check that all values are positive.
Args:
cls (type): Class type.
v (Any): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Raised if not all values are positive.
Returns:
Any: `v` sent for further processing.
"""
if isinstance(v, Iterable):
if not np.array([value >= 0 for value in v]).all():
raise ValueError(f"{cls.__name__}: all {field.name} must be positive. Received {v}")
elif v < 0.0:
raise ValueError(f"{cls.__name__}: {field.name} must be positive. Received {v}")
return v
def to_dtype_float32(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
"""Convert input np.ndarray to dtype np.float32.
Args:
cls (type): Class type.
v (np.ndarray): Value to convert
field (fields.ModelField): Field descriptor.
Returns:
np.ndarray: `v` sent for further processing.
"""
return v.astype(np.float32)
# ----- root_validators -----
def is_valid_bbox(cls: type, values: Dict[str, float]) -> Dict[str, float]:
"""Check that the bounding box is valid."""
if values["x_min"] >= values["x_max"] or values["y_min"] >= values["y_max"]:
raise ValueError(
f'{cls.__name__}: invalid bbox. x_min={values["x_min"]}, x_max={values["x_max"]},'
f' y_min={values["y_min"]}, y_max={values["y_max"]}'
)
return values
# ----- parametrized validators -----
def is_array_n_dimensions(nb_dimensions: int) -> Callable:
"""Create a pydantic validator checking if an array is n-dimensional.
Args:
nb_dimensions (int): number of dimensions the array must have
Returns:
Callable: the validator.
"""
def validator(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
"""Check if the array has the right number of dimensions."""
if len(v.shape) != nb_dimensions and (v.shape != (0,) or nb_dimensions != 0):
raise ValueError(
f"{cls.__name__}: wrong number of dimensions for {field.name}. "
f"Expected {nb_dimensions}, got {len(v.shape)}"
)
return v
return validator
# ----- parametrized root_validators -----
def are_lengths_equal(field1: str, field2: str) -> Callable:
"""Create a pydantic validator checking if the two fields have the same length.
Args:
field1 (str): name of the first field
field2 (str): name of the second field
Returns:
Callable: the validator.
"""
def __root_validator(cls: type, values: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
"""Check if len(field1) equals len(field2)."""
<fim_suffix>
return values
return __root_validator
def are_shapes_equal(field1: str, field2: str) -> Callable:
"""Create a pydantic validator checking if the two fields have the same shape.
Args:
field1 (str): name of the first field
field2 (str): name of the second field
Returns:
Callable: the validator.
"""
def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
"""Check if field1.shape equals field2.shape."""
if values[field1].shape != values[field2].shape:
raise ValueError(f"{cls.__name__}: {field1} and {field2} shape mismatch.")
return values
return __root_validator
def are_all_shapes_equal(field1: str, field2: str) -> Callable:
"""Create a pydantic validator checking if two lists of array have the same shape per element.
This function creates a pydantic validator for two lists of np.ndarrays which checks if they have the same length,
and if all of their elements have the same shape, one by one.
Args:
field1 (str): name of the first field
field2 (str): name of the second field
Returns:
Callable: the validator.
"""
def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
"""Check if len(field1) equals len(field2) and if every element have the same shape."""
shapes_field_1 = [element.shape for element in values[field1]]
shapes_field_2 = [element.shape for element in values[field2]]
if len(values[field1]) != len(values[field2]) or shapes_field_1 != shapes_field_2:
raise ValueError(
f"{cls.__name__}: {field1} and {field2} shape mismatch, resp. {shapes_field_1} and {shapes_field_2}."
)
return values
return __root_validator
<fim_middle>if len(values[field1]) != len(values[field2]):
raise ValueError(
f"{cls.__name__}: {field1} and {field2} length mismatch, "
f"resp. {len(values[field1])} and {len(values[field2])}"
) | if len(values[field1]) != len(values[field2]):
raise ValueError(
f"{cls.__name__}: {field1} and {field2} length mismatch, "
f"resp. {len(values[field1])} and {len(values[field2])}"
) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>open-iris/src/iris/nodes/normalization/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/eye_properties_estimation/bisectors_method.py
def _find_center_coords(self, polygon: np.ndarray, diameter: float) -> Tuple[float, float]:
"""Find center coordinates of a polygon.
Args:
polygon (np.ndarray): np.ndarray.
diameter (float): diameter of the polygon.
Returns:
Tuple[float, float]: Tuple with the center location coordinates (x, y).
"""
min_distance_between_sector_points_in_px = self.params.min_distance_between_sector_points * diameter
first_bisectors_point, second_bisectors_point = self._calculate_perpendicular_bisectors(
polygon, min_distance_between_sector_points_in_px
)
return self._find_best_intersection(first_bisectors_point, second_bisectors_point)
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def _normalize_all(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize all points of an image using bilinear.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): original input image points.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask.
"""
src_shape = src_points.shape[0:2]
src_points = np.vstack(src_points)
normalized_image = np.array(
[interpolate_pixel_intensity(original_image, pixel_coords=image_xy) for image_xy in src_points]
)
normalized_image = np.reshape(normalized_image, src_shape)
normalized_mask = np.array(
[self.get_pixel_binary(iris_mask, image_xy[0], image_xy[1]) for image_xy in src_points]
)
normalized_mask = np.reshape(normalized_mask, src_shape)
return normalized_image / 255.0, normalized_mask
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def _bbox_coords(self, norm_dst_points: np.ndarray) -> Tuple[int, int, int, int]:
"""Extract the bounding box of currently processed normalized image ROI.
Args:
norm_dst_points (np.ndarray): Normalized image ROI coordinates.
Returns:
Tuple[int, int, int, int]: Bounding box coordinates in form (xmin, ymin, xmax, ymax).
"""
xmin, ymin = norm_dst_points[0].astype(int)
xmax, ymax = norm_dst_points[-1].astype(int)
return (xmin, ymin, xmax, ymax)
"""
from typing import Tuple
import numpy as np
from pydantic import NonNegativeInt
from iris.io.dataclasses import GeometryPolygons
from iris.utils import common
def generate_iris_mask(extrapolated_contours: GeometryPolygons, noise_mask: np.ndarray) -> np.ndarray:
"""Generate iris mask by first finding the intersection region between extrapolated iris contours and eyeball contours. Then remove from the outputted mask those pixels for which noise_mask is equal to True.
Args:
extrapolated_contours (GeometryPolygons): Iris polygon vertices.
noise_mask (np.ndarray): Noise mask.
Returns:
np.ndarray: Iris mask.
"""
img_h, img_w = noise_mask.shape[:2]
iris_mask = common.contour_to_mask(extrapolated_contours.iris_array, (img_w, img_h))
eyeball_mask = common.contour_to_mask(extrapolated_contours.eyeball_array, (img_w, img_h))
iris_mask = iris_mask & eyeball_mask
iris_mask = ~(iris_mask & noise_mask) & iris_mask
return iris_mask
def correct_orientation(
pupil_points: np.ndarray, iris_points: np.ndarray, eye_orientation: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Correct orientation by changing the starting angle in pupil and iris points' arrays.
Args:
pupil_points (np.ndarray): Pupil boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
eye_orientation (float): Eye orientation angle in radians.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with rotated based on eye_orientation angle boundary points (pupil_points, iris_points).
"""
orientation_angle = np.degrees(eye_orientation)
num_rotations = -round(orientation_angle * len(pupil_points) / 360.0)
pupil_points = np.roll(pupil_points, num_rotations, axis=0)
iris_points = np.roll(iris_points, num_rotations, axis=0)
return pupil_points, iris_points
def getgrids(res_in_r: NonNegativeInt, p2i_ratio: NonNegativeInt) -> np.ndarray:
"""Generate radius grids for nonlinear normalization based on p2i_ratio (pupil_to_iris ratio).
Args:
res_in_r (NonNegativeInt): Normalized image r resolution.
p2i_ratio (NonNegativeInt): pupil_to_iris ratio, range in [0,100]
Returns:
np.ndarray: nonlinear sampling grids for normalization
"""
p = [np.square(x) for x in np.arange(28, max(74 - p2i_ratio, p2i_ratio - 14), 1)]
q = p - p[0]
q = q / q[-1]
grids = np.interp(np.linspace(0, 1.0, res_in_r + 1), np.linspace(0, 1.0, len(q)), q)
return grids[0:-1] + np.diff(grids) / 2
def interpolate_pixel_intensity(image: np.ndarray, pixel_coords: Tuple[float, float]) -> float:
"""Perform bilinear interpolation to estimate pixel intensity in a given location.
Args:
image (np.ndarray): Original, not normalized image.
pixel_coords (Tuple[float, float]): Pixel coordinates.
Returns:
float: Interpolated pixel intensity.
Reference:
[1] https://en.wikipedia.org/wiki/Bilinear_interpolation
"""
def get_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return 0.0
def get_interpolation_points_coords(
image: np.ndarray, pixel_x: float, pixel_y: float
) -> Tuple[float, float, float, float]:
"""Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
"""
xmin, ymin = np.floor(pixel_x), np.floor(pixel_y)
xmax, ymax = np.ceil(pixel_x), np.ceil(pixel_y)
img_h, img_w = image.shape[:2]
if xmin == xmax and not xmax == img_w - 1:
xmax += 1
if xmin == xmax and xmax == img_w - 1:
xmin -= 1
<fim_suffix>
if ymin == ymax and ymax == img_h - 1:
ymin -= 1
return xmin, ymin, xmax, ymax
pixel_x, pixel_y = pixel_coords
xmin, ymin, xmax, ymax = get_interpolation_points_coords(image, pixel_x=pixel_x, pixel_y=pixel_y)
lower_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymax)
lower_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymax)
upper_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymin)
upper_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymin)
xs_differences = np.array([xmax - pixel_x, pixel_x - xmin])
neighboring_pixel_intensities = np.array(
[
[lower_left_pixel_intensity, upper_left_pixel_intensity],
[lower_right_pixel_intensity, upper_right_pixel_intensity],
]
)
ys_differences = np.array([[pixel_y - ymin], [ymax - pixel_y]])
pixel_intensity = np.matmul(np.matmul(xs_differences, neighboring_pixel_intensities), ys_differences)
return pixel_intensity.item()
<fim_middle>if ymin == ymax and not ymax == img_h - 1:
ymax += 1 | if ymin == ymax and not ymax == img_h - 1:
ymax += 1 | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>open-iris/src/iris/nodes/iris_response/image_filters/gabor_filters.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/segmentation/onnx_multilabel_segmentation.py
def _preprocess(self, image: np.ndarray) -> Dict[str, np.ndarray]:
"""Preprocess image so that inference with ONNX model is possible.
Args:
image (np.ndarray): Infrared image object.
Returns:
Dict[str, np.ndarray]: Dictionary with wrapped input name and image data {input_name: image_data}.
"""
nn_input = image.copy()
nn_input = self.preprocess(nn_input, self.params.input_resolution, self.params.input_num_channels)
return {self.params.session.get_inputs()[0].name: nn_input.astype(np.float32)}
# open-iris/src/iris/nodes/vectorization/contouring.py
def filter_polygon_areas(
polygons: List[np.ndarray], rel_tr: NonNegativeFloat = 0.03, abs_tr: NonNegativeFloat = 0.0
) -> List[np.ndarray]:
"""Filter out polygons whose area is below either an absolute threshold or a fraction of the largest area.
Args:
polygons (List[np.ndarray]): List of polygons to filter.
rel_tr (NonNegativeFloat, optional): Relative threshold. Defaults to 0.03.
abs_tr (NonNegativeFloat, optional): Absolute threshold. Defaults to 0.0.
Returns:
List[np.ndarray]: Filtered polygons' list.
"""
areas = [area(polygon) if len(polygon) > 2 else 1.0 for polygon in polygons]
area_factors = np.array(areas) / np.max(areas)
filtered_polygons = [
polygon
for area, area_factor, polygon in zip(areas, area_factors, polygons)
if area > abs_tr and area_factor > rel_tr
]
return filtered_polygons
# open-iris/src/iris/io/validators.py
def is_odd(cls: type, v: int, field: fields.ModelField) -> int:
"""Check that kernel size are odd numbers.
Args:
cls (type): Class type.
v (int): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Exception raised if number isn't odd.
Returns:
int: `v` sent for further processing.
"""
if (v % 2) == 0:
raise ValueError(f"{cls.__name__}: {field.name} must be odd numbers.")
return v
"""
from typing import Any, Dict, Tuple
import numpy as np
from pydantic import Field, conint, root_validator, validator
import iris.io.validators as pydantic_v
from iris.io.errors import ImageFilterError
from iris.nodes.iris_response.image_filters.image_filter_interface import ImageFilter
def upper_bound_Gabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bounds of Gabor filter parameters such as sigma_phi, sigma_rho and lambda_phi for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: Raised if 1) sigma_phi is greater than kernel_size[0], 2) sigma_rho is greater than kernel_size[1], 3) lambda_phi greater than kernel_size[0].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, sigma_phi, sigma_rho, lambda_phi = (
values["kernel_size"],
values["sigma_phi"],
values["sigma_rho"],
values["lambda_phi"],
)
if sigma_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: sigma_phi can not be greater than kernel_size[0].")
if sigma_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: sigma_rho can not be greater than kernel_size[1].")
if lambda_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: lambda_phi can not be greater than kernel_size[0].")
return values
def upper_bound_LogGabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bound of LogGabor filter parameter lambda_rho for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: lambda_rho can not be greater than kernel_size[1].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, lambda_rho = values["kernel_size"], values["lambda_rho"]
if lambda_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: lambda_rho can not be greater than kernel_size[1].")
return values
def get_xy_mesh(kernel_size: Tuple[int, int]) -> Tuple[np.ndarray, np.ndarray]:
"""Get (x,y) meshgrids for a given kernel size.
Args:
kernel_size (Tuple[int, int]): Kernel width and height.
Returns:
Tuple[np.ndarray, np.ndarray]: meshgrid of (x, y) positions.
"""
ksize_phi_half = kernel_size[0] // 2
ksize_rho_half = kernel_size[1] // 2
y, x = np.meshgrid(
np.arange(-ksize_phi_half, ksize_phi_half + 1),
np.arange(-ksize_rho_half, ksize_rho_half + 1),
indexing="xy",
sparse=True,
)
return x, y
def get_radius(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""Get radius to the image center for a given array of relative positions (x,y).
Args:
x (np.ndarray): x position relative to the image center.
y (np.ndarray): y position relative to the image center.
Returns:
np.ndarray: radius to the image center.
"""
radius = np.sqrt(x**2 + y**2)
return radius
def rotate(x: np.ndarray, y: np.ndarray, angle: float) -> Tuple[np.ndarray, np.ndarray]:
"""Rotate a given array of relative positions (x,y) by a given angle.
Args:
x (np.ndarray): x position.
y (np.ndarray): y position.
angle (float): angle for rotation (in degrees).
Returns:
Tuple[np.ndarray, np.ndarray]: rotated x, y positions.
"""
cos_theta = np.cos(angle * np.pi / 180)
sin_theta = np.sin(angle * np.pi / 180)
rotx = x * cos_theta + y * sin_theta
roty = -x * sin_theta + y * cos_theta
return rotx, roty
def normalize_kernel_values(kernel_values: np.ndarray) -> np.ndarray:
"""Normalize the kernel values so that the square sum is 1.
Args:
kernel_values (np.ndarray): Kernel values (complex numbers).
Returns:
np.ndarray: normalized Kernel values.
"""
norm_real = np.linalg.norm(kernel_values.real, ord="fro")
<fim_suffix>
norm_imag = np.linalg.norm(kernel_values.imag, ord="fro")
if norm_imag > 0:
kernel_values.imag /= norm_imag
return kernel_values
def convert_to_fixpoint_kernelvalues(kernel_values: np.ndarray) -> np.ndarray:
"""Convert the kernel values (both real and imaginary) to fix points.
Args:
kernel_values (np.ndarray): Kernel values.
Returns:
np.ndarray: fix-point Kernel values.
"""
if np.iscomplexobj(kernel_values):
kernel_values.real = np.round(kernel_values.real * 2**15)
kernel_values.imag = np.round(kernel_values.imag * 2**15)
else:
kernel_values = np.round(kernel_values * 2**15)
return kernel_values
class GaborFilter(ImageFilter):
"""Implementation of a 2D Gabor filter.
Reference:
[1] https://inc.ucsd.edu/mplab/75/media//gabor.pdf.
"""
class Parameters(ImageFilter.Parameters):
"""GaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., ge=1)
sigma_rho: float = Field(..., ge=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_phi: float = Field(..., ge=2)
dc_correction: bool
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_Gabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_phi: float,
dc_correction: bool = True,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): phi standard deviation.
sigma_rho (float): rho standard deviation.
theta_degrees (float): orientation of kernel in degrees.
lambda_phi (float): wavelength of the sinusoidal factor, lower value = thinner strip.
dc_correction (bool, optional): whether to enable DC correction. Defaults to True.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_phi=lambda_phi,
dc_correction=dc_correction,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D Gabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
rotx, roty = rotate(x, y, self.params.theta_degrees)
# calculate carrier and envelope
carrier = 1j * 2 * np.pi / self.params.lambda_phi * rotx
envelope = -(rotx**2 / self.params.sigma_phi**2 + roty**2 / self.params.sigma_rho**2) / 2
# calculate kernel values
kernel_values = np.exp(envelope + carrier)
kernel_values /= 2 * np.pi * self.params.sigma_phi * self.params.sigma_rho
# apply DC correction
if self.params.dc_correction:
# Step 1: calculate mean value of Gabor Wavelet
g_mean = np.mean(np.real(kernel_values), axis=-1)
# Step 2: define gaussian offset
correction_term_mean = np.mean(envelope, axis=-1)
# Step 3: subtract gaussian
kernel_values = kernel_values - (g_mean / correction_term_mean)[:, np.newaxis] * envelope
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
class LogGaborFilter(ImageFilter):
"""Implementation of a 2D LogGabor filter.
Reference:
[1] https://en.wikipedia.org/wiki/Log_Gabor_filter.
"""
class Parameters(ImageFilter.Parameters):
"""LogGaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., gt=0, le=np.pi)
sigma_rho: float = Field(..., gt=0.1, le=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_rho: float = Field(..., gt=2)
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_LogGabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_rho: float,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): bandwidth in phi (frequency domain).
sigma_rho (float): bandwidth in rho (frequency domain).
theta_degrees (float): orientation of filter in degrees.
lambda_rho (float): wavelength in rho.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_rho=lambda_rho,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D LogGabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
radius = get_radius(x, y)
# remove 0 radius value in the center
ksize_phi_half = self.params.kernel_size[0] // 2
ksize_rho_half = self.params.kernel_size[1] // 2
radius[ksize_rho_half][ksize_phi_half] = 1
# get angular distance
[rotx, roty] = rotate(x, y, self.params.theta_degrees)
dtheta = np.arctan2(roty, rotx)
# calculate envelope and orientation
envelope = np.exp(
-0.5
* np.log2(radius * self.params.lambda_rho / self.params.kernel_size[1]) ** 2
/ self.params.sigma_rho**2
)
envelope[ksize_rho_half][ksize_phi_half] = 0
orientation = np.exp(-0.5 * dtheta**2 / self.params.sigma_phi**2)
# calculate kernel values
kernel_values = envelope * orientation
kernel_values = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kernel_values)))
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
<fim_middle>if norm_real > 0:
kernel_values.real /= norm_real | if norm_real > 0:
kernel_values.real /= norm_real | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
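A quick check of the contract documented for normalize_kernel_values above: after normalization, the real and imaginary parts each have a square sum of 1. The random kernel is illustration only, and the import assumes the iris package from this repository is installed:

import numpy as np

from iris.nodes.iris_response.image_filters.gabor_filters import normalize_kernel_values

rng = np.random.default_rng(0)
kernel = rng.normal(size=(5, 7)) + 1j * rng.normal(size=(5, 7))

normalized = normalize_kernel_values(kernel)

print(np.sum(normalized.real ** 2))  # ~1.0
print(np.sum(normalized.imag ** 2))  # ~1.0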
<filename>open-iris/src/iris/nodes/iris_response/image_filters/gabor_filters.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/iris_response/image_filters/image_filter_interface.py
def compute_kernel_values(self) -> np.ndarray:
"""Compute values of filter kernel.
Returns:
np.ndarray: Computed kernel values.
"""
pass
# open-iris/src/iris/nodes/geometry_refinement/smoothing.py
def _find_start_index(self, phi: np.ndarray) -> int:
"""Find the start index by checking the largest gap. phi needs to be sorted.
Args:
phi (np.ndarray): phi angle values.
Raises:
GeometryRefinementError: Raised if phi values are not sorted ascendingly.
Returns:
int: Index value.
"""
if not np.all((phi - np.roll(phi, 1))[1:] >= 0):
raise GeometryRefinementError("Smoothing._find_start_index phi must be sorted ascendingly!")
phi_tmp = np.concatenate(([phi[-1] - 2 * np.pi], phi, [phi[0] + 2 * np.pi]))
phi_tmp_left_neighbor = np.roll(phi_tmp, 1)
dphi = (phi_tmp - phi_tmp_left_neighbor)[1:-1]
largest_gap_index = np.argmax(dphi)
return int(largest_gap_index)
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def cartesian2homogeneous(points: List[np.ndarray]) -> np.ndarray:
"""Convert points in cartesian coordinates to homogeneous coordinates.
Args:
points (List[np.ndarray]): Points in cartesian coordinates. Array should be in format: [[x values], [y values]].
Returns:
np.ndarray: Points in homogeneous coordinates. Returned array will have format: [[x values], [y values], [1 ... 1]].
"""
x_coords, y_coords = points
x_coords = x_coords.reshape(-1, 1)
y_coords = y_coords.reshape(-1, 1)
homogeneous_coords = np.hstack([x_coords, y_coords, np.ones((len(x_coords), 1))])
return homogeneous_coords.T
"""
from typing import Any, Dict, Tuple
import numpy as np
from pydantic import Field, conint, root_validator, validator
import iris.io.validators as pydantic_v
from iris.io.errors import ImageFilterError
from iris.nodes.iris_response.image_filters.image_filter_interface import ImageFilter
def upper_bound_Gabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bounds of Gabor filter parameters such as sigma_phi, sigma_rho and lambda_phi for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: Raised if 1) sigma_phi is greater than kernel_size[0], 2) sigma_rho is greater than kernel_size[1], 3) lambda_phi greater than kernel_size[0].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, sigma_phi, sigma_rho, lambda_phi = (
values["kernel_size"],
values["sigma_phi"],
values["sigma_rho"],
values["lambda_phi"],
)
if sigma_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: sigma_phi can not be greater than kernel_size[0].")
if sigma_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: sigma_rho can not be greater than kernel_size[1].")
if lambda_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: lambda_phi can not be greater than kernel_size[0].")
return values
def upper_bound_LogGabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bound of LogGabor filter parameter lambda_rho for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: lambda_rho can not be greater than kernel_size[1].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, lambda_rho = values["kernel_size"], values["lambda_rho"]
if lambda_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: lambda_rho can not be greater than kernel_size[1].")
return values
def get_xy_mesh(kernel_size: Tuple[int, int]) -> Tuple[np.ndarray, np.ndarray]:
"""Get (x,y) meshgrids for a given kernel size.
Args:
kernel_size (Tuple[int, int]): Kernel width and height.
Returns:
Tuple[np.ndarray, np.ndarray]: meshgrid of (x, y) positions.
"""
ksize_phi_half = kernel_size[0] // 2
ksize_rho_half = kernel_size[1] // 2
y, x = np.meshgrid(
np.arange(-ksize_phi_half, ksize_phi_half + 1),
np.arange(-ksize_rho_half, ksize_rho_half + 1),
indexing="xy",
sparse=True,
)
return x, y
def get_radius(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""Get radius to the image center for a given array of relative positions (x,y).
Args:
x (np.ndarray): x position relative to the image center.
y (np.ndarray): y position relative to the image center.
Returns:
np.ndarray: radius to the image center.
"""
radius = np.sqrt(x**2 + y**2)
return radius
def rotate(x: np.ndarray, y: np.ndarray, angle: float) -> Tuple[np.ndarray, np.ndarray]:
"""Rotate a given array of relative positions (x,y) by a given angle.
Args:
x (np.ndarray): x position.
y (np.ndarray): y position.
angle (float): angle for rotation (in degrees).
Returns:
Tuple[np.ndarray, np.ndarray]: rotated x, y positions.
"""
cos_theta = np.cos(angle * np.pi / 180)
sin_theta = np.sin(angle * np.pi / 180)
rotx = x * cos_theta + y * sin_theta
roty = -x * sin_theta + y * cos_theta
return rotx, roty
def normalize_kernel_values(kernel_values: np.ndarray) -> np.ndarray:
"""Normalize the kernel values so that the square sum is 1.
Args:
kernel_values (np.ndarray): Kernel values (complex numbers).
Returns:
np.ndarray: normalized Kernel values.
"""
norm_real = np.linalg.norm(kernel_values.real, ord="fro")
if norm_real > 0:
kernel_values.real /= norm_real
norm_imag = np.linalg.norm(kernel_values.imag, ord="fro")
if norm_imag > 0:
kernel_values.imag /= norm_imag
return kernel_values
def convert_to_fixpoint_kernelvalues(kernel_values: np.ndarray) -> np.ndarray:
"""Convert the kernel values (both real and imaginary) to fix points.
Args:
kernel_values (np.ndarray): Kernel values.
Returns:
np.ndarray: fix-point Kernel values.
"""
if np.iscomplexobj(kernel_values):
kernel_values.real = np.round(kernel_values.real * 2**15)
kernel_values.imag = np.round(kernel_values.imag * 2**15)
else:
kernel_values = np.round(kernel_values * 2**15)
return kernel_values
class GaborFilter(ImageFilter):
"""Implementation of a 2D Gabor filter.
Reference:
[1] https://inc.ucsd.edu/mplab/75/media//gabor.pdf.
"""
class Parameters(ImageFilter.Parameters):
"""GaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., ge=1)
sigma_rho: float = Field(..., ge=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_phi: float = Field(..., ge=2)
dc_correction: bool
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_Gabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_phi: float,
dc_correction: bool = True,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): phi standard deviation.
sigma_rho (float): rho standard deviation.
theta_degrees (float): orientation of kernel in degrees.
lambda_phi (float): wavelength of the sinusoidal factor, lower value = thinner strip.
dc_correction (bool, optional): whether to enable DC correction. Defaults to True.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_phi=lambda_phi,
dc_correction=dc_correction,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D Gabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
rotx, roty = rotate(x, y, self.params.theta_degrees)
# calculate carrier and envelope
carrier = 1j * 2 * np.pi / self.params.lambda_phi * rotx
envelope = -(rotx**2 / self.params.sigma_phi**2 + roty**2 / self.params.sigma_rho**2) / 2
# calculate kernel values
kernel_values = np.exp(envelope + carrier)
kernel_values /= 2 * np.pi * self.params.sigma_phi * self.params.sigma_rho
# apply DC correction
if self.params.dc_correction:
# Step 1: calculate mean value of Gabor Wavelet
g_mean = np.mean(np.real(kernel_values), axis=-1)
# Step 2: define gaussian offset
correction_term_mean = np.mean(envelope, axis=-1)
# Step 3: subtract gaussian
kernel_values = kernel_values - (g_mean / correction_term_mean)[:, np.newaxis] * envelope
<fim_suffix>
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
class LogGaborFilter(ImageFilter):
"""Implementation of a 2D LogGabor filter.
Reference:
[1] https://en.wikipedia.org/wiki/Log_Gabor_filter.
"""
class Parameters(ImageFilter.Parameters):
"""LogGaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., gt=0, le=np.pi)
sigma_rho: float = Field(..., gt=0.1, le=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_rho: float = Field(..., gt=2)
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_LogGabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_rho: float,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): bandwidth in phi (frequency domain).
sigma_rho (float): bandwidth in rho (frequency domain).
theta_degrees (float): orientation of filter in degrees.
lambda_rho (float): wavelength in rho.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_rho=lambda_rho,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D LogGabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
radius = get_radius(x, y)
# remove 0 radius value in the center
ksize_phi_half = self.params.kernel_size[0] // 2
ksize_rho_half = self.params.kernel_size[1] // 2
radius[ksize_rho_half][ksize_phi_half] = 1
# get angular distance
[rotx, roty] = rotate(x, y, self.params.theta_degrees)
dtheta = np.arctan2(roty, rotx)
# calculate envelope and orientation
envelope = np.exp(
-0.5
* np.log2(radius * self.params.lambda_rho / self.params.kernel_size[1]) ** 2
/ self.params.sigma_rho**2
)
envelope[ksize_rho_half][ksize_phi_half] = 0
orientation = np.exp(-0.5 * dtheta**2 / self.params.sigma_phi**2)
# calculate kernel values
kernel_values = envelope * orientation
kernel_values = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kernel_values)))
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
<fim_middle># normalize kernel values | # normalize kernel values | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
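A short sketch of the fix-point conversion applied after normalization in this row's code: convert_to_fixpoint_kernelvalues scales values by 2**15 and rounds. The sample kernel values are illustrative, and the import assumes the iris package from this repository is installed:

import numpy as np

from iris.nodes.iris_response.image_filters.gabor_filters import convert_to_fixpoint_kernelvalues

kernel = np.array([0.5 + 0.25j, -0.1 + 0.0j])

print(convert_to_fixpoint_kernelvalues(kernel))  # approximately [16384+8192j, -3277+0j]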
<filename>open-iris/src/iris/utils/math.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/tests/unit_tests/utils.py
def area_of_circular_segment(circle_radius: float, delta_height: float) -> float:
"""Compute the area of a circular segment (see source for definition).
Source: https://en.wikipedia.org/wiki/Circular_segment
Args:
circle_radius (float): Radius of the circle (R).
delta_height (float): distance between the center of the segment and the base of the secant, i.e. apothem (d).
Returns:
float: area of the circular segment
"""
if delta_height > circle_radius:
return 0.0
area = circle_radius**2 * np.arccos(delta_height / circle_radius) - delta_height * np.sqrt(
circle_radius**2 - delta_height**2
)
return area
# open-iris/src/iris/nodes/eye_properties_estimation/moment_of_area.py
def run(self, geometries: GeometryPolygons) -> EyeOrientation:
"""Compute the eye orientation using the second order moments or the eyeball.
WARNING: cv2.moments MUST only receive np.float32 arrays. Otherwise, the array will be interpreted as a sparse
matrix instead of a list of points. See https://github.com/opencv/opencv/issues/6643#issuecomment-224204774.
Args:
geometries (GeometryPolygons): segmentation map used for eye orientation estimation.
Raises:
EyeOrientationEstimationError if the eyeball's eccentricity is below `eccentricity_threshold`, i.e. if the eyeball shape is too circular to reliably estimate the orientation.
Returns:
EyeOrientation: eye orientation object.
"""
moments = cv2.moments(geometries.eyeball_array)
eccentricity = math_utils.eccentricity(moments)
if eccentricity < self.params.eccentricity_threshold:
raise EyeOrientationEstimationError(
"The eyeball is too circular to reliably determine its orientation. "
f"Computed eccentricity: {eccentricity}. Threshold: {self.params.eccentricity_threshold}"
)
orientation = math_utils.orientation(moments)
return EyeOrientation(angle=orientation)
# open-iris/setup.py
def load_description() -> str:
"""Load description from README.md file.
Returns:
str: A package description.
"""
readme_filepath = os.path.join(os.path.dirname(__file__), "README.md")
if not os.path.exists(readme_filepath):
return ""
with open(readme_filepath, "r", encoding="UTF-8") as fh:
long_description = fh.read()
return long_description
"""
import math
from typing import Dict, Tuple
import numpy as np
def area(array: np.ndarray) -> float:
"""Shoelace formula for simple polygon area calculation.
WARNING: This formula only works for simple polygons, i.e. planar polygons without self-intersection or holes.
These conditions are not checked within this function.
Args:
array (np.ndarray): np array representing a polygon as a list of points, i.e. of shape (_, 2).
Raises:
ValueError: if the input array does not have shape (_, 2)
Returns:
float: Polygon area
References:
[1] https://en.wikipedia.org/wiki/Shoelace_formula
[2] https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
"""
if len(array.shape) != 2 or array.shape[1] != 2:
raise ValueError(f"Unable to determine the area of a polygon with shape {array.shape}. Expecting (_, 2).")
xs, ys = array.T
area = 0.5 * np.abs(np.dot(xs, np.roll(ys, 1)) - np.dot(ys, np.roll(xs, 1)))
return float(area)
def estimate_diameter(polygon: np.ndarray) -> float:
"""Estimates the diameter of an arbitrary arc by evaluating the maximum distance between any two points on the arc.
Args:
polygon (np.ndarray): Polygon points.
Returns:
float: Estimated diameter length.
Reference:
[1] https://sparrow.dev/pairwise-distance-in-numpy/
"""
return float(np.linalg.norm(polygon[:, None, :] - polygon[None, :, :], axis=-1).max())
def cartesian2polar(xs: np.ndarray, ys: np.ndarray, center_x: float, center_y: float) -> Tuple[np.ndarray, np.ndarray]:
"""Convert xs and ys cartesian coordinates to polar coordinates.
Args:
xs (np.ndarray): x values.
ys (np.ndarray): y values.
center_x (float): center's x.
center_y (float): center's y.
Returns:
Tuple[np.ndarray, np.ndarray]: Converted coordinates (rhos, phis).
"""
x_rel: np.ndarray = xs - center_x
y_rel: np.ndarray = ys - center_y
C = np.vectorize(complex)(x_rel, y_rel)
rho = np.abs(C)
phi = np.angle(C) % (2 * np.pi)
return rho, phi
def polar2cartesian(
rhos: np.ndarray, phis: np.ndarray, center_x: float, center_y: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Convert polar coordinates to cartesian coordinates.
Args:
rhos (np.ndarray): rho values.
phis (np.ndarray): phi values.
center_x (float): center's x.
center_y (float): center's y.
Returns:
Tuple[np.ndarray, np.ndarray]: Converted coordinates (xs, ys).
"""
xs = center_x + rhos * np.cos(phis)
ys = center_y + rhos * np.sin(phis)
return xs, ys
def orientation(moments: Dict[str, float]) -> float:
"""Compute the main orientation of a contour or a binary image given its precomputed cv2 moments.
Args:
moments (Dict[str, float]): cv2.moments of the desired binary image or contour.
Returns:
float: Main orientation of the shape. The orientation is a float in [-pi/2, pi/2[ representing the signed angle from the x axis.
"""
# Edge case of null denominator
if (moments["mu20"] - moments["mu02"]) == 0:
if moments["mu11"] == 0:
orientation = 0.0
else:
orientation = math.copysign(np.pi / 4, moments["mu11"])
else:
# General formula
orientation = 0.5 * np.arctan(2 * moments["mu11"] / (moments["mu20"] - moments["mu02"]))
if (moments["mu20"] - moments["mu02"]) < 0:
orientation += np.pi / 2
# Restricting the angle to [-pi/2, pi/2[
orientation = np.mod(orientation + np.pi / 2, np.pi) - np.pi / 2
return orientation
def eccentricity(moments: Dict[str, float]) -> float:
r"""Compute the eccentricity of a contour or a binary image given its precomputed cv2 moments.
The eccentricity is a number in [0, 1] which characterises the "roundness" or "linearity" of a shape.
A perfect circle will have an eccentricity of 0, and an infinite line an eccentricity of 1.
For ellipses, the eccentricity is calculated as :math:`\frac{\sqrt{a^2 - b^2}}{a^2}`
where a (resp. b) is the semi-major (resp. semi-minor) axis of the ellipse.
For `mu20 + mu02 == 0`, i.e. a perfect line, the max theoretical value (1.0) is returned
Args:
moments (Dict[str, float]): cv2.moments of the desired binary image or contour.
Returns:
eccentricity (float): the eccentricity of the contour or binary map.
Reference:
[1] https://t1.daumcdn.net/cfile/tistory/15425F4150F4EBFC19
"""
if moments["mu20"] + moments["mu02"] == 0:
return 1.0
# fmt: off
eccentricity = ((moments["mu20"] - moments["mu02"]) ** 2 + 4 * moments["mu11"] ** 2) / (moments["mu20"] + moments["mu02"]) ** 2
<fim_suffix>
return eccentricity
def apply_weights_1d(scores_1d: np.ndarray, weights_1d: np.ndarray) -> float:
"""Apply weights for score fusion.
Args:
scores_1d (np.ndarray): scores to be fused.
weights_1d (np.ndarray): weights.
Raises:
ValueError: if the input 1d arrays do not have the same length.
Returns:
float: fused score.
"""
if len(scores_1d) != len(weights_1d):
raise ValueError("Unable to apply weights. Dimension is different between scores and weights.")
if len(weights_1d) == 0:
raise ValueError("Unable to apply weights. Empty arrays.")
if np.sum(weights_1d) == 0:
raise ValueError("Unable to apply weights. Sum of weights is zero.")
weighted_score = np.sum(np.multiply(scores_1d, weights_1d))
return weighted_score / np.sum(weights_1d)
def polygon_length(polygon: np.ndarray, max_point_distance: int = 20) -> float:
"""Compute the length of a polygon represented as a (_, 2)-dimensionnal numpy array.
One polygon can include several disjoint arcs, which should be identified as separate so that the distance
between them is not counted. If a polygon is made of two small arc separated by a large distance, then the large
distance between the two arcs will not be discounted in the polygon's length
WARNING: The input polygon is assumed to be non-looped, i.e. if the first and last point are not equal,
which is the case for all ou GeometryPolygons. The last implicit segment looping back from the
last to the first point is therefore not included in the computed polygon length.
Args:
polygon (np.ndarray): (_, 2) - shaped numpy array representing a polygon.
max_point_distance (int): Maximum distance between two points for them to be considered part of the same arc.
Returns:
float: length of the polygon, in pixels.
"""
if polygon.ndim != 2 or polygon.shape[1] != 2:
raise ValueError(f"This function expects a polygon, i.e. an array of shape (_, 2). Got {polygon.shape}")
inter_point_distances = np.linalg.norm(np.roll(polygon, 1, axis=0) - polygon, axis=1)
inter_point_distances = inter_point_distances[inter_point_distances < max_point_distance]
return inter_point_distances.sum()
<fim_middle># fmt: on | # fmt: on | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
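A worked check of the Shoelace area and the diameter estimate defined in this row, using a unit square as an illustrative polygon; the import path follows the file location shown above and assumes the iris package is installed:

import numpy as np

from iris.utils.math import area, estimate_diameter

square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])

print(area(square))               # 1.0
print(estimate_diameter(square))  # ~1.4142 (the square's diagonal)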
<filename>open-iris/src/iris/nodes/iris_response/image_filters/gabor_filters.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/iris_response/image_filters/image_filter_interface.py
def compute_kernel_values(self) -> np.ndarray:
"""Compute values of filter kernel.
Returns:
np.ndarray: Computed kernel values.
"""
pass
# open-iris/src/iris/nodes/geometry_refinement/smoothing.py
def _find_start_index(self, phi: np.ndarray) -> int:
"""Find the start index by checking the largest gap. phi needs to be sorted.
Args:
phi (np.ndarray): phi angle values.
Raises:
GeometryRefinementError: Raised if phi values are not sorted ascendingly.
Returns:
int: Index value.
"""
if not np.all((phi - np.roll(phi, 1))[1:] >= 0):
raise GeometryRefinementError("Smoothing._find_start_index phi must be sorted ascendingly!")
phi_tmp = np.concatenate(([phi[-1] - 2 * np.pi], phi, [phi[0] + 2 * np.pi]))
phi_tmp_left_neighbor = np.roll(phi_tmp, 1)
dphi = (phi_tmp - phi_tmp_left_neighbor)[1:-1]
largest_gap_index = np.argmax(dphi)
return int(largest_gap_index)
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def cartesian2homogeneous(points: List[np.ndarray]) -> np.ndarray:
"""Convert points in cartesian coordinates to homogeneous coordinates.
Args:
points (List[np.ndarray]): Points in cartesian coordinates. Array should be in format: [[x values], [y values]].
Returns:
np.ndarray: Points in homogeneous coordinates. Returned array will have format: [[x values], [y values], [1 ... 1]].
"""
x_coords, y_coords = points
x_coords = x_coords.reshape(-1, 1)
y_coords = y_coords.reshape(-1, 1)
homogeneous_coords = np.hstack([x_coords, y_coords, np.ones((len(x_coords), 1))])
return homogeneous_coords.T
"""
from typing import Any, Dict, Tuple
import numpy as np
from pydantic import Field, conint, root_validator, validator
import iris.io.validators as pydantic_v
from iris.io.errors import ImageFilterError
from iris.nodes.iris_response.image_filters.image_filter_interface import ImageFilter
def upper_bound_Gabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bounds of Gabor filter parameters such as sigma_phi, sigma_rho and lambda_phi for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: Raised if 1) sigma_phi is greater than kernel_size[0], 2) sigma_rho is greater than kernel_size[1], 3) lambda_phi greater than kernel_size[0].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, sigma_phi, sigma_rho, lambda_phi = (
values["kernel_size"],
values["sigma_phi"],
values["sigma_rho"],
values["lambda_phi"],
)
if sigma_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: sigma_phi can not be greater than kernel_size[0].")
if sigma_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: sigma_rho can not be greater than kernel_size[1].")
if lambda_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: lambda_phi can not be greater than kernel_size[0].")
return values
def upper_bound_LogGabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bound of LogGabor filter parameter lambda_rho for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: lambda_rho can not be greater than kernel_size[1].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, lambda_rho = values["kernel_size"], values["lambda_rho"]
if lambda_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: lambda_rho can not be greater than kernel_size[1].")
return values
def get_xy_mesh(kernel_size: Tuple[int, int]) -> Tuple[np.ndarray, np.ndarray]:
"""Get (x,y) meshgrids for a given kernel size.
Args:
kernel_size (Tuple[int, int]): Kernel width and height.
Returns:
Tuple[np.ndarray, np.ndarray]: meshgrid of (x, y) positions.
"""
ksize_phi_half = kernel_size[0] // 2
ksize_rho_half = kernel_size[1] // 2
y, x = np.meshgrid(
np.arange(-ksize_phi_half, ksize_phi_half + 1),
np.arange(-ksize_rho_half, ksize_rho_half + 1),
indexing="xy",
sparse=True,
)
return x, y
def get_radius(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""Get radius to the image center for a given array of relative positions (x,y).
Args:
x (np.ndarray): x position relative to the image center.
y (np.ndarray): y position relative to the image center.
Returns:
np.ndarray: radius to the image center.
"""
radius = np.sqrt(x**2 + y**2)
return radius
def rotate(x: np.ndarray, y: np.ndarray, angle: float) -> Tuple[np.ndarray, np.ndarray]:
"""Rotate a given array of relative positions (x,y) by a given angle.
Args:
x (np.ndarray): x position.
y (np.ndarray): y position.
angle (float): angle for rotation (in degrees).
Returns:
Tuple[np.ndarray, np.ndarray]: rotated x, y positions.
"""
cos_theta = np.cos(angle * np.pi / 180)
sin_theta = np.sin(angle * np.pi / 180)
rotx = x * cos_theta + y * sin_theta
roty = -x * sin_theta + y * cos_theta
return rotx, roty
def normalize_kernel_values(kernel_values: np.ndarray) -> np.ndarray:
"""Normalize the kernel values so that the square sum is 1.
Args:
kernel_values (np.ndarray): Kernel values (complex numbers).
Returns:
np.ndarray: normalized Kernel values.
"""
norm_real = np.linalg.norm(kernel_values.real, ord="fro")
if norm_real > 0:
kernel_values.real /= norm_real
norm_imag = np.linalg.norm(kernel_values.imag, ord="fro")
if norm_imag > 0:
kernel_values.imag /= norm_imag
return kernel_values
def convert_to_fixpoint_kernelvalues(kernel_values: np.ndarray) -> np.ndarray:
"""Convert the kernel values (both real and imaginary) to fix points.
Args:
kernel_values (np.ndarray): Kernel values.
Returns:
np.ndarray: fix-point Kernel values.
"""
if np.iscomplexobj(kernel_values):
kernel_values.real = np.round(kernel_values.real * 2**15)
kernel_values.imag = np.round(kernel_values.imag * 2**15)
else:
kernel_values = np.round(kernel_values * 2**15)
return kernel_values
class GaborFilter(ImageFilter):
"""Implementation of a 2D Gabor filter.
Reference:
[1] https://inc.ucsd.edu/mplab/75/media//gabor.pdf.
"""
class Parameters(ImageFilter.Parameters):
"""GaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., ge=1)
sigma_rho: float = Field(..., ge=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_phi: float = Field(..., ge=2)
dc_correction: bool
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_Gabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_phi: float,
dc_correction: bool = True,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): phi standard deviation.
sigma_rho (float): rho standard deviation.
theta_degrees (float): orientation of kernel in degrees.
lambda_phi (float): wavelength of the sinusoidal factor, lower value = thinner strip.
dc_correction (bool, optional): whether to enable DC correction. Defaults to True.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_phi=lambda_phi,
dc_correction=dc_correction,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D Gabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
rotx, roty = rotate(x, y, self.params.theta_degrees)
# calculate carrier and envelope
carrier = 1j * 2 * np.pi / self.params.lambda_phi * rotx
envelope = -(rotx**2 / self.params.sigma_phi**2 + roty**2 / self.params.sigma_rho**2) / 2
<fim_suffix>
kernel_values = np.exp(envelope + carrier)
kernel_values /= 2 * np.pi * self.params.sigma_phi * self.params.sigma_rho
# apply DC correction
if self.params.dc_correction:
# Step 1: calculate mean value of Gabor Wavelet
g_mean = np.mean(np.real(kernel_values), axis=-1)
# Step 2: define gaussian offset
correction_term_mean = np.mean(envelope, axis=-1)
# Step 3: subtract gaussian
kernel_values = kernel_values - (g_mean / correction_term_mean)[:, np.newaxis] * envelope
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
class LogGaborFilter(ImageFilter):
"""Implementation of a 2D LogGabor filter.
Reference:
[1] https://en.wikipedia.org/wiki/Log_Gabor_filter.
"""
class Parameters(ImageFilter.Parameters):
"""LogGaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., gt=0, le=np.pi)
sigma_rho: float = Field(..., gt=0.1, le=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_rho: float = Field(..., gt=2)
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_LogGabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_rho: float,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): bandwidth in phi (frequency domain).
sigma_rho (float): bandwidth in rho (frequency domain).
theta_degrees (float): orientation of filter in degrees.
lambda_rho (float): wavelength in rho.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_rho=lambda_rho,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D LogGabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
radius = get_radius(x, y)
# remove 0 radius value in the center
ksize_phi_half = self.params.kernel_size[0] // 2
ksize_rho_half = self.params.kernel_size[1] // 2
radius[ksize_rho_half][ksize_phi_half] = 1
# get angular distance
[rotx, roty] = rotate(x, y, self.params.theta_degrees)
dtheta = np.arctan2(roty, rotx)
# calculate envelope and orientation
envelope = np.exp(
-0.5
* np.log2(radius * self.params.lambda_rho / self.params.kernel_size[1]) ** 2
/ self.params.sigma_rho**2
)
envelope[ksize_rho_half][ksize_phi_half] = 0
orientation = np.exp(-0.5 * dtheta**2 / self.params.sigma_phi**2)
# calculate kernel values
kernel_values = envelope * orientation
kernel_values = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kernel_values)))
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
<fim_middle># calculate kernel values | # calculate kernel values | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
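The LogGabor compute_kernel_values above builds the kernel in the frequency domain (a radial log-Gaussian envelope times an angular Gaussian) and brings it to the spatial domain with an inverse FFT. A rough standalone numpy sketch of that construction follows; all parameter values (ksize, sigma_phi, sigma_rho, theta, lambda_rho) are illustrative, not taken from any repository config.
import numpy as np
# Sketch of the LogGabor construction: frequency-domain envelope and
# orientation term, then inverse FFT to obtain the complex spatial kernel.
ksize = (21, 21)                                  # (width, height), both odd
sigma_phi, sigma_rho = 0.5, 0.5                   # angular / radial bandwidths
theta = np.radians(90.0)                          # filter orientation
lambda_rho = 8.0                                  # radial wavelength
half_x, half_y = ksize[0] // 2, ksize[1] // 2
y, x = np.meshgrid(np.arange(-half_x, half_x + 1),
                   np.arange(-half_y, half_y + 1), indexing="xy")
radius = np.sqrt(x**2 + y**2)
radius[half_y, half_x] = 1                        # avoid log(0) at the DC bin
rotx = x * np.cos(theta) + y * np.sin(theta)
roty = -x * np.sin(theta) + y * np.cos(theta)
dtheta = np.arctan2(roty, rotx)                   # angular distance to theta
envelope = np.exp(-0.5 * np.log2(radius * lambda_rho / ksize[1]) ** 2 / sigma_rho**2)
envelope[half_y, half_x] = 0                      # zero DC response
orientation = np.exp(-0.5 * dtheta**2 / sigma_phi**2)
kernel = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(envelope * orientation)))
print(kernel.shape)                               # (21, 21) complex spatial kernel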
<filename>open-iris/src/iris/nodes/eye_properties_estimation/bisectors_method.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/geometry_refinement/smoothing.py
def _smooth_circular_shape(self, vertices: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:
"""Smooth arc in a form of a circular shape.
Args:
vertices (np.ndarray): Arc's vertices.
center_xy (Tuple[float, float]): Center of an entire contour.
Returns:
np.ndarray: Smoothed arc's vertices.
"""
rho, phi = math.cartesian2polar(vertices[:, 0], vertices[:, 1], *center_xy)
padded_phi = np.concatenate([phi - 2 * np.pi, phi, phi + 2 * np.pi])
padded_rho = np.concatenate([rho, rho, rho])
smoothed_phi, smoothed_rho = self._smooth_array(padded_phi, padded_rho)
mask = (smoothed_phi >= 0) & (smoothed_phi < 2 * np.pi)
rho_smoothed, phi_smoothed = smoothed_rho[mask], smoothed_phi[mask]
x_smoothed, y_smoothed = math.polar2cartesian(rho_smoothed, phi_smoothed, *center_xy)
return np.column_stack([x_smoothed, y_smoothed])
# open-iris/src/iris/utils/math.py
def polygon_length(polygon: np.ndarray, max_point_distance: int = 20) -> float:
"""Compute the length of a polygon represented as a (_, 2)-dimensionnal numpy array.
One polygon can include several disjoint arcs, which should be identified as separate so that the distance
between them is not counted. If a polygon is made of two small arcs separated by a large distance, then the large
distance between the two arcs will not be counted in the polygon's length.
WARNING: The input polygon is assumed to be non-looped, i.e. the first and last points are not equal,
which is the case for all our GeometryPolygons. The last implicit segment looping back from the
last to the first point is therefore not included in the computed polygon length.
Args:
polygon (np.ndarray): (_, 2) - shaped numpy array representing a polygon.
max_point_distance (int): Maximum distance between two points for them to be considered part of the same arc.
Returns:
float: length of the polygon, in pixels.
"""
if polygon.ndim != 2 or polygon.shape[1] != 2:
raise ValueError(f"This function expects a polygon, i.e. an array of shape (_, 2). Got {polygon.shape}")
inter_point_distances = np.linalg.norm(np.roll(polygon, 1, axis=0) - polygon, axis=1)
inter_point_distances = inter_point_distances[inter_point_distances < max_point_distance]
return inter_point_distances.sum()
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def check_intermediate_radiuses(cls: type, v: Collection[float]) -> Collection[float]:
"""Check intermediate_radiuses parameter.
Args:
cls (type): PerspectiveNormalization.Parameters class.
v (Collection[float]): Variable value to check.
Raises:
NormalizationError: Raised if the number of radiuses is invalid, the min value is less than 0.0, or the max value is greater than 1.0.
Returns:
Collection[float]: intermediate_radiuses value passed for further processing.
"""
if len(v) < 2:
raise NormalizationError(f"Invalid number of intermediate_radiuses: {len(v)}.")
if min(v) < 0.0:
raise NormalizationError(f"Invalid min value of intermediate_radiuses: {min(v)}.")
if max(v) > 1.0:
raise NormalizationError(f"Invalid max value of intermediate_radiuses: {max(v)}.")
return v
"""
from typing import Tuple
import numpy as np
from pydantic import Field
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeCenters, GeometryPolygons
from iris.io.errors import EyeCentersEstimationError
class BisectorsMethod(Algorithm):
"""Implementation of eye's center estimation algorithm using bisectors method for finding a circle center.
This algorithm samples a given number of bisectors from the pupil and iris polygons, and averages their intersection
to produce the polygon center. This method is robust against noise in the polygons, making it a good choice for
non-perfect shapes. It is also robust to polygons missing parts of the circle arc, making it a good choice for
partially-occluded shapes.
LIMITATIONS:
The iris and pupil can be approximated to circles, when the user is properly gazing at the camera.
This requires that the cases of off-gaze have already been filtered out.
"""
class Parameters(Algorithm.Parameters):
"""Default Parameters for BisectorsMethod algorithm."""
num_bisectors: int = Field(..., gt=0)
min_distance_between_sector_points: float = Field(..., gt=0.0, lt=1.0)
max_iterations: int = Field(..., gt=0)
__parameters_type__ = Parameters
def __init__(
self,
num_bisectors: int = 100,
min_distance_between_sector_points: float = 0.75,
max_iterations: int = 50,
) -> None:
"""Assign parameters.
Args:
num_bisectors (int, optional): Number of bisectors. Defaults to 100.
min_distance_between_sector_points (float, optional): Minimum distance between sectors expressed as a fractional value of a circular shape diameter. Defaults to 0.75.
max_iterations (int, optional): Max iterations for bisector search. Defaults to 50.
"""
super().__init__(
num_bisectors=num_bisectors,
min_distance_between_sector_points=min_distance_between_sector_points,
max_iterations=max_iterations,
)
def run(self, geometries: GeometryPolygons) -> EyeCenters:
"""Estimate eye's iris and pupil centers.
Args:
geometries (GeometryPolygons): Geometry polygons.
Returns:
EyeCenters: Eye's centers object.
"""
pupil_center_x, pupil_center_y = self._find_center_coords(geometries.pupil_array, geometries.pupil_diameter)
iris_center_x, iris_center_y = self._find_center_coords(geometries.iris_array, geometries.iris_diameter)
return EyeCenters(pupil_x=pupil_center_x, pupil_y=pupil_center_y, iris_x=iris_center_x, iris_y=iris_center_y)
def _find_center_coords(self, polygon: np.ndarray, diameter: float) -> Tuple[float, float]:
"""Find center coordinates of a polygon.
Args:
polygon (np.ndarray): np.ndarray.
diameter (float): diameter of the polygon.
Returns:
Tuple[float, float]: Tuple with the center location coordinates (x, y).
"""
min_distance_between_sector_points_in_px = self.params.min_distance_between_sector_points * diameter
first_bisectors_point, second_bisectors_point = self._calculate_perpendicular_bisectors(
polygon, min_distance_between_sector_points_in_px
)
return self._find_best_intersection(first_bisectors_point, second_bisectors_point)
def _calculate_perpendicular_bisectors(
self, polygon: np.ndarray, min_distance_between_sector_points_in_px: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Calculate the perpendicular bisector of self.params.num_bisectors randomly chosen points from a polygon's vertices.
A pair of points is used if their distance is larger than min_distance_between_sector_points_in_px.
Args:
polygon (np.ndarray): np.ndarray based on which we are searching the center of a circular shape.
min_distance_between_sector_points_in_px (float): Minimum distance between sector points.
Raises:
EyeCentersEstimationError: Raised if not able to find enough random pairs of points on the arc with a large enough distance!
Returns:
Tuple[np.ndarray, np.ndarray]: Calculated perpendicular bisectors.
"""
np.random.seed(142857)
bisectors_first_points = np.empty([0, 2])
bisectors_second_points = np.empty([0, 2])
for _ in range(self.params.max_iterations):
random_indices = np.random.choice(len(polygon), size=(self.params.num_bisectors, 2))
first_drawn_points = polygon[random_indices[:, 0]]
second_drawn_points = polygon[random_indices[:, 1]]
norms = np.linalg.norm(first_drawn_points - second_drawn_points, axis=1)
mask = norms > min_distance_between_sector_points_in_px
bisectors_first_points = np.vstack([bisectors_first_points, first_drawn_points[mask]])
bisectors_second_points = np.vstack([bisectors_second_points, second_drawn_points[mask]])
if len(bisectors_first_points) >= self.params.num_bisectors:
break
else:
raise EyeCentersEstimationError(
"Not able to find enough random pairs of points on the arc with a large enough distance!"
)
bisectors_first_points = bisectors_first_points[: self.params.num_bisectors]
bisectors_second_points = bisectors_second_points[: self.params.num_bisectors]
bisectors_center = (bisectors_first_points + bisectors_second_points) / 2
# Flip xs with ys and flip sign of on of them to create a 90deg rotation
inv_bisectors_center_slope = np.fliplr(bisectors_second_points - bisectors_first_points)
inv_bisectors_center_slope[:, 1] = -inv_bisectors_center_slope[:, 1]
<fim_suffix>
norm = np.linalg.norm(inv_bisectors_center_slope, axis=1)
inv_bisectors_center_slope[:, 0] /= norm
inv_bisectors_center_slope[:, 1] /= norm
first_bisectors_point = bisectors_center - inv_bisectors_center_slope
second_bisectors_point = bisectors_center + inv_bisectors_center_slope
return first_bisectors_point, second_bisectors_point
def _find_best_intersection(self, fst_points: np.ndarray, sec_points: np.ndarray) -> Tuple[float, float]:
"""fst_points and sec_points are NxD arrays defining N lines. D is the dimension of the space.
This function returns the least squares intersection of the N lines from the system given by eq. 13 in
http://cal.cs.illinois.edu/~johannes/research/LS_line_intersecpdf.
Args:
fst_points (np.ndarray): First bisectors points.
sec_points (np.ndarray): Second bisectors points.
Returns:
Tuple[float, float]: Best intersection point.
Reference:
[1] http://cal.cs.illinois.edu/~johannes/research/LS_line_intersecpdf
"""
norm_bisectors = (sec_points - fst_points) / np.linalg.norm(sec_points - fst_points, axis=1)[:, np.newaxis]
# Generate the array of all projectors I - n*n.T
projections = np.eye(norm_bisectors.shape[1]) - norm_bisectors[:, :, np.newaxis] * norm_bisectors[:, np.newaxis]
# Generate R matrix and q vector
R = projections.sum(axis=0)
q = (projections @ fst_points[:, :, np.newaxis]).sum(axis=0)
# Solve the least squares problem for the intersection point p: Rp = q
p = np.linalg.lstsq(R, q, rcond=None)[0]
intersection_x, intersection_y = p
return intersection_x.item(), intersection_y.item()
<fim_middle># Add perpendicular vector to center and normalize | # Add perpendicular vector to center and normalize | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
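The `_find_best_intersection` docstring above refers to eq. 13 of the linked note: with unit line directions n_i and points a_i on each line, solve (sum_i (I - n_i n_i^T)) p = sum_i (I - n_i n_i^T) a_i. A small self-contained numpy check on synthetic data follows; the circle center, radius and seed are arbitrary illustration values.
import numpy as np
# Sanity check of the least-squares line intersection R p = q used above.
# Random chords of a circle are drawn; their perpendicular bisectors all pass
# through the circle center, so the recovered point p should be ~ (cx, cy).
cx, cy, r = 10.0, -3.0, 5.0
rng = np.random.default_rng(0)
t = rng.uniform(0, 2 * np.pi, size=(50, 2))
p1 = np.stack([cx + r * np.cos(t[:, 0]), cy + r * np.sin(t[:, 0])], axis=1)
p2 = np.stack([cx + r * np.cos(t[:, 1]), cy + r * np.sin(t[:, 1])], axis=1)
mid = (p1 + p2) / 2                               # a point on each bisector
n = np.fliplr(p2 - p1) * np.array([1.0, -1.0])    # chord rotated by 90 degrees
n /= np.linalg.norm(n, axis=1, keepdims=True)     # unit direction of each bisector
proj = np.eye(2) - n[:, :, None] * n[:, None, :]  # projectors I - n n^T
R = proj.sum(axis=0)
q = (proj @ mid[:, :, None]).sum(axis=0)
p = np.linalg.lstsq(R, q, rcond=None)[0].ravel()
print(p)                                          # approximately [10., -3.]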
<filename>open-iris/src/iris/nodes/geometry_estimation/lsq_ellipse_fit_with_refinement.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/geometry_refinement/contour_points_filter.py
def _filter_polygon_points(self, forbidden_touch_map: np.ndarray, polygon_points: np.ndarray) -> np.ndarray:
"""Filter polygon's points.
Args:
forbidden_touch_map (np.ndarray): Forbidden touch map. If the value of an element is greater than 0, the point is too close to noise or to the eyeball.
polygon_points (np.ndarray): Polygon's points.
Returns:
np.ndarray: Filtered polygon's points.
"""
valid_points = [not forbidden_touch_map[y, x] for x, y in np.round(polygon_points).astype(int)]
return polygon_points[valid_points]
# open-iris/src/iris/utils/common.py
def contour_to_mask(vertices: np.ndarray, mask_shape: Tuple[int, int]) -> np.ndarray:
"""Generate binary mask based on polygon's vertices.
Args:
vertices (np.ndarray): Vertices points array.
mask_shape (Tuple[int, int]): Tuple with output mask dimension (width, height).
Returns:
np.ndarray: Binary mask.
"""
width, height = mask_shape
mask = np.zeros(shape=(height, width, 3))
vertices = np.round(vertices).astype(np.int32)
cv2.fillPoly(mask, pts=[vertices], color=(255, 0, 0))
mask = mask[..., 0]
mask = mask.astype(bool)
return mask
# open-iris/src/iris/nodes/validators/object_validators.py
def _check_pupil_point_is_inside_iris(self, point: np.ndarray, polygon_pts: np.ndarray) -> bool:
"""Check if pupil point is inside iris polygon.
Reference:
[1] https://www.geeksforgeeks.org/how-to-check-if-a-given-point-lies-inside-a-polygon/
Args:
point (np.ndarray): Point x, y.
polygon_pts (np.ndarray): Polygon points.
Returns:
bool: Check result.
"""
num_iris_points = len(polygon_pts)
polygon_sides = [
(polygon_pts[i % num_iris_points], polygon_pts[(i + 1) % num_iris_points]) for i in range(num_iris_points)
]
x, y = point
to_right_ray = (point, np.array([float("inf"), y]))
to_left_ray = (np.array([-float("inf"), y]), point)
right_ray_intersections, left_ray_intersections = 0, 0
for poly_side in polygon_sides:
if self._is_ray_intersecting_with_side(to_right_ray, poly_side, is_ray_pointing_to_left=False):
right_ray_intersections += 1
if self._is_ray_intersecting_with_side(to_left_ray, poly_side, is_ray_pointing_to_left=True):
left_ray_intersections += 1
return right_ray_intersections % 2 != 0 or left_ray_intersections % 2 != 0
"""
from typing import List
import cv2
import numpy as np
from pydantic import Field
from iris.callbacks.callback_interface import Callback
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import GeometryPolygons
class LSQEllipseFitWithRefinement(Algorithm):
"""Algorithm that implements least square ellipse fit with iris polygon refinement by finding points to refine by computing euclidean distance.
Algorithm steps:
1) Use OpenCV's fitEllipse method to fit an ellipse to predicted iris and pupil polygons.
2) Refine the predicted pupil polygon points back to their original locations to prevent precision loss for those points which were predicted by the semseg algorithm.
"""
class Parameters(Algorithm.Parameters):
"""Parameters of least square ellipse fit extrapolation algorithm."""
dphi: float = Field(..., gt=0.0, lt=360.0)
__parameters_type__ = Parameters
def __init__(self, dphi: float = 1.0, callbacks: List[Callback] = []) -> None:
"""Assign parameters.
Args:
dphi (float, optional): Angle's delta. Defaults to 1.0.
callbacks (List[Callback], optional): List of callbacks. Defaults to [].
"""
super().__init__(dphi=dphi, callbacks=callbacks)
def run(self, input_polygons: GeometryPolygons) -> GeometryPolygons:
"""Estimate extrapolated polygons with OpenCV's method fitEllipse.
Args:
input_polygons (GeometryPolygons): Smoothed polygons.
Returns:
GeometryPolygons: Extrapolated polygons.
"""
extrapolated_pupil = self._extrapolate(input_polygons.pupil_array)
extrapolated_iris = self._extrapolate(input_polygons.iris_array)
for point in input_polygons.pupil_array:
extrapolated_pupil[self._find_correspondence(point, extrapolated_pupil)] = point
return GeometryPolygons(
pupil_array=extrapolated_pupil, iris_array=extrapolated_iris, eyeball_array=input_polygons.eyeball_array
)
def _extrapolate(self, polygon_points: np.ndarray) -> np.ndarray:
"""Perform extrapolation for points in an array.
Args:
polygon_points (np.ndarray): Smoothed polygon ready for applying extrapolation algorithm on it.
Returns:
np.ndarray: Estimated extrapolated polygon.
"""
(x0, y0), (a, b), theta = cv2.fitEllipse(polygon_points)
extrapolated_polygon = LSQEllipseFitWithRefinement.parametric_ellipsis(
a / 2, b / 2, x0, y0, np.radians(theta), round(360 / self.params.dphi)
)
<fim_suffix>
roll_amount = round((-theta - 90) / self.params.dphi)
extrapolated_polygon = np.flip(np.roll(extrapolated_polygon, roll_amount, axis=0), axis=0)
return extrapolated_polygon
def _find_correspondence(self, src_point: np.ndarray, dst_points: np.ndarray) -> int:
"""Find correspondence with Euclidean distance.
Args:
src_point (np.ndarray): Source point.
dst_points (np.ndarray): Destination points.
Returns:
int: Index of the destination point closest to the source point.
"""
src_x, src_y = src_point
distance = (dst_points[:, 1] - src_y) ** 2 + (dst_points[:, 0] - src_x) ** 2
idx = np.where(distance == distance.min())[0]
return idx
@staticmethod
def parametric_ellipsis(a: float, b: float, x0: float, y0: float, theta: float, nb_step: int = 100) -> np.ndarray:
"""Given the parameters of a general ellipsis, returns an array of points in this ellipsis.
Args:
a (float): Major axis length.
b (float): Minor axis length.
x0 (float): x offset.
y0 (float): y offset.
theta (float): rotation of the ellipse.
nb_step (int): number of points on the ellipse.
Returns:
np.ndarray: points on the ellipse.
"""
t = np.linspace(0, 2 * np.pi, nb_step)
x_coords = x0 + b * np.cos(t) * np.sin(-theta) + a * np.sin(t) * np.cos(-theta)
y_coords = y0 + b * np.cos(t) * np.cos(-theta) - a * np.sin(t) * np.sin(-theta)
return np.array([x_coords, y_coords]).T
<fim_middle># Rotate such that 0 degree is parallel with x-axis and array is clockwise | # Rotate such that 0 degree is parallel with x-axis and array is clockwise | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
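The `parametric_ellipsis` helper above samples points on an ellipse from its semi-axes, center and rotation. A short standalone sketch of the same parametrization follows; the function name and every number below are made up for illustration.
import numpy as np
# Sample nb_step points on an ellipse with semi-axes a and b, center (x0, y0)
# and rotation theta in radians, using the parametrization shown above.
def sample_ellipse(a: float, b: float, x0: float, y0: float, theta: float, nb_step: int = 100) -> np.ndarray:
    t = np.linspace(0, 2 * np.pi, nb_step)
    x = x0 + b * np.cos(t) * np.sin(-theta) + a * np.sin(t) * np.cos(-theta)
    y = y0 + b * np.cos(t) * np.cos(-theta) - a * np.sin(t) * np.sin(-theta)
    return np.column_stack([x, y])
points = sample_ellipse(a=40.0, b=25.0, x0=120.0, y0=80.0, theta=np.radians(30.0), nb_step=360)
print(points.shape)                               # (360, 2): one (x, y) row per sampled angle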
<filename>open-iris/src/iris/nodes/iris_response/image_filters/gabor_filters.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/iris_response/image_filters/image_filter_interface.py
def compute_kernel_values(self) -> np.ndarray:
"""Compute values of filter kernel.
Returns:
np.ndarray: Computed kernel values.
"""
pass
# open-iris/src/iris/nodes/geometry_refinement/smoothing.py
def _find_start_index(self, phi: np.ndarray) -> int:
"""Find the start index by checking the largest gap. phi needs to be sorted.
Args:
phi (np.ndarray): phi angle values.
Raises:
GeometryRefinementError: Raised if phi values are not sorted ascendingly.
Returns:
int: Index value.
"""
if not np.all((phi - np.roll(phi, 1))[1:] >= 0):
raise GeometryRefinementError("Smoothing._find_start_index phi must be sorted ascendingly!")
phi_tmp = np.concatenate(([phi[-1] - 2 * np.pi], phi, [phi[0] + 2 * np.pi]))
phi_tmp_left_neighbor = np.roll(phi_tmp, 1)
dphi = (phi_tmp - phi_tmp_left_neighbor)[1:-1]
largest_gap_index = np.argmax(dphi)
return int(largest_gap_index)
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def cartesian2homogeneous(points: List[np.ndarray]) -> np.ndarray:
"""Convert points in cartesian coordinates to homogeneous coordinates.
Args:
points (List[np.ndarray]): Points in cartesian coordinates. Array should be in format: [[x values], [y values]].
Returns:
np.ndarray: Points in homogeneous coordinates. Returned array will have format: [[x values], [y values], [1 ... 1]].
"""
x_coords, y_coords = points
x_coords = x_coords.reshape(-1, 1)
y_coords = y_coords.reshape(-1, 1)
homogeneous_coords = np.hstack([x_coords, y_coords, np.ones((len(x_coords), 1))])
return homogeneous_coords.T
"""
from typing import Any, Dict, Tuple
import numpy as np
from pydantic import Field, conint, root_validator, validator
import iris.io.validators as pydantic_v
from iris.io.errors import ImageFilterError
from iris.nodes.iris_response.image_filters.image_filter_interface import ImageFilter
def upper_bound_Gabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bounds of Gabor filter parameters such as sigma_phi, sigma_rho and lambda_phi for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: Raised if 1) sigma_phi is greater than kernel_size[0], 2) sigma_rho is greater than kernel_size[1], 3) lambda_phi greater than kernel_size[0].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, sigma_phi, sigma_rho, lambda_phi = (
values["kernel_size"],
values["sigma_phi"],
values["sigma_rho"],
values["lambda_phi"],
)
if sigma_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: sigma_phi can not be greater than kernel_size[0].")
if sigma_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: sigma_rho can not be greater than kernel_size[1].")
if lambda_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: lambda_phi can not be greater than kernel_size[0].")
return values
def upper_bound_LogGabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bound of LogGabor filter parameter lambda_rho for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: Raised if lambda_rho is greater than kernel_size[1].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, lambda_rho = values["kernel_size"], values["lambda_rho"]
if lambda_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: lambda_rho can not be greater than kernel_size[1].")
return values
def get_xy_mesh(kernel_size: Tuple[int, int]) -> Tuple[np.ndarray, np.ndarray]:
"""Get (x,y) meshgrids for a given kernel size.
Args:
kernel_size (Tuple[int, int]): Kernel width and height.
Returns:
Tuple[np.ndarray, np.ndarray]: meshgrid of (x, y) positions.
"""
ksize_phi_half = kernel_size[0] // 2
ksize_rho_half = kernel_size[1] // 2
y, x = np.meshgrid(
np.arange(-ksize_phi_half, ksize_phi_half + 1),
np.arange(-ksize_rho_half, ksize_rho_half + 1),
indexing="xy",
sparse=True,
)
return x, y
def get_radius(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""Get radius to the image center for a given array of relative positions (x,y).
Args:
x (np.ndarray): x position relative to the image center.
y (np.ndarray): y position relative to the image center.
Returns:
np.ndarray: radius to the image center.
"""
radius = np.sqrt(x**2 + y**2)
return radius
def rotate(x: np.ndarray, y: np.ndarray, angle: float) -> Tuple[np.ndarray, np.ndarray]:
"""Rotate a given array of relative positions (x,y) by a given angle.
Args:
x (np.ndarray): x position.
y (np.ndarray): y position.
angle (float): angle for rotation (in degrees).
Returns:
Tuple[np.ndarray, np.ndarray]: rotated x, y positions.
"""
cos_theta = np.cos(angle * np.pi / 180)
sin_theta = np.sin(angle * np.pi / 180)
rotx = x * cos_theta + y * sin_theta
roty = -x * sin_theta + y * cos_theta
return rotx, roty
def normalize_kernel_values(kernel_values: np.ndarray) -> np.ndarray:
"""Normalize the kernel values so that the square sum is 1.
Args:
kernel_values (np.ndarray): Kernel values (complex numbers).
Returns:
np.ndarray: normalized Kernel values.
"""
norm_real = np.linalg.norm(kernel_values.real, ord="fro")
if norm_real > 0:
kernel_values.real /= norm_real
norm_imag = np.linalg.norm(kernel_values.imag, ord="fro")
if norm_imag > 0:
kernel_values.imag /= norm_imag
return kernel_values
def convert_to_fixpoint_kernelvalues(kernel_values: np.ndarray) -> np.ndarray:
"""Convert the kernel values (both real and imaginary) to fix points.
Args:
kernel_values (np.ndarray): Kernel values.
Returns:
np.ndarray: fix-point Kernel values.
"""
if np.iscomplexobj(kernel_values):
kernel_values.real = np.round(kernel_values.real * 2**15)
kernel_values.imag = np.round(kernel_values.imag * 2**15)
else:
kernel_values = np.round(kernel_values * 2**15)
return kernel_values
class GaborFilter(ImageFilter):
"""Implementation of a 2D Gabor filter.
Reference:
[1] https://inc.ucsd.edu/mplab/75/media//gabor.pdf.
"""
class Parameters(ImageFilter.Parameters):
"""GaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., ge=1)
sigma_rho: float = Field(..., ge=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_phi: float = Field(..., ge=2)
dc_correction: bool
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_Gabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_phi: float,
dc_correction: bool = True,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): phi standard deviation.
sigma_rho (float): rho standard deviation.
theta_degrees (float): orientation of kernel in degrees.
lambda_phi (float): wavelength of the sinusoidal factor, lower value = thinner strip.
dc_correction (bool, optional): whether to enable DC correction. Defaults to True.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_phi=lambda_phi,
dc_correction=dc_correction,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D Gabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
<fim_suffix>
x, y = get_xy_mesh(self.params.kernel_size)
rotx, roty = rotate(x, y, self.params.theta_degrees)
# calculate carrier and envelope
carrier = 1j * 2 * np.pi / self.params.lambda_phi * rotx
envelope = -(rotx**2 / self.params.sigma_phi**2 + roty**2 / self.params.sigma_rho**2) / 2
# calculate kernel values
kernel_values = np.exp(envelope + carrier)
kernel_values /= 2 * np.pi * self.params.sigma_phi * self.params.sigma_rho
# apply DC correction
if self.params.dc_correction:
# Step 1: calculate mean value of Gabor Wavelet
g_mean = np.mean(np.real(kernel_values), axis=-1)
# Step 2: define gaussian offset
correction_term_mean = np.mean(envelope, axis=-1)
# Step 3: subtract gaussian
kernel_values = kernel_values - (g_mean / correction_term_mean)[:, np.newaxis] * envelope
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
class LogGaborFilter(ImageFilter):
"""Implementation of a 2D LogGabor filter.
Reference:
[1] https://en.wikipedia.org/wiki/Log_Gabor_filter.
"""
class Parameters(ImageFilter.Parameters):
"""LogGaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., gt=0, le=np.pi)
sigma_rho: float = Field(..., gt=0.1, le=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_rho: float = Field(..., gt=2)
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_LogGabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_rho: float,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): bandwidth in phi (frequency domain).
sigma_rho (float): bandwidth in rho (frequency domain).
theta_degrees (float): orientation of filter in degrees.
lambda_rho (float): wavelength in rho.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_rho=lambda_rho,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D LogGabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
radius = get_radius(x, y)
# remove 0 radius value in the center
ksize_phi_half = self.params.kernel_size[0] // 2
ksize_rho_half = self.params.kernel_size[1] // 2
radius[ksize_rho_half][ksize_phi_half] = 1
# get angular distance
[rotx, roty] = rotate(x, y, self.params.theta_degrees)
dtheta = np.arctan2(roty, rotx)
# calculate envelope and orientation
envelope = np.exp(
-0.5
* np.log2(radius * self.params.lambda_rho / self.params.kernel_size[1]) ** 2
/ self.params.sigma_rho**2
)
envelope[ksize_rho_half][ksize_phi_half] = 0
orientation = np.exp(-0.5 * dtheta**2 / self.params.sigma_phi**2)
# calculate kernel values
kernel_values = envelope * orientation
kernel_values = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kernel_values)))
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
<fim_middle># convert to polar coordinates | # convert to polar coordinates | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
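The Gabor kernel above is the product of a complex carrier exp(i·2π·rotx/lambda_phi) and a Gaussian envelope in rotated coordinates. A standalone numpy sketch of just that formula follows, with illustrative parameters and without the DC correction, normalization and fixpoint steps.
import numpy as np
# Minimal Gabor kernel from the carrier/envelope formula used above.
ksize = (15, 15)                                  # (width, height), both odd
sigma_phi, sigma_rho = 3.0, 3.0                   # Gaussian standard deviations
theta = np.radians(45.0)                          # kernel orientation
lambda_phi = 8.0                                  # carrier wavelength
half_x, half_y = ksize[0] // 2, ksize[1] // 2
y, x = np.meshgrid(np.arange(-half_x, half_x + 1),
                   np.arange(-half_y, half_y + 1), indexing="xy")
rotx = x * np.cos(theta) + y * np.sin(theta)
roty = -x * np.sin(theta) + y * np.cos(theta)
carrier = 1j * 2 * np.pi / lambda_phi * rotx
envelope = -(rotx**2 / sigma_phi**2 + roty**2 / sigma_rho**2) / 2
kernel = np.exp(envelope + carrier) / (2 * np.pi * sigma_phi * sigma_rho)
print(kernel.shape, kernel.dtype)                 # (15, 15) complex128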
<filename>open-iris/src/iris/nodes/iris_response/probe_schemas/regular_probe_schema.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/io/validators.py
def are_all_positive(cls: type, v: Any, field: fields.ModelField) -> Any:
"""Check that all values are positive.
Args:
cls (type): Class type.
v (Any): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Raise if not all values in are positive.
Returns:
Any: `v` sent for further processing.
"""
if isinstance(v, Iterable):
if not np.array([value >= 0 for value in v]).all():
raise ValueError(f"{cls.__name__}: all {field.name} must be positive. Received {v}")
elif v < 0.0:
raise ValueError(f"{cls.__name__}: {field.name} must be positive. Received {v}")
return v
# open-iris/src/iris/orchestration/validators.py
def pipeline_metadata_version_check(cls: type, v: str, field: fields.ModelField) -> str:
"""Check if the version provided in the input config matches the current iris.__version__."""
if v != __version__:
raise IRISPipelineError(
f"Wrong config version. Cannot initialise IRISPipeline version {__version__} on a config file "
f"version {v}"
)
return v
# open-iris/src/iris/nodes/aggregation/noise_mask_union.py
def run(self, elements: List[NoiseMask]) -> NoiseMask:
"""Compute the union of a list of NoiseMask.
Args:
elements (List[NoiseMask]): input NoiseMasks.
Raises:
ValueError: if not all NoiseMask.mask do not have the same shape.
Returns:
NoiseMask: aggregated NoiseMasks
"""
if not all([mask.mask.shape == elements[0].mask.shape for mask in elements]):
raise ValueError(
f"Every NoiseMask.mask must have the same shape to be aggregated. "
f"Received {[mask.mask.shape for mask in elements]}"
)
noise_union = np.sum([mask.mask for mask in elements], axis=0) > 0
return NoiseMask(mask=noise_union)
"""
from typing import List, Literal, Optional, Tuple, Union
import numpy as np
from pydantic import Field, PositiveInt, confloat, fields, validator
from iris.io.errors import ProbeSchemaError
from iris.nodes.iris_response.probe_schemas.probe_schema_interface import ProbeSchema
class RegularProbeSchema(ProbeSchema):
"""Probe Schema for a regular Grid."""
class RegularProbeSchemaParameters(ProbeSchema.ProbeSchemaParameters):
"""RegularProbeSchema parameters."""
n_rows: int = Field(..., gt=1)
n_cols: int = Field(..., gt=1)
boundary_rho: List[confloat(ge=0.0, lt=1)]
boundary_phi: Union[
Literal["periodic-symmetric", "periodic-left"],
List[confloat(ge=0.0, lt=1)],
]
image_shape: Optional[List[PositiveInt]]
@validator("boundary_rho", "boundary_phi")
def check_overlap(
cls: type,
v: Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]],
field: fields.ModelField,
) -> Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]:
"""Validate offsets to avoid overlap.
Args:
cls (type): Class type.
v (Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ProbeSchemaError: Raised if the left and right offsets overlap, i.e. their sum is not smaller than 1.
Returns:
Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]: The value for boundary_rho or boundary_phi respectively
"""
if isinstance(v, List):
if (v[0] + v[1]) >= 1:
raise ProbeSchemaError(
f"Offset for {field.name} on left and right corner must be a sum smaller 1, otherwise, offsets overlap."
)
return v
__parameters_type__ = RegularProbeSchemaParameters
def __init__(
self,
n_rows: int,
n_cols: int,
boundary_rho: List[float] = [0, 0.0625],
boundary_phi: Union[
Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]
] = "periodic-left",
image_shape: Optional[List[PositiveInt]] = None,
) -> None:
"""Assign parameters.
Args:
n_rows (int): Number of rows used, represents the number of different rho
values
n_cols (int): Number of columns used, represents the number of different
phi values
boundary_rho (List[float], optional): List with two values f1 and f2. The sampling goes from 0+f1 to 1-f2.
boundary_phi (Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]], optional): Boundary conditions for the probing
can either be periodic or non-periodic, if they are periodic, the distance
from one column to the next must be the same also for the boundaries.
Else, no conditions for the boundaries are required. Options are:
- 'periodic-symmetric': the first and the last column are placed with an offset to the
borders, that is half of the spacing of the two columns
- 'periodic-left': the first column is at the border of the bottom of the image, while
the last column is one spacing apart from the top of the image
- list with two values: in this case an offset of value f1 and f2 is set on both ends, i.e. the
sampling no longer goes from 0 to 1 ('no-offset') but instead from 0+f1 to 1-f2.
Defaults to "periodic-left".
image_shape (list, optional): list containing the desired image dimensions. If provided, the function will raise
an error if interpolation happens, i.e. if a kernel would be placed in between two pixels. Defaults to None.
"""
super().__init__(
n_rows=n_rows,
n_cols=n_cols,
boundary_rho=boundary_rho,
boundary_phi=boundary_phi,
image_shape=image_shape,
)
def generate_schema(self) -> Tuple[np.ndarray, np.ndarray]:
"""Generate rhos and phis.
Return:
Tuple[np.ndarray, np.ndarray]: the rhos and phis.
"""
rho = np.linspace(
0 + self.params.boundary_rho[0], 1 - self.params.boundary_rho[1], self.params.n_rows, endpoint=True
)
if self.params.boundary_phi == "periodic-symmetric":
phi = np.linspace(0, 1, self.params.n_cols, endpoint=False)
phi = phi + (phi[1] - phi[0]) / 2
if self.params.boundary_phi == "periodic-left":
phi = np.linspace(0, 1, self.params.n_cols, endpoint=False)
if isinstance(self.params.boundary_phi, List):
phi = np.linspace(
0 + self.params.boundary_phi[0], 1 - self.params.boundary_phi[1], self.params.n_cols, endpoint=True
)
phis, rhos = np.meshgrid(phi, rho)
rhos = rhos.flatten()
phis = phis.flatten()
<fim_suffix>
if self.params.image_shape is not None:
rhos_pixel_values = rhos * self.params.image_shape[0]
phis_pixel_values = phis * self.params.image_shape[1]
rho_pixel_values = np.logical_or(
np.less_equal(rhos_pixel_values % 1, 10 ** (-10)),
np.less_equal(1 - 10 ** (-10), rhos_pixel_values % 1),
).all()
phi_pixel_values = np.logical_or(
np.less_equal(phis_pixel_values % 1, 10 ** (-10)),
np.less_equal(1 - 10 ** (-10), phis_pixel_values % 1),
).all()
if not rho_pixel_values:
raise ProbeSchemaError(
f"Choice for n_rows {self.params.n_rows} leads to interpolation errors, please change input variables"
)
if not phi_pixel_values:
raise ProbeSchemaError(f"Choice for n_cols {self.params.n_cols} leads to interpolation errors")
return rhos, phis
@staticmethod
def find_suitable_n_rows(
row_min: int,
row_max: int,
length: int,
boundary_condition: Union[
Literal["periodic-symmetric", "periodic-left"],
List[float],
] = "periodic_symmetric",
) -> List[int]:
"""Find proper spacing of rows/columns for given boundary conditions (i.e. image size, offset. etc).
Args:
row_min (int): Starting value for row count
row_max (int): End value for row count
length (int): Pixels in the respective dimension
boundary_condition (Union[Literal["periodic-symmetric", "periodic-left"], List[float]], optional): Boundary conditions for the probing
can either be periodic or non-periodic, if they are periodic, the distance
from one row to the next must be the same also for the boundaries.
Else, no conditions for the boundaries are required. Options are:
- 'periodic-symmetric': the first and the last row are placed with an offset to the
borders, that is half of the spacing of the two rows
- 'periodic-left': the first row is at the border of the bottom of the image, while
the last row is one spacing apart from the top of the image
- list with two values: in this case an offset of value f1 and f2 is set on both ends, i.e. the
sampling no longer goes from 0 to 1 ('no-offset') but instead from 0+f1 to 1-f2.
Defaults to "periodic_symmetric".
Returns:
list: List of all number of rows that does not lead to interpolation errors
"""
suitable_values: List[int] = []
# loop through all values and validate whether they are suitable
for counter in range(row_min, row_max + 1):
if boundary_condition == "periodic-symmetric":
values = np.linspace(0, 1, counter, endpoint=False)
values = values + (values[1] - values[0]) / 2
if boundary_condition == "periodic-left":
values = np.linspace(0, 1, counter, endpoint=False)
if isinstance(boundary_condition, List):
values = np.linspace(0 + boundary_condition[0], 1 - boundary_condition[1], counter, endpoint=True)
pixel_values = values * length
pixel_values_modulo = pixel_values % 1
no_interpolation = np.less_equal(pixel_values_modulo, 10 ** (-10))
no_interpolation = np.logical_or(no_interpolation, np.less_equal(1 - 10 ** (-10), pixel_values_modulo))
no_interpolation = no_interpolation.all()
if no_interpolation:
suitable_values.append(counter)
return suitable_values
<fim_middle># if image_shape provided: verify that values lie on pixel values | # if image_shape provided: verify that values lie on pixel values | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
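The interpolation check described above (and reused by `find_suitable_n_rows`) reduces to: every normalized probe position times the image size must land on an integer pixel position within 1e-10. A small standalone sketch of that test for the 'periodic-left' layout follows; the function name and the example numbers are illustrative.
import numpy as np
# True if a 'periodic-left' grid of n_cols normalized positions lands exactly
# on integer pixel positions of an image of the given width (up to eps).
def is_pixel_aligned(n_cols: int, width: int, eps: float = 1e-10) -> bool:
    values = np.linspace(0, 1, n_cols, endpoint=False)
    frac = (values * width) % 1
    return bool(np.logical_or(frac <= eps, frac >= 1 - eps).all())
print(is_pixel_aligned(n_cols=128, width=1024))   # True: spacing of exactly 8 px
print(is_pixel_aligned(n_cols=100, width=1024))   # False: spacing of 10.24 px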
<filename>open-iris/src/iris/nodes/eye_properties_estimation/bisectors_method.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/geometry_refinement/smoothing.py
def _smooth_circular_shape(self, vertices: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:
"""Smooth arc in a form of a circular shape.
Args:
vertices (np.ndarray): Arc's vertices.
center_xy (Tuple[float, float]): Center of an entire contour.
Returns:
np.ndarray: Smoothed arc's vertices.
"""
rho, phi = math.cartesian2polar(vertices[:, 0], vertices[:, 1], *center_xy)
padded_phi = np.concatenate([phi - 2 * np.pi, phi, phi + 2 * np.pi])
padded_rho = np.concatenate([rho, rho, rho])
smoothed_phi, smoothed_rho = self._smooth_array(padded_phi, padded_rho)
mask = (smoothed_phi >= 0) & (smoothed_phi < 2 * np.pi)
rho_smoothed, phi_smoothed = smoothed_rho[mask], smoothed_phi[mask]
x_smoothed, y_smoothed = math.polar2cartesian(rho_smoothed, phi_smoothed, *center_xy)
return np.column_stack([x_smoothed, y_smoothed])
# open-iris/src/iris/utils/math.py
def polygon_length(polygon: np.ndarray, max_point_distance: int = 20) -> float:
"""Compute the length of a polygon represented as a (_, 2)-dimensionnal numpy array.
One polygon can include several disjoint arcs, which should be identified as separate so that the distance
between them is not counted. If a polygon is made of two small arcs separated by a large distance, then the large
distance between the two arcs will not be counted in the polygon's length.
WARNING: The input polygon is assumed to be non-looped, i.e. the first and last points are not equal,
which is the case for all our GeometryPolygons. The last implicit segment looping back from the
last to the first point is therefore not included in the computed polygon length.
Args:
polygon (np.ndarray): (_, 2) - shaped numpy array representing a polygon.
max_point_distance (int): Maximum distance between two points for them to be considered part of the same arc.
Returns:
float: length of the polygon, in pixels.
"""
if polygon.ndim != 2 or polygon.shape[1] != 2:
raise ValueError(f"This function expects a polygon, i.e. an array of shape (_, 2). Got {polygon.shape}")
inter_point_distances = np.linalg.norm(np.roll(polygon, 1, axis=0) - polygon, axis=1)
inter_point_distances = inter_point_distances[inter_point_distances < max_point_distance]
return inter_point_distances.sum()
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def check_intermediate_radiuses(cls: type, v: Collection[float]) -> Collection[float]:
"""Check intermediate_radiuses parameter.
Args:
cls (type): PerspectiveNormalization.Parameters class.
v (Collection[float]): Variable value to check.
Raises:
NormalizationError: Raised if the number of radiuses is invalid, the min value is less than 0.0, or the max value is greater than 1.0.
Returns:
Collection[float]: intermediate_radiuses value passed for further processing.
"""
if len(v) < 2:
raise NormalizationError(f"Invalid number of intermediate_radiuses: {len(v)}.")
if min(v) < 0.0:
raise NormalizationError(f"Invalid min value of intermediate_radiuses: {min(v)}.")
if max(v) > 1.0:
raise NormalizationError(f"Invalid max value of intermediate_radiuses: {max(v)}.")
return v
"""
from typing import Tuple
import numpy as np
from pydantic import Field
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeCenters, GeometryPolygons
from iris.io.errors import EyeCentersEstimationError
class BisectorsMethod(Algorithm):
"""Implementation of eye's center estimation algorithm using bisectors method for finding a circle center.
This algorithm samples a given number of bisectors from the pupil and iris polygons, and averages their intersection
to produce the polygon center. This method is robust against noise in the polygons, making it a good choice for
non-perfect shapes. It is also robust to polygons missing parts of the circle arc, making it a good choice for
partially-occluded shapes.
LIMITATIONS:
The iris and pupil can be approximated to circles, when the user is properly gazing at the camera.
This requires that the cases of off-gaze have already been filtered out.
"""
class Parameters(Algorithm.Parameters):
"""Default Parameters for BisectorsMethod algorithm."""
num_bisectors: int = Field(..., gt=0)
min_distance_between_sector_points: float = Field(..., gt=0.0, lt=1.0)
max_iterations: int = Field(..., gt=0)
__parameters_type__ = Parameters
def __init__(
self,
num_bisectors: int = 100,
min_distance_between_sector_points: float = 0.75,
max_iterations: int = 50,
) -> None:
"""Assign parameters.
Args:
num_bisectors (int, optional): Number of bisectors. Defaults to 100.
min_distance_between_sector_points (float, optional): Minimum distance between sectors expressed as a fractional value of a circular shape diameter. Defaults to 0.75.
max_iterations (int, optional): Max iterations for bisector search. Defaults to 50.
"""
super().__init__(
num_bisectors=num_bisectors,
min_distance_between_sector_points=min_distance_between_sector_points,
max_iterations=max_iterations,
)
def run(self, geometries: GeometryPolygons) -> EyeCenters:
"""Estimate eye's iris and pupil centers.
Args:
geometries (GeometryPolygons): Geometry polygons.
Returns:
EyeCenters: Eye's centers object.
"""
pupil_center_x, pupil_center_y = self._find_center_coords(geometries.pupil_array, geometries.pupil_diameter)
iris_center_x, iris_center_y = self._find_center_coords(geometries.iris_array, geometries.iris_diameter)
return EyeCenters(pupil_x=pupil_center_x, pupil_y=pupil_center_y, iris_x=iris_center_x, iris_y=iris_center_y)
def _find_center_coords(self, polygon: np.ndarray, diameter: float) -> Tuple[float, float]:
"""Find center coordinates of a polygon.
Args:
polygon (np.ndarray): np.ndarray.
diameter (float): diameter of the polygon.
Returns:
Tuple[float, float]: Tuple with the center location coordinates (x, y).
"""
min_distance_between_sector_points_in_px = self.params.min_distance_between_sector_points * diameter
first_bisectors_point, second_bisectors_point = self._calculate_perpendicular_bisectors(
polygon, min_distance_between_sector_points_in_px
)
return self._find_best_intersection(first_bisectors_point, second_bisectors_point)
def _calculate_perpendicular_bisectors(
self, polygon: np.ndarray, min_distance_between_sector_points_in_px: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Calculate the perpendicular bisector of self.params.num_bisectors randomly chosen points from a polygon's vertices.
A pair of points is used if their distance is larger than min_distance_between_sector_points_in_px.
Args:
polygon (np.ndarray): np.ndarray based on which we are searching the center of a circular shape.
min_distance_between_sector_points_in_px (float): Minimum distance between sector points.
Raises:
EyeCentersEstimationError: Raised if not able to find enough random pairs of points on the arc with a large enough distance!
Returns:
Tuple[np.ndarray, np.ndarray]: Calculated perpendicular bisectors.
"""
np.random.seed(142857)
bisectors_first_points = np.empty([0, 2])
bisectors_second_points = np.empty([0, 2])
for _ in range(self.params.max_iterations):
random_indices = np.random.choice(len(polygon), size=(self.params.num_bisectors, 2))
first_drawn_points = polygon[random_indices[:, 0]]
second_drawn_points = polygon[random_indices[:, 1]]
norms = np.linalg.norm(first_drawn_points - second_drawn_points, axis=1)
mask = norms > min_distance_between_sector_points_in_px
bisectors_first_points = np.vstack([bisectors_first_points, first_drawn_points[mask]])
bisectors_second_points = np.vstack([bisectors_second_points, second_drawn_points[mask]])
if len(bisectors_first_points) >= self.params.num_bisectors:
break
else:
raise EyeCentersEstimationError(
"Not able to find enough random pairs of points on the arc with a large enough distance!"
)
bisectors_first_points = bisectors_first_points[: self.params.num_bisectors]
bisectors_second_points = bisectors_second_points[: self.params.num_bisectors]
bisectors_center = (bisectors_first_points + bisectors_second_points) / 2
<fim_suffix>
inv_bisectors_center_slope = np.fliplr(bisectors_second_points - bisectors_first_points)
inv_bisectors_center_slope[:, 1] = -inv_bisectors_center_slope[:, 1]
# Add perpendicular vector to center and normalize
norm = np.linalg.norm(inv_bisectors_center_slope, axis=1)
inv_bisectors_center_slope[:, 0] /= norm
inv_bisectors_center_slope[:, 1] /= norm
first_bisectors_point = bisectors_center - inv_bisectors_center_slope
second_bisectors_point = bisectors_center + inv_bisectors_center_slope
return first_bisectors_point, second_bisectors_point
def _find_best_intersection(self, fst_points: np.ndarray, sec_points: np.ndarray) -> Tuple[float, float]:
"""fst_points and sec_points are NxD arrays defining N lines. D is the dimension of the space.
This function returns the least squares intersection of the N lines from the system given by eq. 13 in
http://cal.cs.illinois.edu/~johannes/research/LS_line_intersecpdf.
Args:
fst_points (np.ndarray): First bisectors points.
sec_points (np.ndarray): Second bisectors points.
Returns:
Tuple[float, float]: Best intersection point.
Reference:
[1] http://cal.cs.illinois.edu/~johannes/research/LS_line_intersecpdf
"""
norm_bisectors = (sec_points - fst_points) / np.linalg.norm(sec_points - fst_points, axis=1)[:, np.newaxis]
# Generate the array of all projectors I - n*n.T
projections = np.eye(norm_bisectors.shape[1]) - norm_bisectors[:, :, np.newaxis] * norm_bisectors[:, np.newaxis]
# Generate R matrix and q vector
R = projections.sum(axis=0)
q = (projections @ fst_points[:, :, np.newaxis]).sum(axis=0)
# Solve the least squares problem for the intersection point p: Rp = q
p = np.linalg.lstsq(R, q, rcond=None)[0]
intersection_x, intersection_y = p
return intersection_x.item(), intersection_y.item()
<fim_middle># Flip xs with ys and flip sign of on of them to create a 90deg rotation | # Flip xs with ys and flip sign of on of them to create a 90deg rotation | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>open-iris/src/iris/nodes/iris_response/image_filters/gabor_filters.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/iris_response/image_filters/image_filter_interface.py
def compute_kernel_values(self) -> np.ndarray:
"""Compute values of filter kernel.
Returns:
np.ndarray: Computed kernel values.
"""
pass
# open-iris/src/iris/nodes/geometry_refinement/smoothing.py
def _find_start_index(self, phi: np.ndarray) -> int:
"""Find the start index by checking the largest gap. phi needs to be sorted.
Args:
phi (np.ndarray): phi angle values.
Raises:
GeometryRefinementError: Raised if phi values are not sorted ascendingly.
Returns:
int: Index value.
"""
if not np.all((phi - np.roll(phi, 1))[1:] >= 0):
raise GeometryRefinementError("Smoothing._find_start_index phi must be sorted ascendingly!")
phi_tmp = np.concatenate(([phi[-1] - 2 * np.pi], phi, [phi[0] + 2 * np.pi]))
phi_tmp_left_neighbor = np.roll(phi_tmp, 1)
dphi = (phi_tmp - phi_tmp_left_neighbor)[1:-1]
largest_gap_index = np.argmax(dphi)
return int(largest_gap_index)
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def cartesian2homogeneous(points: List[np.ndarray]) -> np.ndarray:
"""Convert points in cartesian coordinates to homogeneous coordinates.
Args:
points (List[np.ndarray]): Points in cartesian coordinates. Array should be in format: [[x values], [y values]].
Returns:
np.ndarray: Points in homogeneous coordinates. Returned array will have format: [[x values], [y values], [1 ... 1]].
"""
x_coords, y_coords = points
x_coords = x_coords.reshape(-1, 1)
y_coords = y_coords.reshape(-1, 1)
homogeneous_coords = np.hstack([x_coords, y_coords, np.ones((len(x_coords), 1))])
return homogeneous_coords.T
"""
from typing import Any, Dict, Tuple
import numpy as np
from pydantic import Field, conint, root_validator, validator
import iris.io.validators as pydantic_v
from iris.io.errors import ImageFilterError
from iris.nodes.iris_response.image_filters.image_filter_interface import ImageFilter
def upper_bound_Gabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bounds of Gabor filter parameters such as sigma_phi, sigma_rho and lambda_phi for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: Raised if 1) sigma_phi is greater than kernel_size[0], 2) sigma_rho is greater than kernel_size[1], 3) lambda_phi greater than kernel_size[0].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, sigma_phi, sigma_rho, lambda_phi = (
values["kernel_size"],
values["sigma_phi"],
values["sigma_rho"],
values["lambda_phi"],
)
if sigma_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: sigma_phi can not be greater than kernel_size[0].")
if sigma_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: sigma_rho can not be greater than kernel_size[1].")
if lambda_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: lambda_phi can not be greater than kernel_size[0].")
return values
def upper_bound_LogGabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bound of LogGabor filter parameter lambda_rho for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: Raised if lambda_rho is greater than kernel_size[1].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, lambda_rho = values["kernel_size"], values["lambda_rho"]
if lambda_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: lambda_rho can not be greater than kernel_size[1].")
return values
def get_xy_mesh(kernel_size: Tuple[int, int]) -> Tuple[np.ndarray, np.ndarray]:
"""Get (x,y) meshgrids for a given kernel size.
Args:
kernel_size (Tuple[int, int]): Kernel width and height.
Returns:
Tuple[np.ndarray, np.ndarray]: meshgrid of (x, y) positions.
"""
ksize_phi_half = kernel_size[0] // 2
ksize_rho_half = kernel_size[1] // 2
y, x = np.meshgrid(
np.arange(-ksize_phi_half, ksize_phi_half + 1),
np.arange(-ksize_rho_half, ksize_rho_half + 1),
indexing="xy",
sparse=True,
)
return x, y
def get_radius(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""Get radius to the image center for a given array of relative positions (x,y).
Args:
x (np.ndarray): x position relative to the image center.
y (np.ndarray): y position relative to the image center.
Returns:
np.ndarray: radius to the image center.
"""
radius = np.sqrt(x**2 + y**2)
return radius
def rotate(x: np.ndarray, y: np.ndarray, angle: float) -> Tuple[np.ndarray, np.ndarray]:
"""Rotate a given array of relative positions (x,y) by a given angle.
Args:
x (np.ndarray): x position.
y (np.ndarray): y position.
angle (float): angle for rotation (in degrees).
Returns:
Tuple[np.ndarray, np.ndarray]: rotated x, y positions.
"""
cos_theta = np.cos(angle * np.pi / 180)
sin_theta = np.sin(angle * np.pi / 180)
rotx = x * cos_theta + y * sin_theta
roty = -x * sin_theta + y * cos_theta
return rotx, roty
def normalize_kernel_values(kernel_values: np.ndarray) -> np.ndarray:
"""Normalize the kernel values so that the square sum is 1.
Args:
kernel_values (np.ndarray): Kernel values (complex numbers).
Returns:
np.ndarray: normalized Kernel values.
"""
norm_real = np.linalg.norm(kernel_values.real, ord="fro")
if norm_real > 0:
kernel_values.real /= norm_real
norm_imag = np.linalg.norm(kernel_values.imag, ord="fro")
if norm_imag > 0:
kernel_values.imag /= norm_imag
return kernel_values
def convert_to_fixpoint_kernelvalues(kernel_values: np.ndarray) -> np.ndarray:
"""Convert the kernel values (both real and imaginary) to fix points.
Args:
kernel_values (np.ndarray): Kernel values.
Returns:
np.ndarray: fix-point Kernel values.
"""
if np.iscomplexobj(kernel_values):
kernel_values.real = np.round(kernel_values.real * 2**15)
kernel_values.imag = np.round(kernel_values.imag * 2**15)
else:
kernel_values = np.round(kernel_values * 2**15)
return kernel_values
class GaborFilter(ImageFilter):
"""Implementation of a 2D Gabor filter.
Reference:
[1] https://inc.ucsd.edu/mplab/75/media//gabor.pdf.
"""
class Parameters(ImageFilter.Parameters):
"""GaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., ge=1)
sigma_rho: float = Field(..., ge=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_phi: float = Field(..., ge=2)
dc_correction: bool
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_Gabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_phi: float,
dc_correction: bool = True,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): phi standard deviation.
sigma_rho (float): rho standard deviation.
theta_degrees (float): orientation of kernel in degrees.
lambda_phi (float): wavelength of the sinusoidal factor, lower value = thinner strip.
dc_correction (bool, optional): whether to enable DC correction. Defaults to True.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_phi=lambda_phi,
dc_correction=dc_correction,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D Gabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
rotx, roty = rotate(x, y, self.params.theta_degrees)
# calculate carrier and envelope
carrier = 1j * 2 * np.pi / self.params.lambda_phi * rotx
envelope = -(rotx**2 / self.params.sigma_phi**2 + roty**2 / self.params.sigma_rho**2) / 2
# calculate kernel values
kernel_values = np.exp(envelope + carrier)
kernel_values /= 2 * np.pi * self.params.sigma_phi * self.params.sigma_rho
# apply DC correction
if self.params.dc_correction:
<fim_suffix>
g_mean = np.mean(np.real(kernel_values), axis=-1)
# Step 2: define gaussian offset
correction_term_mean = np.mean(envelope, axis=-1)
# Step 3: subtract gaussian
kernel_values = kernel_values - (g_mean / correction_term_mean)[:, np.newaxis] * envelope
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
class LogGaborFilter(ImageFilter):
"""Implementation of a 2D LogGabor filter.
Reference:
[1] https://en.wikipedia.org/wiki/Log_Gabor_filter.
"""
class Parameters(ImageFilter.Parameters):
"""LogGaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., gt=0, le=np.pi)
sigma_rho: float = Field(..., gt=0.1, le=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_rho: float = Field(..., gt=2)
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_LogGabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_rho: float,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): bandwidth in phi (frequency domain).
sigma_rho (float): bandwidth in rho (frequency domain).
theta_degrees (float): orientation of filter in degrees.
lambda_rho (float): wavelength in rho.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_rho=lambda_rho,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D LogGabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
radius = get_radius(x, y)
# remove 0 radius value in the center
ksize_phi_half = self.params.kernel_size[0] // 2
ksize_rho_half = self.params.kernel_size[1] // 2
radius[ksize_rho_half][ksize_phi_half] = 1
# get angular distance
[rotx, roty] = rotate(x, y, self.params.theta_degrees)
dtheta = np.arctan2(roty, rotx)
# calculate envelope and orientation
envelope = np.exp(
-0.5
* np.log2(radius * self.params.lambda_rho / self.params.kernel_size[1]) ** 2
/ self.params.sigma_rho**2
)
envelope[ksize_rho_half][ksize_phi_half] = 0
orientation = np.exp(-0.5 * dtheta**2 / self.params.sigma_phi**2)
# calculate kernel values
kernel_values = envelope * orientation
kernel_values = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kernel_values)))
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
<fim_middle># Step 1: calculate mean value of Gabor Wavelet | # Step 1: calculate mean value of Gabor Wavelet | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
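A quick aside on the DC correction completed just above (steps 1-3): it is easier to see in a standalone numpy sketch. The snippet below is an illustration only, not open-iris code; the parameter values are invented, and it simply rebuilds a small Gabor kernel with the same carrier/envelope decomposition and checks that the correction zeroes the row-wise mean of the kernel's real part.
import numpy as np
# Invented parameters (within the bounds enforced by GaborFilter.Parameters).
ksize = (15, 15)                     # (phi, rho) kernel size, both odd
sigma_phi, sigma_rho = 3.0, 3.0
lambda_phi = 6.0
theta = np.deg2rad(30.0)
half_phi, half_rho = ksize[0] // 2, ksize[1] // 2
x, y = np.meshgrid(np.arange(-half_phi, half_phi + 1), np.arange(-half_rho, half_rho + 1))
rotx = x * np.cos(theta) + y * np.sin(theta)
roty = -x * np.sin(theta) + y * np.cos(theta)
carrier = 1j * 2 * np.pi / lambda_phi * rotx
envelope = -(rotx**2 / sigma_phi**2 + roty**2 / sigma_rho**2) / 2
kernel = np.exp(envelope + carrier) / (2 * np.pi * sigma_phi * sigma_rho)
# Steps 1-3 from the completion above: subtract a scaled Gaussian so the row means cancel.
g_mean = np.mean(np.real(kernel), axis=-1)
correction_term_mean = np.mean(envelope, axis=-1)
corrected = kernel - (g_mean / correction_term_mean)[:, np.newaxis] * envelope
print(np.abs(np.real(kernel).mean(axis=-1)).max())       # per-row DC before correction (nonzero)
print(np.abs(np.real(corrected).mean(axis=-1)).max())     # ~0 after correction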
<filename>open-iris/src/iris/nodes/iris_response/image_filters/gabor_filters.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/iris_response/image_filters/image_filter_interface.py
def compute_kernel_values(self) -> np.ndarray:
"""Compute values of filter kernel.
Returns:
np.ndarray: Computed kernel values.
"""
pass
# open-iris/src/iris/nodes/geometry_refinement/smoothing.py
def _find_start_index(self, phi: np.ndarray) -> int:
"""Find the start index by checking the largest gap. phi needs to be sorted.
Args:
phi (np.ndarray): phi angle values.
Raises:
GeometryRefinementError: Raised if phi values are not sorted ascendingly.
Returns:
int: Index value.
"""
if not np.all((phi - np.roll(phi, 1))[1:] >= 0):
raise GeometryRefinementError("Smoothing._find_start_index phi must be sorted ascendingly!")
phi_tmp = np.concatenate(([phi[-1] - 2 * np.pi], phi, [phi[0] + 2 * np.pi]))
phi_tmp_left_neighbor = np.roll(phi_tmp, 1)
dphi = (phi_tmp - phi_tmp_left_neighbor)[1:-1]
largest_gap_index = np.argmax(dphi)
return int(largest_gap_index)
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def cartesian2homogeneous(points: List[np.ndarray]) -> np.ndarray:
"""Convert points in cartesian coordinates to homogeneous coordinates.
Args:
points (List[np.ndarray]): Points in cartesian coordinates. Array should be in format: [[x values], [y values]].
Returns:
np.ndarray: Points in homogeneous coordinates. Returned array will have format: [[x values], [y values], [1 ... 1]].
"""
x_coords, y_coords = points
x_coords = x_coords.reshape(-1, 1)
y_coords = y_coords.reshape(-1, 1)
homogeneous_coords = np.hstack([x_coords, y_coords, np.ones((len(x_coords), 1))])
return homogeneous_coords.T
"""
from typing import Any, Dict, Tuple
import numpy as np
from pydantic import Field, conint, root_validator, validator
import iris.io.validators as pydantic_v
from iris.io.errors import ImageFilterError
from iris.nodes.iris_response.image_filters.image_filter_interface import ImageFilter
def upper_bound_Gabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bounds of Gabor filter parameters such as sigma_phi, sigma_rho and lambda_phi for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: Raised if 1) sigma_phi is greater than or equal to kernel_size[0], 2) sigma_rho is greater than or equal to kernel_size[1], 3) lambda_phi is greater than or equal to kernel_size[0].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, sigma_phi, sigma_rho, lambda_phi = (
values["kernel_size"],
values["sigma_phi"],
values["sigma_rho"],
values["lambda_phi"],
)
if sigma_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: sigma_phi can not be greater than kernel_size[0].")
if sigma_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: sigma_rho can not be greater than kernel_size[1].")
if lambda_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: lambda_phi can not be greater than kernel_size[0].")
return values
def upper_bound_LogGabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bound of LogGabor filter parameter lambda_rho for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: Raised if lambda_rho is greater than or equal to kernel_size[1].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, lambda_rho = values["kernel_size"], values["lambda_rho"]
if lambda_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: lambda_rho can not be greater than kernel_size[1].")
return values
def get_xy_mesh(kernel_size: Tuple[int, int]) -> Tuple[np.ndarray, np.ndarray]:
"""Get (x,y) meshgrids for a given kernel size.
Args:
kernel_size (Tuple[int, int]): Kernel width and height.
Returns:
Tuple[np.ndarray, np.ndarray]: meshgrid of (x, y) positions.
"""
ksize_phi_half = kernel_size[0] // 2
ksize_rho_half = kernel_size[1] // 2
y, x = np.meshgrid(
np.arange(-ksize_phi_half, ksize_phi_half + 1),
np.arange(-ksize_rho_half, ksize_rho_half + 1),
indexing="xy",
sparse=True,
)
return x, y
def get_radius(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""Get radius to the image center for a given array of relative positions (x,y).
Args:
x (np.ndarray): x position relative to the image center.
y (np.ndarray): y position relative to the image center.
Returns:
np.ndarray: radius to the image center.
"""
radius = np.sqrt(x**2 + y**2)
return radius
def rotate(x: np.ndarray, y: np.ndarray, angle: float) -> Tuple[np.ndarray, np.ndarray]:
"""Rotate a given array of relative positions (x,y) by a given angle.
Args:
x (np.ndarray): x position.
y (np.ndarray): y position.
angle (float): angle for rotation (in degrees).
Returns:
Tuple[np.ndarray, np.ndarray]: rotated x, y positions.
"""
cos_theta = np.cos(angle * np.pi / 180)
sin_theta = np.sin(angle * np.pi / 180)
rotx = x * cos_theta + y * sin_theta
roty = -x * sin_theta + y * cos_theta
return rotx, roty
def normalize_kernel_values(kernel_values: np.ndarray) -> np.ndarray:
"""Normalize the kernel values so that the square sum is 1.
Args:
kernel_values (np.ndarray): Kernel values (complex numbers).
Returns:
np.ndarray: normalized Kernel values.
"""
norm_real = np.linalg.norm(kernel_values.real, ord="fro")
if norm_real > 0:
kernel_values.real /= norm_real
norm_imag = np.linalg.norm(kernel_values.imag, ord="fro")
if norm_imag > 0:
kernel_values.imag /= norm_imag
return kernel_values
def convert_to_fixpoint_kernelvalues(kernel_values: np.ndarray) -> np.ndarray:
"""Convert the kernel values (both real and imaginary) to fix points.
Args:
kernel_values (np.ndarray): Kernel values.
Returns:
np.ndarray: fixed-point Kernel values.
"""
if np.iscomplexobj(kernel_values):
kernel_values.real = np.round(kernel_values.real * 2**15)
kernel_values.imag = np.round(kernel_values.imag * 2**15)
else:
kernel_values = np.round(kernel_values * 2**15)
return kernel_values
class GaborFilter(ImageFilter):
"""Implementation of a 2D Gabor filter.
Reference:
[1] https://inc.ucsd.edu/mplab/75/media//gabor.pdf.
"""
class Parameters(ImageFilter.Parameters):
"""GaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., ge=1)
sigma_rho: float = Field(..., ge=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_phi: float = Field(..., ge=2)
dc_correction: bool
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_Gabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_phi: float,
dc_correction: bool = True,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): phi standard deviation.
sigma_rho (float): rho standard deviation.
theta_degrees (float): orientation of kernel in degrees.
lambda_phi (float): wavelength of the sinusoidal factor, lower value = thinner strip.
dc_correction (bool, optional): whether to enable DC correction. Defaults to True.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_phi=lambda_phi,
dc_correction=dc_correction,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D Gabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
rotx, roty = rotate(x, y, self.params.theta_degrees)
<fim_suffix>
carrier = 1j * 2 * np.pi / self.params.lambda_phi * rotx
envelope = -(rotx**2 / self.params.sigma_phi**2 + roty**2 / self.params.sigma_rho**2) / 2
# calculate kernel values
kernel_values = np.exp(envelope + carrier)
kernel_values /= 2 * np.pi * self.params.sigma_phi * self.params.sigma_rho
# apply DC correction
if self.params.dc_correction:
# Step 1: calculate mean value of Gabor Wavelet
g_mean = np.mean(np.real(kernel_values), axis=-1)
# Step 2: define gaussian offset
correction_term_mean = np.mean(envelope, axis=-1)
# Step 3: subtract gaussian
kernel_values = kernel_values - (g_mean / correction_term_mean)[:, np.newaxis] * envelope
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
class LogGaborFilter(ImageFilter):
"""Implementation of a 2D LogGabor filter.
Reference:
[1] https://en.wikipedia.org/wiki/Log_Gabor_filter.
"""
class Parameters(ImageFilter.Parameters):
"""LogGaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., gt=0, le=np.pi)
sigma_rho: float = Field(..., gt=0.1, le=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_rho: float = Field(..., gt=2)
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_LogGabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_rho: float,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): bandwidth in phi (frequency domain).
sigma_rho (float): bandwidth in rho (frequency domain).
theta_degrees (float): orientation of filter in degrees.
lambda_rho (float): wavelength in rho.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_rho=lambda_rho,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D LogGabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
radius = get_radius(x, y)
# remove 0 radius value in the center
ksize_phi_half = self.params.kernel_size[0] // 2
ksize_rho_half = self.params.kernel_size[1] // 2
radius[ksize_rho_half][ksize_phi_half] = 1
# get angular distance
[rotx, roty] = rotate(x, y, self.params.theta_degrees)
dtheta = np.arctan2(roty, rotx)
# calculate envelope and orientation
envelope = np.exp(
-0.5
* np.log2(radius * self.params.lambda_rho / self.params.kernel_size[1]) ** 2
/ self.params.sigma_rho**2
)
envelope[ksize_rho_half][ksize_phi_half] = 0
orientation = np.exp(-0.5 * dtheta**2 / self.params.sigma_phi**2)
# calculate kernel values
kernel_values = envelope * orientation
kernel_values = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kernel_values)))
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
<fim_middle># calculate carrier and envelope | # calculate carrier and envelope | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
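As a complement to the helpers shown above, here is a short, self-contained sketch of what normalize_kernel_values and convert_to_fixpoint_kernelvalues do to a complex kernel. Illustration only: the random kernel and the Q1.15-style reading of the 2**15 scaling are assumptions, not open-iris code.
import numpy as np
rng = np.random.default_rng(0)
kernel = rng.standard_normal((5, 5)) + 1j * rng.standard_normal((5, 5))
# Frobenius-normalize the real and imaginary parts separately, as normalize_kernel_values does.
kernel.real /= np.linalg.norm(kernel.real, ord="fro")
kernel.imag /= np.linalg.norm(kernel.imag, ord="fro")
print(np.linalg.norm(kernel.real, ord="fro"), np.linalg.norm(kernel.imag, ord="fro"))  # both ~1.0
# Scale by 2**15 and round, as convert_to_fixpoint_kernelvalues does for complex input.
fixpoint = kernel.copy()
fixpoint.real = np.round(fixpoint.real * 2**15)
fixpoint.imag = np.round(fixpoint.imag * 2**15)
print(fixpoint.real.min(), fixpoint.real.max())  # integer-valued floats within [-2**15, 2**15]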
<filename>open-iris/src/iris/nodes/normalization/perspective_normalization.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def _normalize_all(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize all points of an image using bilinear.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): original input image points.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask.
"""
src_shape = src_points.shape[0:2]
src_points = np.vstack(src_points)
normalized_image = np.array(
[interpolate_pixel_intensity(original_image, pixel_coords=image_xy) for image_xy in src_points]
)
normalized_image = np.reshape(normalized_image, src_shape)
normalized_mask = np.array(
[self.get_pixel_binary(iris_mask, image_xy[0], image_xy[1]) for image_xy in src_points]
)
normalized_mask = np.reshape(normalized_mask, src_shape)
return normalized_image / 255.0, normalized_mask
# open-iris/src/iris/io/dataclasses.py
def filled_iris_mask(self) -> np.ndarray:
"""Fill iris mask.
Returns:
np.ndarray: Iris mask with filled pupil "holes".
"""
binary_maps = np.zeros(self.iris_mask.shape[:2], dtype=np.uint8)
binary_maps += self.pupil_mask
binary_maps += self.iris_mask
return binary_maps.astype(bool)
# open-iris/src/iris/nodes/normalization/common.py
def get_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return 0.0
"""
from typing import Collection, List, Tuple
import cv2
import numpy as np
from pydantic import Field, validator
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeOrientation, GeometryPolygons, IRImage, NoiseMask, NormalizedIris
from iris.io.errors import NormalizationError
from iris.nodes.normalization.common import correct_orientation, generate_iris_mask, interpolate_pixel_intensity
class PerspectiveNormalization(Algorithm):
"""Implementation of a normalization algorithm which uses perspective transformation to map image pixels.
Algorithm steps:
1) Create a grid of trapezoids around the iris in the original image based on the following algorithm parameters: res_in_phi, res_in_r, intermediate_radiuses.
2) Create a grid of rectangles in the normalized image, one corresponding to each trapezoid.
3) For each corresponding trapezoid-rectangle pair, compute the perspective matrix that estimates the original image location of a normalized image pixel.
4) Map each normalized image pixel to an original image pixel based on the estimated perspective matrix, performing bilinear interpolation if necessary.
"""
class Parameters(Algorithm.Parameters):
"""Parameters class for PerspectiveNormalization."""
res_in_phi: int = Field(..., gt=0)
res_in_r: int = Field(..., gt=0)
skip_boundary_points: int = Field(..., gt=0)
intermediate_radiuses: Collection[float]
oversat_threshold: int = Field(..., gt=0)
@validator("intermediate_radiuses")
def check_intermediate_radiuses(cls: type, v: Collection[float]) -> Collection[float]:
"""Check intermediate_radiuses parameter.
Args:
cls (type): PerspectiveNormalization.Parameters class.
v (Collection[float]): Variable value to check.
Raises:
NormalizationError: Raised if the number of radiuses is invalid, the min value is less than 0.0, or the max value is greater than 1.0.
Returns:
Collection[float]: intermediate_radiuses value passed for further processing.
"""
if len(v) < 2:
raise NormalizationError(f"Invalid number of intermediate_radiuses: {len(v)}.")
if min(v) < 0.0:
raise NormalizationError(f"Invalid min value of intermediate_radiuses: {min(v)}.")
if max(v) > 1.0:
raise NormalizationError(f"Invalid max value of intermediate_radiuses: {max(v)}.")
return v
__parameters_type__ = Parameters
def __init__(
self,
res_in_phi: int = 512,
res_in_r: int = 128,
skip_boundary_points: int = 1,
intermediate_radiuses: Collection[float] = np.linspace(0.0, 1.0, 8),
oversat_threshold: int = 254,
) -> None:
"""Assign parameters.
Args:
res_in_phi (int): Normalized image phi resolution. Defaults to 512.
res_in_r (int): Normalized image r resolution. Defaults to 128.
skip_boundary_points (int, optional): Take every nth point from estimated boundaries when generating correspondences.
Defaults to 1.
intermediate_radiuses (t.Iterable[float], optional): Intermediate rings radiuses used to generate additional points for estimating transformations.
Defaults to np.linspace(0.0, 1.0, 8).
oversat_threshold (int, optional): threshold for masking over-saturated pixels. Defaults to 254.
"""
super().__init__(
res_in_phi=res_in_phi,
res_in_r=res_in_r,
skip_boundary_points=skip_boundary_points,
intermediate_radiuses=intermediate_radiuses,
oversat_threshold=oversat_threshold,
)
def run(
self,
image: IRImage,
noise_mask: NoiseMask,
extrapolated_contours: GeometryPolygons,
eye_orientation: EyeOrientation,
) -> NormalizedIris:
"""Normalize iris using perspective transformation estimated for every region of an image separately.
Args:
image (IRImage): Input image to normalize.
noise_mask (NoiseMask): Noise mask.
extrapolated_contours (GeometryPolygons): Extrapolated contours.
eye_orientation (EyeOrientation): Eye orientation angle.
Returns:
NormalizedIris: NormalizedIris object containing normalized image and iris mask.
"""
if len(extrapolated_contours.pupil_array) != len(extrapolated_contours.iris_array):
raise NormalizationError("Extrapolated amount of iris and pupil points must be the same.")
pupil_points, iris_points = correct_orientation(
extrapolated_contours.pupil_array,
extrapolated_contours.iris_array,
eye_orientation.angle,
)
iris_mask = generate_iris_mask(extrapolated_contours, noise_mask.mask)
iris_mask[image.img_data >= self.params.oversat_threshold] = False
src_points, dst_points = self._generate_correspondences(pupil_points, iris_points)
normalized_iris = NormalizedIris(
normalized_image=np.zeros((self.params.res_in_r, self.params.res_in_phi), dtype=np.float32),
normalized_mask=np.zeros((self.params.res_in_r, self.params.res_in_phi), dtype=bool),
)
for angle_point_idx in range(src_points.shape[1] - 1):
for ring_idx in range(src_points.shape[0] - 1):
current_src, current_dst = self._correspondence_rois_coords(
angle_idx=angle_point_idx,
ring_idx=ring_idx,
src_points=src_points,
dst_points=dst_points,
)
xmin, ymin, xmax, ymax = self._bbox_coords(current_dst)
normalized_image_roi, normalized_mask_roi = self._normalize_roi(
original_image=image.img_data,
iris_mask=iris_mask,
src_points=current_src.astype(np.float32),
dst_points=current_dst.astype(np.float32),
normalize_roi_output_shape=(ymax - ymin, xmax - xmin),
)
normalized_iris.normalized_image[ymin:ymax, xmin:xmax] = normalized_image_roi
normalized_iris.normalized_mask[ymin:ymax, xmin:xmax] = normalized_mask_roi
return normalized_iris
def _generate_correspondences(
self, pupil_points: np.ndarray, iris_points: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""Generate correspondences between points in original image and normalized image.
Args:
pupil_points (np.ndarray): Pupil bounding points. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris bounding points. NumPy array of shape (num_points = 512, xy_coords = 2).
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with generated correspondences.
"""
pupil_points = pupil_points[:: self.params.skip_boundary_points]
iris_points = iris_points[:: self.params.skip_boundary_points]
src_points = []
for radius in self.params.intermediate_radiuses:
ring = pupil_points + radius * (iris_points - pupil_points)
ring = np.vstack([ring, ring[0]])
src_points.append(ring)
src_points = np.array(src_points)
num_rings, num_ring_points = src_points.shape[:2]
dst_xs, dst_ys = np.meshgrid(
np.linspace(0, self.params.res_in_phi, num_ring_points).astype(int),
np.linspace(0, self.params.res_in_r, num_rings).astype(int),
)
dst_points = np.array([dst_xs, dst_ys]).transpose((1, 2, 0))
return src_points, dst_points
def _normalize_roi(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
dst_points: np.ndarray,
normalize_roi_output_shape: Tuple[float, float],
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize a single ROI of an image.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): ROI's original input image points.
dst_points (np.ndarray): ROI's normalized image points.
normalize_roi_output_shape (t.Tuple[float, float]): Output shape of normalized ROI.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask ROIs.
"""
xmin, ymin, xmax, ymax = self._bbox_coords(dst_points)
normalize_image_xs = np.arange(xmin, xmax)
normalize_image_ys = np.arange(ymin, ymax)
normalize_image_points = np.meshgrid(normalize_image_xs, normalize_image_ys)
normalize_image_points = self.cartesian2homogeneous(normalize_image_points)
perspective_mat = cv2.getPerspectiveTransform(dst_points, src_points)
mapped_points = np.matmul(perspective_mat, normalize_image_points)
mapped_points = self.homogeneous2cartesian(mapped_points)
normalized_image_roi = np.zeros(normalize_roi_output_shape, dtype=np.float32)
normalized_mask_roi = np.zeros(normalize_roi_output_shape, dtype=bool)
for image_xy, normalized_xy in zip(mapped_points.T, normalize_image_points.T[..., :2]):
norm_x, norm_y = normalized_xy.astype(int)
shifted_y, shifted_x = norm_y - ymin, norm_x - xmin
normalized_image_roi[shifted_y, shifted_x] = interpolate_pixel_intensity(
original_image, pixel_coords=image_xy
)
<fim_suffix>
except IndexError:
normalized_mask_roi[shifted_y, shifted_x] = False
return normalized_image_roi / 255.0, normalized_mask_roi
def _bbox_coords(self, norm_dst_points: np.ndarray) -> Tuple[int, int, int, int]:
"""Extract the bounding box of currently processed normalized image ROI.
Args:
norm_dst_points (np.ndarray): Normalized image ROI coordinates.
Returns:
Tuple[int, int, int, int]: Bounding box coordinates in form (xmin, ymin, xmax, ymax).
"""
xmin, ymin = norm_dst_points[0].astype(int)
xmax, ymax = norm_dst_points[-1].astype(int)
return (xmin, ymin, xmax, ymax)
def _correspondence_rois_coords(
self,
angle_idx: int,
ring_idx: int,
src_points: np.ndarray,
dst_points: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Generate a single correspondence ROIs between original image and normalized one based on angle index and ring index.
Args:
angle_idx (int): Boundary point angle index.
ring_idx (int): Intermediate ring index.
src_points (np.ndarray): All mapping points from an original image.
NumPy array of shape (
num_intermediate_rings = self.intermediate_radiuses,
num_boundary_points = 512 // self.skip_boundary_points,
xy_coords = 2
).
dst_points (np.ndarray): All mapping points from a normalized image.
NumPy array of shape (
num_intermediate_rings = self.intermediate_radiuses,
num_boundary_points = 512 // self.skip_boundary_points,
xy_coords = 2
).
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with extracted from src_points and dst_points ROIs.
"""
src_roi = src_points[ring_idx : ring_idx + 2, angle_idx : angle_idx + 2]
dst_roi = dst_points[ring_idx : ring_idx + 2, angle_idx : angle_idx + 2]
return src_roi.reshape(4, 2), dst_roi.reshape(4, 2)
@staticmethod
def cartesian2homogeneous(points: List[np.ndarray]) -> np.ndarray:
"""Convert points in cartesian coordinates to homogeneous coordinates.
Args:
points (List[np.ndarray]): Points in cartesian coordinates. Array should be in format: [[x values], [y values]].
Returns:
np.ndarray: Points in homogeneous coordinates. Returned array will have format: [[x values], [y values], [1 ... 1]].
"""
x_coords, y_coords = points
x_coords = x_coords.reshape(-1, 1)
y_coords = y_coords.reshape(-1, 1)
homogeneous_coords = np.hstack([x_coords, y_coords, np.ones((len(x_coords), 1))])
return homogeneous_coords.T
@staticmethod
def homogeneous2cartesian(points: np.ndarray) -> np.ndarray:
"""Convert points in homogeneous coordinates to cartesian coordinates.
Args:
points (np.ndarray): Points in homogeneous coordinates. Array should be in format: [[x values], [y values], [perspective scale values]].
Returns:
np.ndarray: Points in cartesian coordinates. Returned array will have format: [[x values], [y values]].
"""
points /= points[-1]
points = points[:2]
return points
<fim_middle>try:
img_x, img_y = map(int, image_xy)
normalized_mask_roi[shifted_y, shifted_x] = iris_mask[img_y, img_x] | try:
img_x, img_y = map(int, image_xy)
normalized_mask_roi[shifted_y, shifted_x] = iris_mask[img_y, img_x] | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
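The coordinate plumbing in _normalize_roi (cartesian2homogeneous, the perspective matrix, homogeneous2cartesian) can be exercised on its own. The sketch below is a toy illustration with invented corner coordinates; it assumes opencv-python is installed and uses only cv2.getPerspectiveTransform, the same call the class relies on.
import cv2
import numpy as np
# Four invented correspondences: a rectangle in the normalized image and the trapezoid
# it comes from in the original image (same point order in both arrays).
dst = np.array([[0, 0], [64, 0], [0, 16], [64, 16]], dtype=np.float32)
src = np.array([[120, 80], [180, 70], [130, 110], [190, 105]], dtype=np.float32)
# Matrix mapping normalized-image coordinates onto original-image coordinates.
perspective_mat = cv2.getPerspectiveTransform(dst, src)
# cartesian -> homogeneous: stack a row of ones under [x; y], as cartesian2homogeneous does.
xs, ys = np.meshgrid(np.arange(0, 64), np.arange(0, 16))
points_h = np.vstack([xs.reshape(1, -1), ys.reshape(1, -1), np.ones((1, xs.size))])
mapped = perspective_mat @ points_h
mapped = mapped[:2] / mapped[-1]          # homogeneous -> cartesian (divide by the scale row)
print(points_h.shape, mapped.shape)       # (3, 1024) (2, 1024)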
<filename>open-iris/src/iris/nodes/normalization/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def get_pixel_binary(image: np.ndarray, pixel_x: float, pixel_y: float) -> bool:
"""Get the boolean value of a pixel from a binary image.
Args:
image (np.ndarray): Binary image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
bool: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return False
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def _normalize_all(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize all points of an image using bilinear.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): original input image points.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask.
"""
src_shape = src_points.shape[0:2]
src_points = np.vstack(src_points)
normalized_image = np.array(
[interpolate_pixel_intensity(original_image, pixel_coords=image_xy) for image_xy in src_points]
)
normalized_image = np.reshape(normalized_image, src_shape)
normalized_mask = np.array(
[self.get_pixel_binary(iris_mask, image_xy[0], image_xy[1]) for image_xy in src_points]
)
normalized_mask = np.reshape(normalized_mask, src_shape)
return normalized_image / 255.0, normalized_mask
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def _normalize_roi(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
dst_points: np.ndarray,
normalize_roi_output_shape: Tuple[float, float],
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize a single ROI of an image.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): ROI's original input image points.
dst_points (np.ndarray): ROI's normalized image points.
normalize_roi_output_shape (t.Tuple[float, float]): Output shape of normalized ROI.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask ROIs.
"""
xmin, ymin, xmax, ymax = self._bbox_coords(dst_points)
normalize_image_xs = np.arange(xmin, xmax)
normalize_image_ys = np.arange(ymin, ymax)
normalize_image_points = np.meshgrid(normalize_image_xs, normalize_image_ys)
normalize_image_points = self.cartesian2homogeneous(normalize_image_points)
perspective_mat = cv2.getPerspectiveTransform(dst_points, src_points)
mapped_points = np.matmul(perspective_mat, normalize_image_points)
mapped_points = self.homogeneous2cartesian(mapped_points)
normalized_image_roi = np.zeros(normalize_roi_output_shape, dtype=np.float32)
normalized_mask_roi = np.zeros(normalize_roi_output_shape, dtype=bool)
for image_xy, normalized_xy in zip(mapped_points.T, normalize_image_points.T[..., :2]):
norm_x, norm_y = normalized_xy.astype(int)
shifted_y, shifted_x = norm_y - ymin, norm_x - xmin
normalized_image_roi[shifted_y, shifted_x] = interpolate_pixel_intensity(
original_image, pixel_coords=image_xy
)
try:
img_x, img_y = map(int, image_xy)
normalized_mask_roi[shifted_y, shifted_x] = iris_mask[img_y, img_x]
except IndexError:
normalized_mask_roi[shifted_y, shifted_x] = False
return normalized_image_roi / 255.0, normalized_mask_roi
"""
from typing import Tuple
import numpy as np
from pydantic import NonNegativeInt
from iris.io.dataclasses import GeometryPolygons
from iris.utils import common
def generate_iris_mask(extrapolated_contours: GeometryPolygons, noise_mask: np.ndarray) -> np.ndarray:
"""Generate iris mask by first finding the intersection region between extrapolated iris contours and eyeball contours. Then remove from the outputted mask those pixels for which noise_mask is equal to True.
Args:
extrapolated_contours (GeometryPolygons): Iris polygon vertices.
noise_mask (np.ndarray): Noise mask.
Returns:
np.ndarray: Iris mask.
"""
img_h, img_w = noise_mask.shape[:2]
iris_mask = common.contour_to_mask(extrapolated_contours.iris_array, (img_w, img_h))
eyeball_mask = common.contour_to_mask(extrapolated_contours.eyeball_array, (img_w, img_h))
iris_mask = iris_mask & eyeball_mask
iris_mask = ~(iris_mask & noise_mask) & iris_mask
return iris_mask
def correct_orientation(
pupil_points: np.ndarray, iris_points: np.ndarray, eye_orientation: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Correct orientation by changing the starting angle in pupil and iris points' arrays.
Args:
pupil_points (np.ndarray): Pupil boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
eye_orientation (float): Eye orientation angle in radians.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with rotated based on eye_orientation angle boundary points (pupil_points, iris_points).
"""
orientation_angle = np.degrees(eye_orientation)
num_rotations = -round(orientation_angle * len(pupil_points) / 360.0)
pupil_points = np.roll(pupil_points, num_rotations, axis=0)
iris_points = np.roll(iris_points, num_rotations, axis=0)
return pupil_points, iris_points
def getgrids(res_in_r: NonNegativeInt, p2i_ratio: NonNegativeInt) -> np.ndarray:
"""Generate radius grids for nonlinear normalization based on p2i_ratio (pupil_to_iris ratio).
Args:
res_in_r (NonNegativeInt): Normalized image r resolution.
p2i_ratio (NonNegativeInt): pupil_to_iris ratio, range in [0,100]
Returns:
np.ndarray: nonlinear sampling grids for normalization
"""
p = [np.square(x) for x in np.arange(28, max(74 - p2i_ratio, p2i_ratio - 14), 1)]
q = p - p[0]
q = q / q[-1]
grids = np.interp(np.linspace(0, 1.0, res_in_r + 1), np.linspace(0, 1.0, len(q)), q)
return grids[0:-1] + np.diff(grids) / 2
def interpolate_pixel_intensity(image: np.ndarray, pixel_coords: Tuple[float, float]) -> float:
"""Perform bilinear interpolation to estimate pixel intensity in a given location.
Args:
image (np.ndarray): Original, not normalized image.
pixel_coords (Tuple[float, float]): Pixel coordinates.
Returns:
float: Interpolated pixel intensity.
Reference:
[1] https://en.wikipedia.org/wiki/Bilinear_interpolation
"""
def get_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
<fim_suffix>
except IndexError:
return 0.0
def get_interpolation_points_coords(
image: np.ndarray, pixel_x: float, pixel_y: float
) -> Tuple[float, float, float, float]:
"""Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
"""
xmin, ymin = np.floor(pixel_x), np.floor(pixel_y)
xmax, ymax = np.ceil(pixel_x), np.ceil(pixel_y)
img_h, img_w = image.shape[:2]
if xmin == xmax and not xmax == img_w - 1:
xmax += 1
if xmin == xmax and xmax == img_w - 1:
xmin -= 1
if ymin == ymax and not ymax == img_h - 1:
ymax += 1
if ymin == ymax and ymax == img_h - 1:
ymin -= 1
return xmin, ymin, xmax, ymax
pixel_x, pixel_y = pixel_coords
xmin, ymin, xmax, ymax = get_interpolation_points_coords(image, pixel_x=pixel_x, pixel_y=pixel_y)
lower_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymax)
lower_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymax)
upper_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymin)
upper_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymin)
xs_differences = np.array([xmax - pixel_x, pixel_x - xmin])
neighboring_pixel_intensities = np.array(
[
[lower_left_pixel_intensity, upper_left_pixel_intensity],
[lower_right_pixel_intensity, upper_right_pixel_intensity],
]
)
ys_differences = np.array([[pixel_y - ymin], [ymax - pixel_y]])
pixel_intensity = np.matmul(np.matmul(xs_differences, neighboring_pixel_intensities), ys_differences)
return pixel_intensity.item()
<fim_middle>try:
return image[int(pixel_y), int(pixel_x)] | try:
return image[int(pixel_y), int(pixel_x)] | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
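interpolate_pixel_intensity above expresses bilinear interpolation as a 1x2 by 2x2 by 2x1 matrix product. A tiny worked example with toy pixel values (not library code) makes the weighting explicit:
import numpy as np
image = np.array([[10.0, 20.0],
                  [30.0, 40.0]])          # rows index y, columns index x
pixel_x, pixel_y = 0.25, 0.5              # query point inside the 2x2 patch
xmin, xmax, ymin, ymax = 0, 1, 0, 1
xs_differences = np.array([xmax - pixel_x, pixel_x - xmin])
neighboring_pixel_intensities = np.array(
    [
        [image[ymax, xmin], image[ymin, xmin]],   # lower-left, upper-left
        [image[ymax, xmax], image[ymin, xmax]],   # lower-right, upper-right
    ]
)
ys_differences = np.array([[pixel_y - ymin], [ymax - pixel_y]])
value = xs_differences @ neighboring_pixel_intensities @ ys_differences
print(value.item())   # 22.5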
<filename>open-iris/src/iris/nodes/normalization/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def get_pixel_binary(image: np.ndarray, pixel_x: float, pixel_y: float) -> bool:
"""Get the boolean value of a pixel from a binary image.
Args:
image (np.ndarray): Binary image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
bool: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return False
# open-iris/src/iris/nodes/eye_properties_estimation/bisectors_method.py
def _find_center_coords(self, polygon: np.ndarray, diameter: float) -> Tuple[float, float]:
"""Find center coordinates of a polygon.
Args:
polygon (np.ndarray): Polygon vertices array.
diameter (float): diameter of the polygon.
Returns:
Tuple[float, float]: Tuple with the center location coordinates (x, y).
"""
min_distance_between_sector_points_in_px = self.params.min_distance_between_sector_points * diameter
first_bisectors_point, second_bisectors_point = self._calculate_perpendicular_bisectors(
polygon, min_distance_between_sector_points_in_px
)
return self._find_best_intersection(first_bisectors_point, second_bisectors_point)
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def _normalize_roi(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
dst_points: np.ndarray,
normalize_roi_output_shape: Tuple[float, float],
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize a single ROI of an image.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): ROI's original input image points.
dst_points (np.ndarray): ROI's normalized image points.
normalize_roi_output_shape (t.Tuple[float, float]): Output shape of normalized ROI.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask ROIs.
"""
xmin, ymin, xmax, ymax = self._bbox_coords(dst_points)
normalize_image_xs = np.arange(xmin, xmax)
normalize_image_ys = np.arange(ymin, ymax)
normalize_image_points = np.meshgrid(normalize_image_xs, normalize_image_ys)
normalize_image_points = self.cartesian2homogeneous(normalize_image_points)
perspective_mat = cv2.getPerspectiveTransform(dst_points, src_points)
mapped_points = np.matmul(perspective_mat, normalize_image_points)
mapped_points = self.homogeneous2cartesian(mapped_points)
normalized_image_roi = np.zeros(normalize_roi_output_shape, dtype=np.float32)
normalized_mask_roi = np.zeros(normalize_roi_output_shape, dtype=bool)
for image_xy, normalized_xy in zip(mapped_points.T, normalize_image_points.T[..., :2]):
norm_x, norm_y = normalized_xy.astype(int)
shifted_y, shifted_x = norm_y - ymin, norm_x - xmin
normalized_image_roi[shifted_y, shifted_x] = interpolate_pixel_intensity(
original_image, pixel_coords=image_xy
)
try:
img_x, img_y = map(int, image_xy)
normalized_mask_roi[shifted_y, shifted_x] = iris_mask[img_y, img_x]
except IndexError:
normalized_mask_roi[shifted_y, shifted_x] = False
return normalized_image_roi / 255.0, normalized_mask_roi
"""
from typing import Tuple
import numpy as np
from pydantic import NonNegativeInt
from iris.io.dataclasses import GeometryPolygons
from iris.utils import common
def generate_iris_mask(extrapolated_contours: GeometryPolygons, noise_mask: np.ndarray) -> np.ndarray:
"""Generate iris mask by first finding the intersection region between extrapolated iris contours and eyeball contours. Then remove from the outputted mask those pixels for which noise_mask is equal to True.
Args:
extrapolated_contours (GeometryPolygons): Iris polygon vertices.
noise_mask (np.ndarray): Noise mask.
Returns:
np.ndarray: Iris mask.
"""
img_h, img_w = noise_mask.shape[:2]
iris_mask = common.contour_to_mask(extrapolated_contours.iris_array, (img_w, img_h))
eyeball_mask = common.contour_to_mask(extrapolated_contours.eyeball_array, (img_w, img_h))
iris_mask = iris_mask & eyeball_mask
iris_mask = ~(iris_mask & noise_mask) & iris_mask
return iris_mask
def correct_orientation(
pupil_points: np.ndarray, iris_points: np.ndarray, eye_orientation: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Correct orientation by changing the starting angle in pupil and iris points' arrays.
Args:
pupil_points (np.ndarray): Pupil boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
eye_orientation (float): Eye orientation angle in radians.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with rotated based on eye_orientation angle boundary points (pupil_points, iris_points).
"""
orientation_angle = np.degrees(eye_orientation)
num_rotations = -round(orientation_angle * len(pupil_points) / 360.0)
pupil_points = np.roll(pupil_points, num_rotations, axis=0)
iris_points = np.roll(iris_points, num_rotations, axis=0)
return pupil_points, iris_points
def getgrids(res_in_r: NonNegativeInt, p2i_ratio: NonNegativeInt) -> np.ndarray:
"""Generate radius grids for nonlinear normalization based on p2i_ratio (pupil_to_iris ratio).
Args:
res_in_r (NonNegativeInt): Normalized image r resolution.
p2i_ratio (NonNegativeInt): pupil_to_iris ratio, range in [0,100]
Returns:
np.ndarray: nonlinear sampling grids for normalization
"""
p = [np.square(x) for x in np.arange(28, max(74 - p2i_ratio, p2i_ratio - 14), 1)]
q = p - p[0]
q = q / q[-1]
grids = np.interp(np.linspace(0, 1.0, res_in_r + 1), np.linspace(0, 1.0, len(q)), q)
return grids[0:-1] + np.diff(grids) / 2
def interpolate_pixel_intensity(image: np.ndarray, pixel_coords: Tuple[float, float]) -> float:
"""Perform bilinear interpolation to estimate pixel intensity in a given location.
Args:
image (np.ndarray): Original, not normalized image.
pixel_coords (Tuple[float, float]): Pixel coordinates.
Returns:
float: Interpolated pixel intensity.
Reference:
[1] https://en.wikipedia.org/wiki/Bilinear_interpolation
"""
def get_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
<fim_suffix>
except IndexError:
return 0.0
def get_interpolation_points_coords(
image: np.ndarray, pixel_x: float, pixel_y: float
) -> Tuple[float, float, float, float]:
"""Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
"""
xmin, ymin = np.floor(pixel_x), np.floor(pixel_y)
xmax, ymax = np.ceil(pixel_x), np.ceil(pixel_y)
img_h, img_w = image.shape[:2]
if xmin == xmax and not xmax == img_w - 1:
xmax += 1
if xmin == xmax and xmax == img_w - 1:
xmin -= 1
if ymin == ymax and not ymax == img_h - 1:
ymax += 1
if ymin == ymax and ymax == img_h - 1:
ymin -= 1
return xmin, ymin, xmax, ymax
pixel_x, pixel_y = pixel_coords
xmin, ymin, xmax, ymax = get_interpolation_points_coords(image, pixel_x=pixel_x, pixel_y=pixel_y)
lower_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymax)
lower_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymax)
upper_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymin)
upper_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymin)
xs_differences = np.array([xmax - pixel_x, pixel_x - xmin])
neighboring_pixel_intensities = np.array(
[
[lower_left_pixel_intensity, upper_left_pixel_intensity],
[lower_right_pixel_intensity, upper_right_pixel_intensity],
]
)
ys_differences = np.array([[pixel_y - ymin], [ymax - pixel_y]])
pixel_intensity = np.matmul(np.matmul(xs_differences, neighboring_pixel_intensities), ys_differences)
return pixel_intensity.item()
<fim_middle>try:
return image[int(pixel_y), int(pixel_x)] | try:
return image[int(pixel_y), int(pixel_x)] | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
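correct_orientation, shown in full above, simply rolls the boundary arrays so that their starting index compensates for the eye orientation angle. A toy sketch with invented circular boundary points (not library data):
import numpy as np
num_points = 8
angles = np.linspace(0, 2 * np.pi, num_points, endpoint=False)
pupil_points = np.stack([np.cos(angles), np.sin(angles)], axis=1)    # (8, 2) toy boundary
iris_points = 2.0 * pupil_points
eye_orientation = np.radians(45.0)                                   # angle in radians
num_rotations = -round(np.degrees(eye_orientation) * len(pupil_points) / 360.0)
print(num_rotations)                                                 # -1
pupil_rolled = np.roll(pupil_points, num_rotations, axis=0)
iris_rolled = np.roll(iris_points, num_rotations, axis=0)
print(np.allclose(pupil_rolled[0], pupil_points[1]))                 # True: start index shifted by one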
<filename>open-iris/src/iris/nodes/normalization/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def get_pixel_binary(image: np.ndarray, pixel_x: float, pixel_y: float) -> bool:
"""Get the boolean value of a pixel from a binary image.
Args:
image (np.ndarray): Binary image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
bool: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return False
# open-iris/src/iris/nodes/eye_properties_estimation/bisectors_method.py
def _find_center_coords(self, polygon: np.ndarray, diameter: float) -> Tuple[float, float]:
"""Find center coordinates of a polygon.
Args:
polygon (np.ndarray): Polygon vertices array.
diameter (float): diameter of the polygon.
Returns:
Tuple[float, float]: Tuple with the center location coordinates (x, y).
"""
min_distance_between_sector_points_in_px = self.params.min_distance_between_sector_points * diameter
first_bisectors_point, second_bisectors_point = self._calculate_perpendicular_bisectors(
polygon, min_distance_between_sector_points_in_px
)
return self._find_best_intersection(first_bisectors_point, second_bisectors_point)
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def _normalize_roi(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
dst_points: np.ndarray,
normalize_roi_output_shape: Tuple[float, float],
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize a single ROI of an image.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): ROI's original input image points.
dst_points (np.ndarray): ROI's normalized image points.
normalize_roi_output_shape (t.Tuple[float, float]): Output shape of normalized ROI.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask ROIs.
"""
xmin, ymin, xmax, ymax = self._bbox_coords(dst_points)
normalize_image_xs = np.arange(xmin, xmax)
normalize_image_ys = np.arange(ymin, ymax)
normalize_image_points = np.meshgrid(normalize_image_xs, normalize_image_ys)
normalize_image_points = self.cartesian2homogeneous(normalize_image_points)
perspective_mat = cv2.getPerspectiveTransform(dst_points, src_points)
mapped_points = np.matmul(perspective_mat, normalize_image_points)
mapped_points = self.homogeneous2cartesian(mapped_points)
normalized_image_roi = np.zeros(normalize_roi_output_shape, dtype=np.float32)
normalized_mask_roi = np.zeros(normalize_roi_output_shape, dtype=bool)
for image_xy, normalized_xy in zip(mapped_points.T, normalize_image_points.T[..., :2]):
norm_x, norm_y = normalized_xy.astype(int)
shifted_y, shifted_x = norm_y - ymin, norm_x - xmin
normalized_image_roi[shifted_y, shifted_x] = interpolate_pixel_intensity(
original_image, pixel_coords=image_xy
)
try:
img_x, img_y = map(int, image_xy)
normalized_mask_roi[shifted_y, shifted_x] = iris_mask[img_y, img_x]
except IndexError:
normalized_mask_roi[shifted_y, shifted_x] = False
return normalized_image_roi / 255.0, normalized_mask_roi
"""
from typing import Tuple
import numpy as np
from pydantic import NonNegativeInt
from iris.io.dataclasses import GeometryPolygons
from iris.utils import common
def generate_iris_mask(extrapolated_contours: GeometryPolygons, noise_mask: np.ndarray) -> np.ndarray:
"""Generate iris mask by first finding the intersection region between extrapolated iris contours and eyeball contours. Then remove from the outputted mask those pixels for which noise_mask is equal to True.
Args:
extrapolated_contours (GeometryPolygons): Iris polygon vertices.
noise_mask (np.ndarray): Noise mask.
Returns:
np.ndarray: Iris mask.
"""
img_h, img_w = noise_mask.shape[:2]
iris_mask = common.contour_to_mask(extrapolated_contours.iris_array, (img_w, img_h))
eyeball_mask = common.contour_to_mask(extrapolated_contours.eyeball_array, (img_w, img_h))
iris_mask = iris_mask & eyeball_mask
iris_mask = ~(iris_mask & noise_mask) & iris_mask
return iris_mask
def correct_orientation(
pupil_points: np.ndarray, iris_points: np.ndarray, eye_orientation: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Correct orientation by changing the starting angle in pupil and iris points' arrays.
Args:
pupil_points (np.ndarray): Pupil boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
eye_orientation (float): Eye orientation angle in radians.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with rotated based on eye_orientation angle boundary points (pupil_points, iris_points).
"""
orientation_angle = np.degrees(eye_orientation)
num_rotations = -round(orientation_angle * len(pupil_points) / 360.0)
pupil_points = np.roll(pupil_points, num_rotations, axis=0)
iris_points = np.roll(iris_points, num_rotations, axis=0)
return pupil_points, iris_points
def getgrids(res_in_r: NonNegativeInt, p2i_ratio: NonNegativeInt) -> np.ndarray:
"""Generate radius grids for nonlinear normalization based on p2i_ratio (pupil_to_iris ratio).
Args:
res_in_r (NonNegativeInt): Normalized image r resolution.
p2i_ratio (NonNegativeInt): pupil_to_iris ratio, range in [0,100]
Returns:
np.ndarray: nonlinear sampling grids for normalization
"""
p = [np.square(x) for x in np.arange(28, max(74 - p2i_ratio, p2i_ratio - 14), 1)]
q = p - p[0]
q = q / q[-1]
grids = np.interp(np.linspace(0, 1.0, res_in_r + 1), np.linspace(0, 1.0, len(q)), q)
return grids[0:-1] + np.diff(grids) / 2
def interpolate_pixel_intensity(image: np.ndarray, pixel_coords: Tuple[float, float]) -> float:
"""Perform bilinear interpolation to estimate pixel intensity in a given location.
Args:
image (np.ndarray): Original, not normalized image.
pixel_coords (Tuple[float, float]): Pixel coordinates.
Returns:
float: Interpolated pixel intensity.
Reference:
[1] https://en.wikipedia.org/wiki/Bilinear_interpolation
"""
def get_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
<fim_suffix>
def get_interpolation_points_coords(
image: np.ndarray, pixel_x: float, pixel_y: float
) -> Tuple[float, float, float, float]:
"""Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
"""
xmin, ymin = np.floor(pixel_x), np.floor(pixel_y)
xmax, ymax = np.ceil(pixel_x), np.ceil(pixel_y)
img_h, img_w = image.shape[:2]
if xmin == xmax and not xmax == img_w - 1:
xmax += 1
if xmin == xmax and xmax == img_w - 1:
xmin -= 1
if ymin == ymax and not ymax == img_h - 1:
ymax += 1
if ymin == ymax and ymax == img_h - 1:
ymin -= 1
return xmin, ymin, xmax, ymax
pixel_x, pixel_y = pixel_coords
xmin, ymin, xmax, ymax = get_interpolation_points_coords(image, pixel_x=pixel_x, pixel_y=pixel_y)
lower_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymax)
lower_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymax)
upper_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymin)
upper_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymin)
xs_differences = np.array([xmax - pixel_x, pixel_x - xmin])
neighboring_pixel_intensities = np.array(
[
[lower_left_pixel_intensity, upper_left_pixel_intensity],
[lower_right_pixel_intensity, upper_right_pixel_intensity],
]
)
ys_differences = np.array([[pixel_y - ymin], [ymax - pixel_y]])
pixel_intensity = np.matmul(np.matmul(xs_differences, neighboring_pixel_intensities), ys_differences)
return pixel_intensity.item()
<fim_middle>except IndexError:
return 0.0 | except IndexError:
return 0.0 | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
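For a feel of the radial sampling produced by getgrids above, the sketch below re-runs its body with toy inputs. Values are chosen only for inspection; the list-minus-scalar line behaves exactly as in the function, with numpy promoting the list to an array.
import numpy as np
res_in_r = 16
p2i_ratio = 40   # pupil-to-iris ratio in percent
p = [np.square(x) for x in np.arange(28, max(74 - p2i_ratio, p2i_ratio - 14), 1)]
q = p - p[0]                  # numpy promotes the list to an array here
q = q / q[-1]
grids = np.interp(np.linspace(0, 1.0, res_in_r + 1), np.linspace(0, 1.0, len(q)), q)
centers = grids[0:-1] + np.diff(grids) / 2
print(centers.shape)                           # (16,)
print(np.round(np.diff(centers)[[0, -1]], 4))  # spacing grows slightly toward the iris edge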
<filename>open-iris/src/iris/nodes/normalization/perspective_normalization.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def _normalize_all(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize all points of an image using bilinear.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): original input image points.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask.
"""
src_shape = src_points.shape[0:2]
src_points = np.vstack(src_points)
normalized_image = np.array(
[interpolate_pixel_intensity(original_image, pixel_coords=image_xy) for image_xy in src_points]
)
normalized_image = np.reshape(normalized_image, src_shape)
normalized_mask = np.array(
[self.get_pixel_binary(iris_mask, image_xy[0], image_xy[1]) for image_xy in src_points]
)
normalized_mask = np.reshape(normalized_mask, src_shape)
return normalized_image / 255.0, normalized_mask
# open-iris/src/iris/io/dataclasses.py
def filled_iris_mask(self) -> np.ndarray:
"""Fill iris mask.
Returns:
np.ndarray: Iris mask with filled pupil "holes".
"""
binary_maps = np.zeros(self.iris_mask.shape[:2], dtype=np.uint8)
binary_maps += self.pupil_mask
binary_maps += self.iris_mask
return binary_maps.astype(bool)
# open-iris/src/iris/nodes/normalization/common.py
def get_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return 0.0
"""
from typing import Collection, List, Tuple
import cv2
import numpy as np
from pydantic import Field, validator
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeOrientation, GeometryPolygons, IRImage, NoiseMask, NormalizedIris
from iris.io.errors import NormalizationError
from iris.nodes.normalization.common import correct_orientation, generate_iris_mask, interpolate_pixel_intensity
class PerspectiveNormalization(Algorithm):
"""Implementation of a normalization algorithm which uses perspective transformation to map image pixels.
Algorithm steps:
1) Create a grid of trapezoids around the iris in the original image based on the following algorithm parameters: res_in_phi, res_in_r, intermediate_radiuses.
2) Create a grid of rectangles in the normalized image, one corresponding to each trapezoid.
3) For each corresponding trapezoid-rectangle pair, compute the perspective matrix that estimates the original image location of a normalized image pixel.
4) Map each normalized image pixel to an original image pixel based on the estimated perspective matrix, performing bilinear interpolation if necessary.
"""
class Parameters(Algorithm.Parameters):
"""Parameters class for PerspectiveNormalization."""
res_in_phi: int = Field(..., gt=0)
res_in_r: int = Field(..., gt=0)
skip_boundary_points: int = Field(..., gt=0)
intermediate_radiuses: Collection[float]
oversat_threshold: int = Field(..., gt=0)
@validator("intermediate_radiuses")
def check_intermediate_radiuses(cls: type, v: Collection[float]) -> Collection[float]:
"""Check intermediate_radiuses parameter.
Args:
cls (type): PerspectiveNormalization.Parameters class.
v (Collection[float]): Variable value to check.
Raises:
NormalizationError: Raised if the number of radiuses is invalid, the min value is less than 0.0, or the max value is greater than 1.0.
Returns:
Collection[float]: intermediate_radiuses value passed for further processing.
"""
if len(v) < 2:
raise NormalizationError(f"Invalid number of intermediate_radiuses: {len(v)}.")
if min(v) < 0.0:
raise NormalizationError(f"Invalid min value of intermediate_radiuses: {min(v)}.")
if max(v) > 1.0:
raise NormalizationError(f"Invalid max value of intermediate_radiuses: {max(v)}.")
return v
__parameters_type__ = Parameters
def __init__(
self,
res_in_phi: int = 512,
res_in_r: int = 128,
skip_boundary_points: int = 1,
intermediate_radiuses: Collection[float] = np.linspace(0.0, 1.0, 8),
oversat_threshold: int = 254,
) -> None:
"""Assign parameters.
Args:
res_in_phi (int): Normalized image phi resolution. Defaults to 512.
res_in_r (int): Normalized image r resolution. Defaults to 128.
skip_boundary_points (int, optional): Take every nth point from estimated boundaries when generating correspondences.
Defaults to 1.
intermediate_radiuses (t.Iterable[float], optional): Intermediate rings radiuses used to generate additional points for estimating transformations.
Defaults to np.linspace(0.0, 1.0, 8).
oversat_threshold (int, optional): Threshold for masking over-saturated pixels. Defaults to 254.
"""
super().__init__(
res_in_phi=res_in_phi,
res_in_r=res_in_r,
skip_boundary_points=skip_boundary_points,
intermediate_radiuses=intermediate_radiuses,
oversat_threshold=oversat_threshold,
)
def run(
self,
image: IRImage,
noise_mask: NoiseMask,
extrapolated_contours: GeometryPolygons,
eye_orientation: EyeOrientation,
) -> NormalizedIris:
"""Normalize iris using perspective transformation estimated for every region of an image separately.
Args:
image (IRImage): Input image to normalize.
noise_mask (NoiseMask): Noise mask.
extrapolated_contours (GeometryPolygons): Extrapolated contours.
eye_orientation (EyeOrientation): Eye orientation angle.
Returns:
NormalizedIris: NormalizedIris object containing normalized image and iris mask.
"""
if len(extrapolated_contours.pupil_array) != len(extrapolated_contours.iris_array):
raise NormalizationError("Extrapolated amount of iris and pupil points must be the same.")
pupil_points, iris_points = correct_orientation(
extrapolated_contours.pupil_array,
extrapolated_contours.iris_array,
eye_orientation.angle,
)
iris_mask = generate_iris_mask(extrapolated_contours, noise_mask.mask)
iris_mask[image.img_data >= self.params.oversat_threshold] = False
src_points, dst_points = self._generate_correspondences(pupil_points, iris_points)
normalized_iris = NormalizedIris(
normalized_image=np.zeros((self.params.res_in_r, self.params.res_in_phi), dtype=np.float32),
normalized_mask=np.zeros((self.params.res_in_r, self.params.res_in_phi), dtype=bool),
)
for angle_point_idx in range(src_points.shape[1] - 1):
for ring_idx in range(src_points.shape[0] - 1):
current_src, current_dst = self._correspondence_rois_coords(
angle_idx=angle_point_idx,
ring_idx=ring_idx,
src_points=src_points,
dst_points=dst_points,
)
xmin, ymin, xmax, ymax = self._bbox_coords(current_dst)
normalized_image_roi, normalized_mask_roi = self._normalize_roi(
original_image=image.img_data,
iris_mask=iris_mask,
src_points=current_src.astype(np.float32),
dst_points=current_dst.astype(np.float32),
normalize_roi_output_shape=(ymax - ymin, xmax - xmin),
)
normalized_iris.normalized_image[ymin:ymax, xmin:xmax] = normalized_image_roi
normalized_iris.normalized_mask[ymin:ymax, xmin:xmax] = normalized_mask_roi
return normalized_iris
def _generate_correspondences(
self, pupil_points: np.ndarray, iris_points: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""Generate correspondences between points in original image and normalized image.
Args:
pupil_points (np.ndarray): Pupil bounding points. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris bounding points. NumPy array of shape (num_points = 512, xy_coords = 2).
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with generated correspondences.
"""
pupil_points = pupil_points[:: self.params.skip_boundary_points]
iris_points = iris_points[:: self.params.skip_boundary_points]
src_points = []
for radius in self.params.intermediate_radiuses:
ring = pupil_points + radius * (iris_points - pupil_points)
ring = np.vstack([ring, ring[0]])
src_points.append(ring)
src_points = np.array(src_points)
num_rings, num_ring_points = src_points.shape[:2]
dst_xs, dst_ys = np.meshgrid(
np.linspace(0, self.params.res_in_phi, num_ring_points).astype(int),
np.linspace(0, self.params.res_in_r, num_rings).astype(int),
)
dst_points = np.array([dst_xs, dst_ys]).transpose((1, 2, 0))
return src_points, dst_points
def _normalize_roi(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
dst_points: np.ndarray,
normalize_roi_output_shape: Tuple[float, float],
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize a single ROI of an image.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): ROI's original input image points.
dst_points (np.ndarray): ROI's normalized image points.
normalize_roi_output_shape (t.Tuple[float, float]): Output shape of normalized ROI.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask ROIs.
"""
xmin, ymin, xmax, ymax = self._bbox_coords(dst_points)
normalize_image_xs = np.arange(xmin, xmax)
normalize_image_ys = np.arange(ymin, ymax)
normalize_image_points = np.meshgrid(normalize_image_xs, normalize_image_ys)
normalize_image_points = self.cartesian2homogeneous(normalize_image_points)
perspective_mat = cv2.getPerspectiveTransform(dst_points, src_points)
mapped_points = np.matmul(perspective_mat, normalize_image_points)
mapped_points = self.homogeneous2cartesian(mapped_points)
normalized_image_roi = np.zeros(normalize_roi_output_shape, dtype=np.float32)
normalized_mask_roi = np.zeros(normalize_roi_output_shape, dtype=bool)
for image_xy, normalized_xy in zip(mapped_points.T, normalize_image_points.T[..., :2]):
norm_x, norm_y = normalized_xy.astype(int)
shifted_y, shifted_x = norm_y - ymin, norm_x - xmin
normalized_image_roi[shifted_y, shifted_x] = interpolate_pixel_intensity(
original_image, pixel_coords=image_xy
)
try:
img_x, img_y = map(int, image_xy)
normalized_mask_roi[shifted_y, shifted_x] = iris_mask[img_y, img_x]
<fim_suffix>
return normalized_image_roi / 255.0, normalized_mask_roi
def _bbox_coords(self, norm_dst_points: np.ndarray) -> Tuple[int, int, int, int]:
"""Extract the bounding box of currently processed normalized image ROI.
Args:
norm_dst_points (np.ndarray): Normalized image ROI coordinates.
Returns:
Tuple[int, int, int, int]: Bounding box coordinates in form (xmin, ymin, xmax, ymax).
"""
xmin, ymin = norm_dst_points[0].astype(int)
xmax, ymax = norm_dst_points[-1].astype(int)
return (xmin, ymin, xmax, ymax)
def _correspondence_rois_coords(
self,
angle_idx: int,
ring_idx: int,
src_points: np.ndarray,
dst_points: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Generate a single correspondence ROIs between original image and normalized one based on angle index and ring index.
Args:
angle_idx (int): Boundary point angle index.
ring_idx (int): Intermediate ring index.
src_points (np.ndarray): All mapping points from an original image.
NumPy array of shape (
num_intermediate_rings = self.intermediate_radiuses,
num_boundary_points = 512 // self.skip_boundary_points,
xy_coords = 2
).
dst_points (np.ndarray): All mapping points from a normalized image.
NumPy array of shape (
num_intermediate_rings = self.intermediate_radiuses,
num_boundary_points = 512 // self.skip_boundary_points,
xy_coords = 2
).
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with extracted from src_points and dst_points ROIs.
"""
src_roi = src_points[ring_idx : ring_idx + 2, angle_idx : angle_idx + 2]
dst_roi = dst_points[ring_idx : ring_idx + 2, angle_idx : angle_idx + 2]
return src_roi.reshape(4, 2), dst_roi.reshape(4, 2)
@staticmethod
def cartesian2homogeneous(points: List[np.ndarray]) -> np.ndarray:
"""Convert points in cartesian coordinates to homogeneous coordinates.
Args:
points (List[np.ndarray]): Points in cartesian coordinates. Array should be in format: [[x values], [y values]].
Returns:
np.ndarray: Points in homogeneous coordinates. Returned array will have format: [[x values], [y values], [1 ... 1]].
"""
x_coords, y_coords = points
x_coords = x_coords.reshape(-1, 1)
y_coords = y_coords.reshape(-1, 1)
homogeneous_coords = np.hstack([x_coords, y_coords, np.ones((len(x_coords), 1))])
return homogeneous_coords.T
@staticmethod
def homogeneous2cartesian(points: np.ndarray) -> np.ndarray:
"""Convert points in homogeneous coordinates to cartesian coordinates.
Args:
points (np.ndarray): Points in homogeneous coordinates. Array should be in format: [[x values], [y values], [perspective scale values]].
Returns:
np.ndarray: Points in cartesian coordinates. Returned array will have format: [[x values], [y values]].
"""
points /= points[-1]
points = points[:2]
return points
<fim_middle>except IndexError:
normalized_mask_roi[shifted_y, shifted_x] = False | except IndexError:
normalized_mask_roi[shifted_y, shifted_x] = False | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
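The row above completes a try/except inside PerspectiveNormalization._normalize_roi. As a minimal illustration of the mapping that method performs (not part of the dataset; corner values are made up), a perspective matrix estimated between a normalized-image rectangle and an original-image trapezoid can be applied to a homogeneous pixel coordinate like this:

import cv2
import numpy as np

# Four corners of a trapezoid in the original image (src) and of the
# corresponding rectangle in the normalized image (dst).
src = np.array([[100, 80], [140, 78], [96, 120], [150, 118]], dtype=np.float32)
dst = np.array([[0, 0], [16, 0], [0, 8], [16, 8]], dtype=np.float32)

# Matrix that maps normalized (dst) coordinates back into the original image (src),
# mirroring cv2.getPerspectiveTransform(dst_points, src_points) in the code above.
perspective_mat = cv2.getPerspectiveTransform(dst, src)

# Map one normalized pixel, e.g. (x=8, y=4), using homogeneous coordinates.
homogeneous = np.array([8.0, 4.0, 1.0])
mapped = perspective_mat @ homogeneous
mapped_xy = mapped[:2] / mapped[2]  # back to cartesian
print(mapped_xy)  # approximate location of that normalized pixel in the original image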
<filename>open-iris/src/iris/nodes/normalization/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def get_pixel_binary(image: np.ndarray, pixel_x: float, pixel_y: float) -> bool:
"""Get the boolean value of a pixel from a binary image.
Args:
image (np.ndarray): Binary image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
bool: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return False
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def _normalize_all(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize all points of an image using bilinear.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): original input image points.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask.
"""
src_shape = src_points.shape[0:2]
src_points = np.vstack(src_points)
normalized_image = np.array(
[interpolate_pixel_intensity(original_image, pixel_coords=image_xy) for image_xy in src_points]
)
normalized_image = np.reshape(normalized_image, src_shape)
normalized_mask = np.array(
[self.get_pixel_binary(iris_mask, image_xy[0], image_xy[1]) for image_xy in src_points]
)
normalized_mask = np.reshape(normalized_mask, src_shape)
return normalized_image / 255.0, normalized_mask
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def _normalize_roi(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
dst_points: np.ndarray,
normalize_roi_output_shape: Tuple[float, float],
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize a single ROI of an image.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): ROI's original input image points.
dst_points (np.ndarray): ROI's normalized image points.
normalize_roi_output_shape (t.Tuple[float, float]): Output shape of normalized ROI.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask ROIs.
"""
xmin, ymin, xmax, ymax = self._bbox_coords(dst_points)
normalize_image_xs = np.arange(xmin, xmax)
normalize_image_ys = np.arange(ymin, ymax)
normalize_image_points = np.meshgrid(normalize_image_xs, normalize_image_ys)
normalize_image_points = self.cartesian2homogeneous(normalize_image_points)
perspective_mat = cv2.getPerspectiveTransform(dst_points, src_points)
mapped_points = np.matmul(perspective_mat, normalize_image_points)
mapped_points = self.homogeneous2cartesian(mapped_points)
normalized_image_roi = np.zeros(normalize_roi_output_shape, dtype=np.float32)
normalized_mask_roi = np.zeros(normalize_roi_output_shape, dtype=bool)
for image_xy, normalized_xy in zip(mapped_points.T, normalize_image_points.T[..., :2]):
norm_x, norm_y = normalized_xy.astype(int)
shifted_y, shifted_x = norm_y - ymin, norm_x - xmin
normalized_image_roi[shifted_y, shifted_x] = interpolate_pixel_intensity(
original_image, pixel_coords=image_xy
)
try:
img_x, img_y = map(int, image_xy)
normalized_mask_roi[shifted_y, shifted_x] = iris_mask[img_y, img_x]
except IndexError:
normalized_mask_roi[shifted_y, shifted_x] = False
return normalized_image_roi / 255.0, normalized_mask_roi
"""
from typing import Tuple
import numpy as np
from pydantic import NonNegativeInt
from iris.io.dataclasses import GeometryPolygons
from iris.utils import common
def generate_iris_mask(extrapolated_contours: GeometryPolygons, noise_mask: np.ndarray) -> np.ndarray:
"""Generate iris mask by first finding the intersection region between extrapolated iris contours and eyeball contours. Then remove from the outputted mask those pixels for which noise_mask is equal to True.
Args:
extrapolated_contours (GeometryPolygons): Iris polygon vertices.
noise_mask (np.ndarray): Noise mask.
Returns:
np.ndarray: Iris mask.
"""
img_h, img_w = noise_mask.shape[:2]
iris_mask = common.contour_to_mask(extrapolated_contours.iris_array, (img_w, img_h))
eyeball_mask = common.contour_to_mask(extrapolated_contours.eyeball_array, (img_w, img_h))
iris_mask = iris_mask & eyeball_mask
iris_mask = ~(iris_mask & noise_mask) & iris_mask
return iris_mask
def correct_orientation(
pupil_points: np.ndarray, iris_points: np.ndarray, eye_orientation: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Correct orientation by changing the starting angle in pupil and iris points' arrays.
Args:
pupil_points (np.ndarray): Pupil boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
eye_orientation (float): Eye orientation angle in radians.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with rotated based on eye_orientation angle boundary points (pupil_points, iris_points).
"""
orientation_angle = np.degrees(eye_orientation)
num_rotations = -round(orientation_angle * len(pupil_points) / 360.0)
pupil_points = np.roll(pupil_points, num_rotations, axis=0)
iris_points = np.roll(iris_points, num_rotations, axis=0)
return pupil_points, iris_points
def getgrids(res_in_r: NonNegativeInt, p2i_ratio: NonNegativeInt) -> np.ndarray:
"""Generate radius grids for nonlinear normalization based on p2i_ratio (pupil_to_iris ratio).
Args:
res_in_r (NonNegativeInt): Normalized image r resolution.
p2i_ratio (NonNegativeInt): pupil_to_iris ratio, range in [0,100]
Returns:
np.ndarray: nonlinear sampling grids for normalization
"""
p = [np.square(x) for x in np.arange(28, max(74 - p2i_ratio, p2i_ratio - 14), 1)]
q = p - p[0]
q = q / q[-1]
grids = np.interp(np.linspace(0, 1.0, res_in_r + 1), np.linspace(0, 1.0, len(q)), q)
return grids[0:-1] + np.diff(grids) / 2
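# Illustration only (not part of the original common.py): a quick sanity check of getgrids.
# For a pupil-to-iris ratio in range it should return res_in_r strictly increasing radii
# between 0 and 1; the exact values depend on the squared spacing built above.
_demo_grids = getgrids(4, 30)
assert len(_demo_grids) == 4
assert np.all(np.diff(_demo_grids) > 0)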
def interpolate_pixel_intensity(image: np.ndarray, pixel_coords: Tuple[float, float]) -> float:
"""Perform bilinear interpolation to estimate pixel intensity in a given location.
Args:
image (np.ndarray): Original, not normalized image.
pixel_coords (Tuple[float, float]): Pixel coordinates.
Returns:
float: Interpolated pixel intensity.
Reference:
[1] https://en.wikipedia.org/wiki/Bilinear_interpolation
"""
def get_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
<fim_suffix>
def get_interpolation_points_coords(
image: np.ndarray, pixel_x: float, pixel_y: float
) -> Tuple[float, float, float, float]:
"""Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
"""
xmin, ymin = np.floor(pixel_x), np.floor(pixel_y)
xmax, ymax = np.ceil(pixel_x), np.ceil(pixel_y)
img_h, img_w = image.shape[:2]
if xmin == xmax and not xmax == img_w - 1:
xmax += 1
if xmin == xmax and xmax == img_w - 1:
xmin -= 1
if ymin == ymax and not ymax == img_h - 1:
ymax += 1
if ymin == ymax and ymax == img_h - 1:
ymin -= 1
return xmin, ymin, xmax, ymax
pixel_x, pixel_y = pixel_coords
xmin, ymin, xmax, ymax = get_interpolation_points_coords(image, pixel_x=pixel_x, pixel_y=pixel_y)
lower_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymax)
lower_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymax)
upper_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymin)
upper_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymin)
xs_differences = np.array([xmax - pixel_x, pixel_x - xmin])
neighboring_pixel_intensities = np.array(
[
[lower_left_pixel_intensity, upper_left_pixel_intensity],
[lower_right_pixel_intensity, upper_right_pixel_intensity],
]
)
ys_differences = np.array([[pixel_y - ymin], [ymax - pixel_y]])
pixel_intensity = np.matmul(np.matmul(xs_differences, neighboring_pixel_intensities), ys_differences)
return pixel_intensity.item()
<fim_middle>except IndexError:
return 0.0 | except IndexError:
return 0.0 | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
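As a quick, self-contained check of the bilinear interpolation formula used by interpolate_pixel_intensity in the row above (illustration only; the patch values are made up): at the centre of a 2x2 patch the matrix product reduces to the mean of the four corner intensities.

import numpy as np

patch = np.array([[0.0, 10.0],
                  [20.0, 30.0]])  # a tiny intensity image

pixel_x, pixel_y = 0.5, 0.5
xmin, ymin, xmax, ymax = 0, 0, 1, 1

xs_differences = np.array([xmax - pixel_x, pixel_x - xmin])
neighboring = np.array([
    [patch[ymax, xmin], patch[ymin, xmin]],  # lower-left, upper-left
    [patch[ymax, xmax], patch[ymin, xmax]],  # lower-right, upper-right
])
ys_differences = np.array([[pixel_y - ymin], [ymax - pixel_y]])

value = (xs_differences @ neighboring @ ys_differences).item()
print(value)  # 15.0 == mean of 0, 10, 20, 30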
<filename>open-iris/src/iris/nodes/normalization/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def get_pixel_binary(image: np.ndarray, pixel_x: float, pixel_y: float) -> bool:
"""Get the boolean value of a pixel from a binary image.
Args:
image (np.ndarray): Binary image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
bool: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return False
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def _normalize_all(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize all points of an image using bilinear.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): original input image points.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask.
"""
src_shape = src_points.shape[0:2]
src_points = np.vstack(src_points)
normalized_image = np.array(
[interpolate_pixel_intensity(original_image, pixel_coords=image_xy) for image_xy in src_points]
)
normalized_image = np.reshape(normalized_image, src_shape)
normalized_mask = np.array(
[self.get_pixel_binary(iris_mask, image_xy[0], image_xy[1]) for image_xy in src_points]
)
normalized_mask = np.reshape(normalized_mask, src_shape)
return normalized_image / 255.0, normalized_mask
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def _normalize_roi(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
dst_points: np.ndarray,
normalize_roi_output_shape: Tuple[float, float],
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize a single ROI of an image.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): ROI's original input image points.
dst_points (np.ndarray): ROI's normalized image points.
normalize_roi_output_shape (t.Tuple[float, float]): Output shape of normalized ROI.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask ROIs.
"""
xmin, ymin, xmax, ymax = self._bbox_coords(dst_points)
normalize_image_xs = np.arange(xmin, xmax)
normalize_image_ys = np.arange(ymin, ymax)
normalize_image_points = np.meshgrid(normalize_image_xs, normalize_image_ys)
normalize_image_points = self.cartesian2homogeneous(normalize_image_points)
perspective_mat = cv2.getPerspectiveTransform(dst_points, src_points)
mapped_points = np.matmul(perspective_mat, normalize_image_points)
mapped_points = self.homogeneous2cartesian(mapped_points)
normalized_image_roi = np.zeros(normalize_roi_output_shape, dtype=np.float32)
normalized_mask_roi = np.zeros(normalize_roi_output_shape, dtype=bool)
for image_xy, normalized_xy in zip(mapped_points.T, normalize_image_points.T[..., :2]):
norm_x, norm_y = normalized_xy.astype(int)
shifted_y, shifted_x = norm_y - ymin, norm_x - xmin
normalized_image_roi[shifted_y, shifted_x] = interpolate_pixel_intensity(
original_image, pixel_coords=image_xy
)
try:
img_x, img_y = map(int, image_xy)
normalized_mask_roi[shifted_y, shifted_x] = iris_mask[img_y, img_x]
except IndexError:
normalized_mask_roi[shifted_y, shifted_x] = False
return normalized_image_roi / 255.0, normalized_mask_roi
"""
from typing import Tuple
import numpy as np
from pydantic import NonNegativeInt
from iris.io.dataclasses import GeometryPolygons
from iris.utils import common
def generate_iris_mask(extrapolated_contours: GeometryPolygons, noise_mask: np.ndarray) -> np.ndarray:
"""Generate iris mask by first finding the intersection region between extrapolated iris contours and eyeball contours. Then remove from the outputted mask those pixels for which noise_mask is equal to True.
Args:
extrapolated_contours (GeometryPolygons): Iris polygon vertices.
noise_mask (np.ndarray): Noise mask.
Returns:
np.ndarray: Iris mask.
"""
img_h, img_w = noise_mask.shape[:2]
iris_mask = common.contour_to_mask(extrapolated_contours.iris_array, (img_w, img_h))
eyeball_mask = common.contour_to_mask(extrapolated_contours.eyeball_array, (img_w, img_h))
iris_mask = iris_mask & eyeball_mask
iris_mask = ~(iris_mask & noise_mask) & iris_mask
return iris_mask
def correct_orientation(
pupil_points: np.ndarray, iris_points: np.ndarray, eye_orientation: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Correct orientation by changing the starting angle in pupil and iris points' arrays.
Args:
pupil_points (np.ndarray): Pupil boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
eye_orientation (float): Eye orientation angle in radians.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with rotated based on eye_orientation angle boundary points (pupil_points, iris_points).
"""
orientation_angle = np.degrees(eye_orientation)
num_rotations = -round(orientation_angle * len(pupil_points) / 360.0)
pupil_points = np.roll(pupil_points, num_rotations, axis=0)
iris_points = np.roll(iris_points, num_rotations, axis=0)
return pupil_points, iris_points
def getgrids(res_in_r: NonNegativeInt, p2i_ratio: NonNegativeInt) -> np.ndarray:
"""Generate radius grids for nonlinear normalization based on p2i_ratio (pupil_to_iris ratio).
Args:
res_in_r (NonNegativeInt): Normalized image r resolution.
p2i_ratio (NonNegativeInt): pupil_to_iris ratio, range in [0,100]
Returns:
np.ndarray: nonlinear sampling grids for normalization
"""
p = [np.square(x) for x in np.arange(28, max(74 - p2i_ratio, p2i_ratio - 14), 1)]
q = p - p[0]
q = q / q[-1]
grids = np.interp(np.linspace(0, 1.0, res_in_r + 1), np.linspace(0, 1.0, len(q)), q)
return grids[0:-1] + np.diff(grids) / 2
def interpolate_pixel_intensity(image: np.ndarray, pixel_coords: Tuple[float, float]) -> float:
"""Perform bilinear interpolation to estimate pixel intensity in a given location.
Args:
image (np.ndarray): Original, not normalized image.
pixel_coords (Tuple[float, float]): Pixel coordinates.
Returns:
float: Interpolated pixel intensity.
Reference:
[1] https://en.wikipedia.org/wiki/Bilinear_interpolation
"""
<fim_suffix>
def get_interpolation_points_coords(
image: np.ndarray, pixel_x: float, pixel_y: float
) -> Tuple[float, float, float, float]:
"""Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
"""
xmin, ymin = np.floor(pixel_x), np.floor(pixel_y)
xmax, ymax = np.ceil(pixel_x), np.ceil(pixel_y)
img_h, img_w = image.shape[:2]
if xmin == xmax and not xmax == img_w - 1:
xmax += 1
if xmin == xmax and xmax == img_w - 1:
xmin -= 1
if ymin == ymax and not ymax == img_h - 1:
ymax += 1
if ymin == ymax and ymax == img_h - 1:
ymin -= 1
return xmin, ymin, xmax, ymax
pixel_x, pixel_y = pixel_coords
xmin, ymin, xmax, ymax = get_interpolation_points_coords(image, pixel_x=pixel_x, pixel_y=pixel_y)
lower_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymax)
lower_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymax)
upper_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymin)
upper_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymin)
xs_differences = np.array([xmax - pixel_x, pixel_x - xmin])
neighboring_pixel_intensities = np.array(
[
[lower_left_pixel_intensity, upper_left_pixel_intensity],
[lower_right_pixel_intensity, upper_right_pixel_intensity],
]
)
ys_differences = np.array([[pixel_y - ymin], [ymax - pixel_y]])
pixel_intensity = np.matmul(np.matmul(xs_differences, neighboring_pixel_intensities), ys_differences)
return pixel_intensity.item()
<fim_middle>def get_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return 0.0 | def get_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return 0.0 | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
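The rows above repeatedly retrieve correct_orientation, which only changes where the boundary walk starts by rolling both point arrays the same number of steps. A toy sketch with 8 boundary points instead of 512 (illustration only, not part of the dataset):

import numpy as np

num_points = 8
angles = np.linspace(0, 2 * np.pi, num_points, endpoint=False)
pupil_points = np.stack([10 * np.cos(angles), 10 * np.sin(angles)], axis=1)

eye_orientation = np.pi / 4  # 45 degrees, i.e. one eighth of the boundary
num_rotations = -round(np.degrees(eye_orientation) * num_points / 360.0)

rolled = np.roll(pupil_points, num_rotations, axis=0)
print(num_rotations)  # -1: start the walk one point later
print(rolled[0])      # the point that previously sat at index 1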
<filename>open-iris/src/iris/nodes/normalization/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def get_pixel_binary(image: np.ndarray, pixel_x: float, pixel_y: float) -> bool:
"""Get the boolean value of a pixel from a binary image.
Args:
image (np.ndarray): Binary image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
bool: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return False
# open-iris/src/iris/nodes/normalization/nonlinear_normalization.py
def _normalize_all(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize all points of an image using bilinear.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): original input image points.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask.
"""
src_shape = src_points.shape[0:2]
src_points = np.vstack(src_points)
normalized_image = np.array(
[interpolate_pixel_intensity(original_image, pixel_coords=image_xy) for image_xy in src_points]
)
normalized_image = np.reshape(normalized_image, src_shape)
normalized_mask = np.array(
[self.get_pixel_binary(iris_mask, image_xy[0], image_xy[1]) for image_xy in src_points]
)
normalized_mask = np.reshape(normalized_mask, src_shape)
return normalized_image / 255.0, normalized_mask
# open-iris/src/iris/nodes/normalization/perspective_normalization.py
def _normalize_roi(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
dst_points: np.ndarray,
normalize_roi_output_shape: Tuple[float, float],
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize a single ROI of an image.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): ROI's original input image points.
dst_points (np.ndarray): ROI's normalized image points.
normalize_roi_output_shape (t.Tuple[float, float]): Output shape of normalized ROI.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask ROIs.
"""
xmin, ymin, xmax, ymax = self._bbox_coords(dst_points)
normalize_image_xs = np.arange(xmin, xmax)
normalize_image_ys = np.arange(ymin, ymax)
normalize_image_points = np.meshgrid(normalize_image_xs, normalize_image_ys)
normalize_image_points = self.cartesian2homogeneous(normalize_image_points)
perspective_mat = cv2.getPerspectiveTransform(dst_points, src_points)
mapped_points = np.matmul(perspective_mat, normalize_image_points)
mapped_points = self.homogeneous2cartesian(mapped_points)
normalized_image_roi = np.zeros(normalize_roi_output_shape, dtype=np.float32)
normalized_mask_roi = np.zeros(normalize_roi_output_shape, dtype=bool)
for image_xy, normalized_xy in zip(mapped_points.T, normalize_image_points.T[..., :2]):
norm_x, norm_y = normalized_xy.astype(int)
shifted_y, shifted_x = norm_y - ymin, norm_x - xmin
normalized_image_roi[shifted_y, shifted_x] = interpolate_pixel_intensity(
original_image, pixel_coords=image_xy
)
try:
img_x, img_y = map(int, image_xy)
normalized_mask_roi[shifted_y, shifted_x] = iris_mask[img_y, img_x]
except IndexError:
normalized_mask_roi[shifted_y, shifted_x] = False
return normalized_image_roi / 255.0, normalized_mask_roi
"""
from typing import Tuple
import numpy as np
from pydantic import NonNegativeInt
from iris.io.dataclasses import GeometryPolygons
from iris.utils import common
def generate_iris_mask(extrapolated_contours: GeometryPolygons, noise_mask: np.ndarray) -> np.ndarray:
"""Generate iris mask by first finding the intersection region between extrapolated iris contours and eyeball contours. Then remove from the outputted mask those pixels for which noise_mask is equal to True.
Args:
extrapolated_contours (GeometryPolygons): Iris polygon vertices.
noise_mask (np.ndarray): Noise mask.
Returns:
np.ndarray: Iris mask.
"""
img_h, img_w = noise_mask.shape[:2]
iris_mask = common.contour_to_mask(extrapolated_contours.iris_array, (img_w, img_h))
eyeball_mask = common.contour_to_mask(extrapolated_contours.eyeball_array, (img_w, img_h))
iris_mask = iris_mask & eyeball_mask
iris_mask = ~(iris_mask & noise_mask) & iris_mask
return iris_mask
def correct_orientation(
pupil_points: np.ndarray, iris_points: np.ndarray, eye_orientation: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Correct orientation by changing the starting angle in pupil and iris points' arrays.
Args:
pupil_points (np.ndarray): Pupil boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
eye_orientation (float): Eye orientation angle in radians.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with rotated based on eye_orientation angle boundary points (pupil_points, iris_points).
"""
orientation_angle = np.degrees(eye_orientation)
num_rotations = -round(orientation_angle * len(pupil_points) / 360.0)
pupil_points = np.roll(pupil_points, num_rotations, axis=0)
iris_points = np.roll(iris_points, num_rotations, axis=0)
return pupil_points, iris_points
def getgrids(res_in_r: NonNegativeInt, p2i_ratio: NonNegativeInt) -> np.ndarray:
"""Generate radius grids for nonlinear normalization based on p2i_ratio (pupil_to_iris ratio).
Args:
res_in_r (NonNegativeInt): Normalized image r resolution.
p2i_ratio (NonNegativeInt): pupil_to_iris ratio, range in [0,100]
Returns:
np.ndarray: nonlinear sampling grids for normalization
"""
p = [np.square(x) for x in np.arange(28, max(74 - p2i_ratio, p2i_ratio - 14), 1)]
q = p - p[0]
q = q / q[-1]
grids = np.interp(np.linspace(0, 1.0, res_in_r + 1), np.linspace(0, 1.0, len(q)), q)
return grids[0:-1] + np.diff(grids) / 2
def interpolate_pixel_intensity(image: np.ndarray, pixel_coords: Tuple[float, float]) -> float:
"""Perform bilinear interpolation to estimate pixel intensity in a given location.
Args:
image (np.ndarray): Original, not normalized image.
pixel_coords (Tuple[float, float]): Pixel coordinates.
Returns:
float: Interpolated pixel intensity.
Reference:
[1] https://en.wikipedia.org/wiki/Bilinear_interpolation
"""
def get_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return 0.0
<fim_suffix>
pixel_x, pixel_y = pixel_coords
xmin, ymin, xmax, ymax = get_interpolation_points_coords(image, pixel_x=pixel_x, pixel_y=pixel_y)
lower_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymax)
lower_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymax)
upper_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymin)
upper_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymin)
xs_differences = np.array([xmax - pixel_x, pixel_x - xmin])
neighboring_pixel_intensities = np.array(
[
[lower_left_pixel_intensity, upper_left_pixel_intensity],
[lower_right_pixel_intensity, upper_right_pixel_intensity],
]
)
ys_differences = np.array([[pixel_y - ymin], [ymax - pixel_y]])
pixel_intensity = np.matmul(np.matmul(xs_differences, neighboring_pixel_intensities), ys_differences)
return pixel_intensity.item()
<fim_middle>def get_interpolation_points_coords(
image: np.ndarray, pixel_x: float, pixel_y: float
) -> Tuple[float, float, float, float]:
"""Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
"""
xmin, ymin = np.floor(pixel_x), np.floor(pixel_y)
xmax, ymax = np.ceil(pixel_x), np.ceil(pixel_y)
img_h, img_w = image.shape[:2]
if xmin == xmax and not xmax == img_w - 1:
xmax += 1
if xmin == xmax and xmax == img_w - 1:
xmin -= 1
if ymin == ymax and not ymax == img_h - 1:
ymax += 1
if ymin == ymax and ymax == img_h - 1:
ymin -= 1
return xmin, ymin, xmax, ymax | def get_interpolation_points_coords(
image: np.ndarray, pixel_x: float, pixel_y: float
) -> Tuple[float, float, float, float]:
"""Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
"""
xmin, ymin = np.floor(pixel_x), np.floor(pixel_y)
xmax, ymax = np.ceil(pixel_x), np.ceil(pixel_y)
img_h, img_w = image.shape[:2]
if xmin == xmax and not xmax == img_w - 1:
xmax += 1
if xmin == xmax and xmax == img_w - 1:
xmin -= 1
if ymin == ymax and not ymax == img_h - 1:
ymax += 1
if ymin == ymax and ymax == img_h - 1:
ymin -= 1
return xmin, ymin, xmax, ymax | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
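generate_iris_mask in the rows above combines masks with ~(iris_mask & noise_mask) & iris_mask, i.e. keep iris pixels and drop those also flagged as noise. A tiny boolean sketch (illustration only; the arrays stand in for the real contour-derived masks):

import numpy as np

iris_mask = np.array([[True, True, False],
                      [True, True, False]])
noise_mask = np.array([[False, True, False],
                       [False, False, True]])

cleaned = ~(iris_mask & noise_mask) & iris_mask
print(cleaned)
# [[ True False False]
#  [ True  True False]]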
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/utils/row_viewable_matrix.py
def rowwise_eq(mat: SparseMatSet, other: SparseMatSet) -> Union[bool, np.ndarray]:
"""Check equals on a row-by-row basis."""
if len(mat) != len(other):
return False
row_eq = np.zeros(mat.shape[0], dtype=np.dtype('bool'))
for row_idx in range(len(mat)):
if np.all(mat[row_idx] == other[row_idx]):
row_eq[row_idx] = True
return row_eq
# searcharray/searcharray/utils/roaringish.py
def keys_unique(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
keys = self.keys(encoded)
intersected = sorted_unique(keys)
return intersected
# searcharray/searcharray/utils/roaringish.py
def payload_lsb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload LSBs from encoded."""
return encoded & self.payload_lsb_mask
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two vectors of the same large dimensional vector sparse
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
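# Illustration only (not part of the original postings.py): a tokenizer here is just a callable
# mapping a string to a list of tokens, so a custom one can normalize case before indexing.
# `lowercase_ws_tokenizer` is a hypothetical helper, not something shipped by searcharray.
def lowercase_ws_tokenizer(string):
    if pd.isna(string):
        return []
    return string.lower().split()
# e.g. lowercase_ws_tokenizer("Mary had a Little Lamb") -> ['mary', 'had', 'a', 'little', 'lamb']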
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have a positions for each term, doc pair. We can just update it.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# its an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, to let the TF cache know it should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
<fim_suffix>
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle>"""Return a mask on the postings array indicating which elements contain all terms.""" | """Return a mask on the postings array indicating which elements contain all terms.""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/utils/mat_set.py
def __str__(self):
as_str = [""]
for idx, (row, row_next) in enumerate(zip(self.rows, self.rows[1:])):
as_str.append(f"{idx}: {self.cols[row:row_next]}")
return "\n".join(as_str)
# searcharray/searcharray/indexing.py
def _lex_sort(terms_w_posns):
"""Sort terms, then doc_id, then posn."""
# Because docs / posns already sorted, we can just sort on terms
# Equivalent to np.lexsort(terms_w_posns[[::-1], :])
return np.argsort(terms_w_posns[0, :], kind='stable')
# searcharray/searcharray/phrase/middle_out.py
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
"""
"""Roaring-ish bit array for storing sorted integers in numpy array.
See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm
"""
import numpy as np
import sortednp as snp
import logging
import numbers
from typing import Optional, Tuple, List, Union
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
DEFAULT_KEY_MASK = np.uint64(0xFFFFFFF000000000)
DEFAULT_KEY_BITS = np.uint64(28)
DEFAULT_PAYLOAD_MSB_MASK = np.uint64(0x0000000FFFFC0000)
DEFAULT_PAYLOAD_MSB_BITS = np.uint64(18)
DEFAULT_PAYLOAD_LSB_MASK = np.uint64(0x000000000003FFFF)
DEFAULT_PAYLOAD_LSB_BITS = np.uint64(18)
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
_algorithm = snp.GALLOPING_SEARCH
def n_msb_mask(n: np.uint64) -> np.uint64:
"""Return the n most significant bits of num."""
return np.uint64(~(np.uint64(_1 << (_64 - n))) + _1)
def sorted_unique(arr: np.ndarray) -> np.ndarray:
return snp.intersect(arr, arr, duplicates=snp.DROP)
class RoaringishEncoder:
"""An encoder for key->integer sets as a numpy array.
Each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs | payload
(different number of MSBs / payload bits can be specified)
"""
def __init__(self, key_bits: np.uint64 = DEFAULT_KEY_BITS):
payload_bits = _64 - key_bits
self.payload_msb_bits = payload_bits // _2
self.payload_lsb_bits = np.uint64(payload_bits - self.payload_msb_bits)
self.key_bits = key_bits
assert self.key_bits.dtype == np.uint64
# key bits MSB of 64 bits
self.key_mask = n_msb_mask(key_bits)
self.payload_msb_mask = n_msb_mask(np.uint64(self.payload_msb_bits + key_bits)) & ~self.key_mask
assert self.payload_msb_bits.dtype == np.uint64, f"MSB bits dtype was {self.payload_msb_bits.dtype}"
assert self.payload_msb_mask.dtype == np.uint64, f"MSB mask dtype was {self.payload_msb_mask.dtype}"
self.payload_lsb_mask = (_1 << self.payload_lsb_bits) - np.uint64(1)
assert self.payload_lsb_bits.dtype == np.uint64, f"LSB bits dtype was {self.payload_lsb_bits.dtype}"
assert self.payload_lsb_mask.dtype == np.uint64, f"LSB mask dtype was {self.payload_lsb_mask.dtype}"
if key_bits == DEFAULT_KEY_BITS:
assert self.key_mask == DEFAULT_KEY_MASK
assert self.payload_msb_mask == DEFAULT_PAYLOAD_MSB_MASK
assert self.payload_lsb_mask == DEFAULT_PAYLOAD_LSB_MASK
self.max_payload = np.uint64(2**self.payload_lsb_bits - 1)
def validate_payload(self, payload: np.ndarray):
"""Optional validation of payload."""
if np.any(payload > self.max_payload):
raise ValueError(f"Positions must be less than {2**self.payload_lsb_bits}")
def encode(self, payload: np.ndarray,
keys: Optional[np.ndarray] = None,
boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Pack a sorted array of integers into compact bit numpy array.
each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs| payload
for later easy intersection of 32+16 msbs, then checking for adjacent
positions
If boundaries are provided, then we consider multiple distinct payloads
being encoded simultaneously, and we return the boundaries of each
"""
cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # Header of bit to use
cols <<= self.payload_msb_bits
if keys is not None:
cols |= keys.astype(np.uint64) << (_64 - self.key_bits)
values = payload % self.payload_lsb_bits # Value to encode
change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1
change_indices_one_doc = np.concatenate([[0], change_indices_one_doc])
if boundaries is not None:
change_indices = snp.merge(change_indices_one_doc, boundaries,
duplicates=snp.DROP)
new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1]
new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]])
else:
change_indices = change_indices_one_doc
new_boundaries = None
# position 0 sets bit 0 (value 1),
# position 1 sets bit 1 (value 2), etc.
values = _1 << values
cols |= values
encoded = cols
if len(encoded) == 0:
return encoded, new_boundaries
reduced = np.bitwise_or.reduceat(encoded, change_indices)
return reduced, new_boundaries
def decode(self, encoded: np.ndarray, get_keys: bool = True) -> Union[List[Tuple[np.uint64, np.ndarray]], List[np.ndarray]]:
<fim_suffix>
keys = (encoded & self.key_mask) >> (_64 - self.key_bits)
msbs = (encoded & self.payload_msb_mask) >> self.payload_msb_bits
to_concat = []
for bit in range(self.payload_lsb_bits):
mask = 1 << bit
lsbs = encoded & mask
set_lsbs = (lsbs != 0)
this_keys = keys[set_lsbs]
payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits)
doc_with_posn = np.dstack([this_keys, payload])[0]
to_concat.append(doc_with_posn)
stacked = np.vstack(to_concat)
# Sort by doc_id, then posn
sorted_payload = stacked[np.lexsort((stacked[:, 1], stacked[:, 0]))]
keys, idx = np.unique(sorted_payload[:, 0], return_index=True)
grouped = np.split(sorted_payload[:, 1], idx[1:])
if get_keys:
return list(zip(keys, grouped))
else:
return grouped
def keys(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
return (encoded & self.key_mask) >> (_64 - self.key_bits)
def keys_unique(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
keys = self.keys(encoded)
intersected = sorted_unique(keys)
return intersected
def payload_msb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload MSBs from encoded."""
return (encoded & self.payload_msb_mask) >> self.payload_msb_bits
def payload_lsb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload LSBs from encoded."""
return encoded & self.payload_lsb_mask
def intersect_rshift(self, lhs: np.ndarray, rhs: np.ndarray,
rshift: np.int64 = _neg1) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
rshift : int how much to shift rhs by to the right
"""
rhs_int = rhs
assert rshift < 0, "rshift must be negative"
rhs_int = rhs[self.payload_msb(rhs) >= np.abs(rshift)]
rshft = rshift.view(np.uint64)
rhs_shifted = (rhs_int >> self.payload_lsb_bits) + rshft
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs_shifted,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs_int[rhs_idx]
def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
"""
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs >> self.payload_lsb_bits,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs[rhs_idx]
def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray:
"""Get list of encoded that have values in keys."""
assert len(keys.shape) == 1
assert len(encoded.shape) == 1
encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits)
_, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True,
duplicates=snp.KEEP_MAX_N,
algorithm=_algorithm)
return encoded[idx_enc]
def convert_keys(keys) -> np.ndarray:
"""Convert keys to range or np.ndarray of uint64."""
if isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
# UNFORTUNATE COPY
return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0]
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64)
raise ValueError(f"Unknown type for keys: {type(keys)}")
<fim_middle>"""Decode an encoded bit array into keys / payloads.""" | """Decode an encoded bit array into keys / payloads.""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/postings.py
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# searcharray/searcharray/postings.py
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
# searcharray/searcharray/phrase/middle_out.py
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here probably ellipses or a tuple of various things
return np.asarray(list(rng))[key]
"""
"""Roaring-ish bit array for storing sorted integers in numpy array.
See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm
"""
import numpy as np
import sortednp as snp
import logging
import numbers
from typing import Optional, Tuple, List, Union
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
DEFAULT_KEY_MASK = np.uint64(0xFFFFFFF000000000)
DEFAULT_KEY_BITS = np.uint64(28)
DEFAULT_PAYLOAD_MSB_MASK = np.uint64(0x0000000FFFFC0000)
DEFAULT_PAYLOAD_MSB_BITS = np.uint64(18)
DEFAULT_PAYLOAD_LSB_MASK = np.uint64(0x000000000003FFFF)
DEFAULT_PAYLOAD_LSB_BITS = np.uint64(18)
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
_algorithm = snp.GALLOPING_SEARCH
def n_msb_mask(n: np.uint64) -> np.uint64:
"""Return the n most significant bits of num."""
return np.uint64(~(np.uint64(_1 << (_64 - n))) + _1)
def sorted_unique(arr: np.ndarray) -> np.ndarray:
return snp.intersect(arr, arr, duplicates=snp.DROP)
class RoaringishEncoder:
"""An encoder for key->integer sets as a numpy array.
Each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs | payload
(different number of MSBs / payload bits can be specified)
"""
def __init__(self, key_bits: np.uint64 = DEFAULT_KEY_BITS):
payload_bits = _64 - key_bits
self.payload_msb_bits = payload_bits // _2
self.payload_lsb_bits = np.uint64(payload_bits - self.payload_msb_bits)
self.key_bits = key_bits
assert self.key_bits.dtype == np.uint64
# key bits MSB of 64 bits
self.key_mask = n_msb_mask(key_bits)
self.payload_msb_mask = n_msb_mask(np.uint64(self.payload_msb_bits + key_bits)) & ~self.key_mask
assert self.payload_msb_bits.dtype == np.uint64, f"MSB bits dtype was {self.payload_msb_bits.dtype}"
assert self.payload_msb_mask.dtype == np.uint64, f"MSB mask dtype was {self.payload_msb_mask.dtype}"
self.payload_lsb_mask = (_1 << self.payload_lsb_bits) - np.uint64(1)
assert self.payload_lsb_bits.dtype == np.uint64, f"LSB bits dtype was {self.payload_lsb_bits.dtype}"
assert self.payload_lsb_mask.dtype == np.uint64, f"LSB mask dtype was {self.payload_lsb_mask.dtype}"
if key_bits == DEFAULT_KEY_BITS:
assert self.key_mask == DEFAULT_KEY_MASK
assert self.payload_msb_mask == DEFAULT_PAYLOAD_MSB_MASK
assert self.payload_lsb_mask == DEFAULT_PAYLOAD_LSB_MASK
self.max_payload = np.uint64(2**self.payload_lsb_bits - 1)
def validate_payload(self, payload: np.ndarray):
"""Optional validation of payload."""
if np.any(payload > self.max_payload):
raise ValueError(f"Positions must be less than {2**self.payload_lsb_bits}")
def encode(self, payload: np.ndarray,
keys: Optional[np.ndarray] = None,
boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Pack a sorted array of integers into compact bit numpy array.
each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs| payload
for later easy intersection of 32+16 msbs, then checking for adjacent
positions
If boundaries are provided, then we consider multiple distinct payloads
being encoded simultaneously, and we return the boundaries of each
"""
cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # Header of bit to use
cols <<= self.payload_msb_bits
if keys is not None:
cols |= keys.astype(np.uint64) << (_64 - self.key_bits)
values = payload % self.payload_lsb_bits # Value to encode
change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1
change_indices_one_doc = np.concatenate([[0], change_indices_one_doc])
if boundaries is not None:
change_indices = snp.merge(change_indices_one_doc, boundaries,
duplicates=snp.DROP)
new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1]
new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]])
else:
change_indices = change_indices_one_doc
new_boundaries = None
# position 0 sets bit 0 (value 1),
# position 1 sets bit 1 (value 2), etc.
values = _1 << values
cols |= values
encoded = cols
if len(encoded) == 0:
return encoded, new_boundaries
reduced = np.bitwise_or.reduceat(encoded, change_indices)
return reduced, new_boundaries
def decode(self, encoded: np.ndarray, get_keys: bool = True) -> Union[List[Tuple[np.uint64, np.ndarray]], List[np.ndarray]]:
"""Decode an encoded bit array into keys / payloads."""
keys = (encoded & self.key_mask) >> (_64 - self.key_bits)
msbs = (encoded & self.payload_msb_mask) >> self.payload_msb_bits
to_concat = []
for bit in range(self.payload_lsb_bits):
mask = 1 << bit
lsbs = encoded & mask
set_lsbs = (lsbs != 0)
this_keys = keys[set_lsbs]
payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits)
doc_with_posn = np.dstack([this_keys, payload])[0]
to_concat.append(doc_with_posn)
stacked = np.vstack(to_concat)
# Sort by doc_id, then posn
sorted_payload = stacked[np.lexsort((stacked[:, 1], stacked[:, 0]))]
keys, idx = np.unique(sorted_payload[:, 0], return_index=True)
grouped = np.split(sorted_payload[:, 1], idx[1:])
if get_keys:
return list(zip(keys, grouped))
else:
return grouped
def keys(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
return (encoded & self.key_mask) >> (_64 - self.key_bits)
def keys_unique(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
keys = self.keys(encoded)
intersected = sorted_unique(keys)
return intersected
def payload_msb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload MSBs from encoded."""
return (encoded & self.payload_msb_mask) >> self.payload_msb_bits
def payload_lsb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload LSBs from encoded."""
return encoded & self.payload_lsb_mask
def intersect_rshift(self, lhs: np.ndarray, rhs: np.ndarray,
rshift: np.int64 = _neg1) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
rshift : int how much to shift rhs by to the right
"""
rhs_int = rhs
assert rshift < 0, "rshift must be negative"
rhs_int = rhs[self.payload_msb(rhs) >= np.abs(rshift)]
rshft = rshift.view(np.uint64)
rhs_shifted = (rhs_int >> self.payload_lsb_bits) + rshft
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs_shifted,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs_int[rhs_idx]
def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
"""
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs >> self.payload_lsb_bits,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs[rhs_idx]
def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray:
"""Get list of encoded that have values in keys."""
assert len(keys.shape) == 1
assert len(encoded.shape) == 1
encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits)
_, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True,
duplicates=snp.KEEP_MAX_N,
algorithm=_algorithm)
return encoded[idx_enc]
def convert_keys(keys) -> np.ndarray:
<fim_suffix>
if isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
# UNFORTUNATE COPY
return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0]
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64)
raise ValueError(f"Unknown type for keys: {type(keys)}")
<fim_middle>"""Convert keys to range or np.ndarray of uint64.""" | """Convert keys to range or np.ndarray of uint64.""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/utils/bitcount.py
def bit_count64(arr):
"""Count the number of bits set in each element in the array."""
arr = arr - ((arr >> _1) & s55)
arr = (arr & s33) + ((arr >> _2) & s33)
arr += (arr >> _4)
arr &= s0F
arr *= s01
arr >>= all_but_one_bit
return arr
# searcharray/searcharray/utils/roaringish.py
def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
"""
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs >> self.payload_lsb_bits,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs[rhs_idx]
# searcharray/searcharray/indexing.py
def _compute_doc_lens(posns: np.ndarray, doc_ids: np.ndarray, num_docs: int) -> np.ndarray:
"""Given an array of positions, compute the length of each document."""
doc_lens = np.zeros(num_docs, dtype=np.uint32)
# Find where we have posns for each doc
non_empty_doc_lens = -np.diff(posns) + 1
non_empty_idxs = np.argwhere(non_empty_doc_lens > 0).flatten()
non_empty_doc_ids = doc_ids[non_empty_idxs]
non_empty_doc_lens = non_empty_doc_lens[non_empty_idxs]
doc_lens[non_empty_doc_ids] = non_empty_doc_lens
if doc_ids[-1] not in non_empty_doc_ids:
doc_lens[doc_ids[-1]] = posns[-1] + 1
return doc_lens
"""
"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs lsb set and rhs lsb's most significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
<fim_suffix>
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only count matches of the last bigram (ignoring docs where earlier bigrams did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here probably ellipses or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle>"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
""" | """Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/solr.py
def listify(x):
return x if isinstance(x, list) else [x]
# searcharray/searcharray/phrase/middle_out.py
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
# searcharray/searcharray/phrase/middle_out.py
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair and can just update them.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also, this is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# its an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
<fim_suffix>
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first and cache it in the DF cache,
# so the TF cache knows those term freqs should be cached too
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle>"""Return a boolean numpy array indicating which elements contain the given term.""" | """Return a boolean numpy array indicating which elements contain the given term.""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/utils/mat_set.py
def __eq__(self, other):
return np.all(self.rows == other.rows) and np.all(self.cols == other.cols)
# searcharray/searcharray/postings.py
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
# searcharray/searcharray/phrase/middle_out.py
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
"""
"""Roaring-ish bit array for storing sorted integers in numpy array.
See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm
"""
import numpy as np
import sortednp as snp
import logging
import numbers
from typing import Optional, Tuple, List, Union
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
DEFAULT_KEY_MASK = np.uint64(0xFFFFFFF000000000)
DEFAULT_KEY_BITS = np.uint64(28)
DEFAULT_PAYLOAD_MSB_MASK = np.uint64(0x0000000FFFFC0000)
DEFAULT_PAYLOAD_MSB_BITS = np.uint64(18)
DEFAULT_PAYLOAD_LSB_MASK = np.uint64(0x000000000003FFFF)
DEFAULT_PAYLOAD_LSB_BITS = np.uint64(18)
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
_algorithm = snp.GALLOPING_SEARCH
def n_msb_mask(n: np.uint64) -> np.uint64:
"""Return the n most significant bits of num."""
return np.uint64(~(np.uint64(_1 << (_64 - n))) + _1)
def sorted_unique(arr: np.ndarray) -> np.ndarray:
return snp.intersect(arr, arr, duplicates=snp.DROP)
class RoaringishEncoder:
"""An encoder for key->integer sets as a numpy array.
Each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs | payload
(different number of MSBs / payload bits can be specified)
"""
def __init__(self, key_bits: np.uint64 = DEFAULT_KEY_BITS):
payload_bits = _64 - key_bits
self.payload_msb_bits = payload_bits // _2
self.payload_lsb_bits = np.uint64(payload_bits - self.payload_msb_bits)
self.key_bits = key_bits
assert self.key_bits.dtype == np.uint64
# key bits MSB of 64 bits
self.key_mask = n_msb_mask(key_bits)
self.payload_msb_mask = n_msb_mask(np.uint64(self.payload_msb_bits + key_bits)) & ~self.key_mask
assert self.payload_msb_bits.dtype == np.uint64, f"MSB bits dtype was {self.payload_msb_bits.dtype}"
assert self.payload_msb_mask.dtype == np.uint64, f"MSB mask dtype was {self.payload_msb_mask.dtype}"
self.payload_lsb_mask = (_1 << self.payload_lsb_bits) - np.uint64(1)
assert self.payload_lsb_bits.dtype == np.uint64, f"LSB bits dtype was {self.payload_lsb_bits.dtype}"
assert self.payload_lsb_mask.dtype == np.uint64, f"LSB mask dtype was {self.payload_lsb_mask.dtype}"
if key_bits == DEFAULT_KEY_BITS:
assert self.key_mask == DEFAULT_KEY_MASK
assert self.payload_msb_mask == DEFAULT_PAYLOAD_MSB_MASK
assert self.payload_lsb_mask == DEFAULT_PAYLOAD_LSB_MASK
self.max_payload = np.uint64(2**self.payload_lsb_bits - 1)
def validate_payload(self, payload: np.ndarray):
"""Optional validation of payload."""
if np.any(payload > self.max_payload):
raise ValueError(f"Positions must be less than {2**self.payload_lsb_bits}")
def encode(self, payload: np.ndarray,
keys: Optional[np.ndarray] = None,
boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Pack a sorted array of integers into compact bit numpy array.
each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs| payload
for later easy intersection of 32+16 msbs, then checking for adjacent
positions
If boundaries are provided, then we consider multiple distinct payloads
being encoded simultaneously, and we return the boundaries of each
"""
cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # Header of bit to use
cols <<= self.payload_msb_bits
if keys is not None:
cols |= keys.astype(np.uint64) << (_64 - self.key_bits)
values = payload % self.payload_lsb_bits # Value to encode
change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1
change_indices_one_doc = np.concatenate([[0], change_indices_one_doc])
if boundaries is not None:
change_indices = snp.merge(change_indices_one_doc, boundaries,
duplicates=snp.DROP)
new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1]
new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]])
else:
change_indices = change_indices_one_doc
new_boundaries = None
# 0 as a position, goes in bit 1,
# 1 as a position, goes in bit 2, etc
values = _1 << values
cols |= values
encoded = cols
if len(encoded) == 0:
return encoded, new_boundaries
reduced = np.bitwise_or.reduceat(encoded, change_indices)
return reduced, new_boundaries
def decode(self, encoded: np.ndarray, get_keys: bool = True) -> Union[List[Tuple[np.uint64, np.ndarray]], List[np.ndarray]]:
"""Decode an encoded bit array into keys / payloads."""
keys = (encoded & self.key_mask) >> (_64 - self.key_bits)
msbs = (encoded & self.payload_msb_mask) >> self.payload_msb_bits
to_concat = []
for bit in range(self.payload_lsb_bits):
mask = 1 << bit
lsbs = encoded & mask
set_lsbs = (lsbs != 0)
this_keys = keys[set_lsbs]
payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits)
doc_with_posn = np.dstack([this_keys, payload])[0]
to_concat.append(doc_with_posn)
stacked = np.vstack(to_concat)
# Sort by doc_id, then posn
sorted_payload = stacked[np.lexsort((stacked[:, 1], stacked[:, 0]))]
keys, idx = np.unique(sorted_payload[:, 0], return_index=True)
grouped = np.split(sorted_payload[:, 1], idx[1:])
if get_keys:
return list(zip(keys, grouped))
else:
return grouped
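# Illustrative sketch (not part of the original module; the sample data is an assumption):
# a round trip through encode()/decode() for two documents.
# >>> enc = RoaringishEncoder()
# >>> encoded, _ = enc.encode(payload=np.asarray([1, 5, 100], dtype=np.uint64),
# ...                         keys=np.asarray([0, 0, 1], dtype=np.uint64))
# >>> enc.decode(encoded)   # -> key 0 with positions [1, 5], key 1 with position [100]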
def keys(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
return (encoded & self.key_mask) >> (_64 - self.key_bits)
def keys_unique(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
keys = self.keys(encoded)
intersected = sorted_unique(keys)
return intersected
def payload_msb(self, encoded: np.ndarray) -> np.ndarray:
<fim_suffix>
return (encoded & self.payload_msb_mask) >> self.payload_msb_bits
def payload_lsb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload LSBs from encoded."""
return encoded & self.payload_lsb_mask
def intersect_rshift(self, lhs: np.ndarray, rhs: np.ndarray,
rshift: np.int64 = _neg1) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
rshift : int how much to shift rhs by to the right
"""
rhs_int = rhs
assert rshift < 0, "rshift must be negative"
rhs_int = rhs[self.payload_msb(rhs) >= np.abs(rshift)]
rshft = rshift.view(np.uint64)
rhs_shifted = (rhs_int >> self.payload_lsb_bits) + rshft
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs_shifted,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs_int[rhs_idx]
def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
"""
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs >> self.payload_lsb_bits,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs[rhs_idx]
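# Illustrative sketch (not part of the original module; the sample data is an assumption):
# intersect() keeps only the encoded 64-bit words whose key + payload-MSB prefix occurs
# on both sides, here the doc-0 word covering positions 0..17.
# >>> enc = RoaringishEncoder()
# >>> lhs, _ = enc.encode(payload=np.asarray([1, 40], dtype=np.uint64),
# ...                     keys=np.asarray([0, 0], dtype=np.uint64))
# >>> rhs, _ = enc.encode(payload=np.asarray([5], dtype=np.uint64),
# ...                     keys=np.asarray([0], dtype=np.uint64))
# >>> lhs_common, rhs_common = enc.intersect(lhs, rhs)   # drops the posn-40 word from lhs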
def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray:
"""Get list of encoded that have values in keys."""
assert len(keys.shape) == 1
assert len(encoded.shape) == 1
encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits)
_, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True,
duplicates=snp.KEEP_MAX_N,
algorithm=_algorithm)
return encoded[idx_enc]
def convert_keys(keys) -> np.ndarray:
"""Convert keys to range or np.ndarray of uint64."""
if isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
# UNFORTUNATE COPY
return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64)
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64)
raise ValueError(f"Unknown type for keys: {type(keys)}")
<fim_middle>"""Return payload MSBs from encoded.""" | """Return payload MSBs from encoded.""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/similarity.py
def bm25_legacy_similarity(k1: float = 1.2, b: float = 0.75) -> Similarity:
"""BM25 similarity prior to LUCENE-8563 with k1 + 1 in numerator."""
# (freq * (k1 + 1)) / (freq + k1 * (1 - b + b * fieldLength / avgFieldLength))
def bm25(term_freqs: np.ndarray, doc_freqs: np.ndarray,
doc_lens: np.ndarray,
avg_doc_lens: int, num_docs: int) -> np.ndarray:
"""Calculate BM25 scores."""
# Sum doc freqs
sum_dfs = np.sum(doc_freqs, axis=0)
# Calculate idf
idf = np.log(1 + (num_docs - sum_dfs + 0.5) / (sum_dfs + 0.5))
# Calculate tf
tf = (term_freqs * (k1 + 1)) / (term_freqs + k1 * (1 - b + b * doc_lens / avg_doc_lens))
return idf * tf
return bm25
# searcharray/searcharray/similarity.py
def bm25_similarity(k1: float = 1.2, b: float = 0.75) -> Similarity:
"""BM25 similarity function, as in Lucene 9."""
def bm25(term_freqs: np.ndarray, doc_freqs: np.ndarray,
doc_lens: np.ndarray,
avg_doc_lens: int, num_docs: int) -> np.ndarray:
"""Calculate BM25 scores."""
# Sum doc freqs
sum_dfs = np.sum(doc_freqs, axis=0)
# Calculate idf
idf = np.log(1 + (num_docs - sum_dfs + 0.5) / (sum_dfs + 0.5))
# Calculate tf
tf = term_freqs / (term_freqs + k1 * (1 - b + b * doc_lens / avg_doc_lens))
return idf * tf
return bm25
# searcharray/searcharray/phrase/middle_out.py
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only count the count of the last bigram (ignoring the ones where priors did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
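# Illustrative sketch (not part of the original module): how a few common mm specs
# resolve for a four-clause query, per the parser above.
# >>> parse_min_should_match(4, "2")      # absolute count
# 2
# >>> parse_min_should_match(4, "-1")     # all but one clause
# 3
# >>> parse_min_should_match(4, "75%")    # percentage, truncated
# 3
# >>> parse_min_should_match(4, "2<75%")  # conditional: above 2 clauses, require 75%
# 3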
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
<fim_suffix>
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
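# Hypothetical usage sketch (not part of the original module; the helper name, column
# names, and sample documents are assumptions): index a text column with SearchArray,
# then score it with edismax.
def _edismax_usage_example():
    df = pd.DataFrame({"title": ["cat in the hat", "green eggs and ham"]})
    df["title_indexed"] = SearchArray.index(df["title"])
    scores, explain = edismax(df, q="green ham", qf=["title_indexed"], mm="1")
    # scores aligns with df's rows: doc 1 matches both terms, doc 0 scores 0
    return scores, explain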
<fim_middle>"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
np.ndarray
The search results
""" | """Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
np.ndarray
The search results
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/utils/row_viewable_matrix.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/postings.py
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
# searcharray/searcharray/postings.py
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
# searcharray/searcharray/postings.py
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
"""
import numbers
import numpy as np
from searcharray.utils.mat_set import SparseMatSet
from typing import Optional, Union, Dict, List
def rowwise_eq(mat: SparseMatSet, other: SparseMatSet) -> Union[bool, np.ndarray]:
<fim_suffix>
if len(mat) != len(other):
return False
row_eq = np.zeros(mat.shape[0], dtype=np.dtype('bool'))
for row_idx in range(len(mat)):
if np.all(mat[row_idx] == other[row_idx]):
row_eq[row_idx] = True
return row_eq
class RowViewableMatrix:
"""A slicable matrix that can return views without copying."""
def __init__(self, mat: SparseMatSet, rows: Optional[np.ndarray] = None, subset=False):
self.mat = mat
self.col_cache: Dict[int, np.ndarray] = {}
self.cols_cached: List[int] = []
if rows is None:
self.rows = np.arange(self.mat.shape[0])
elif isinstance(rows, numbers.Integral):
self.rows = np.array([rows])
else:
self.rows = rows
self.subset = subset
def slice(self, keys):
return RowViewableMatrix(self.mat, self.rows[keys], subset=True)
def __setitem__(self, keys, values):
# Invalidate cached columns before mutating
self.col_cache = {}
self.cols_cached = []
actual_keys = self.rows[keys]
if isinstance(actual_keys, numbers.Integral):
self.mat[actual_keys] = values
elif len(actual_keys) > 0:
self.mat[actual_keys] = values
def copy_row_at(self, row):
return self.mat[self.rows[row]]
def copy(self):
return RowViewableMatrix(self.mat.copy(), self.rows.copy(), subset=self.subset)
def cols_per_row(self):
return self.mat[self.rows].num_cols_per_row()
def copy_col_at(self, col):
if col not in self.col_cache:
self.col_cache[col] = self.mat[self.rows, col]
self.cols_cached.append(col)
if len(self.cols_cached) > 10:
del self.col_cache[self.cols_cached.pop(0)]
return self.col_cache[col]
def __getitem__(self, key):
if isinstance(key, numbers.Integral):
return self.copy_row_at(key)
else:
return self.slice(key)
@property
def nbytes(self):
return self.mat.nbytes + \
self.rows.nbytes
@property
def shape(self):
return (len(self.rows), self.mat.shape[1])
def resize(self, shape):
self.mat.ensure_capacity(shape[0] - 1)
def __len__(self):
return len(self.rows)
def __repr__(self):
return f"RowViewableMatrix({repr(self.mat)}, {repr(self.rows)})"
def __str__(self):
return f"RowViewableMatrix({str(self.mat)}, {str(self.rows)})"
def __eq__(self, other):
return rowwise_eq(self.mat[self.rows], other.mat[other.rows])
<fim_middle>"""Check equals on a row-by-row basis.""" | """Check equals on a row-by-row basis.""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/postings.py
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
# searcharray/searcharray/postings.py
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
# searcharray/searcharray/postings.py
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
<fim_suffix>
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
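# Illustrative sketch (not part of the original module): "field^boost" strings map to
# a dict of field -> boost, with None when no boost is given.
# >>> parse_field_boosts(["title^10", "body"])
# {'title': 10.0, 'body': None}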
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
np.ndarray
The search results
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle>"""Parse Solr's qf, pf, pf2, pf3 field boosts.""" | """Parse Solr's qf, pf, pf2, pf3 field boosts.""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/similarity.py
def __call__(self, term_freqs: np.ndarray, doc_freqs: np.ndarray, doc_lens: np.ndarray,
avg_doc_lens: int, num_docs: int) -> np.ndarray:
"""Calculate similarity scores."""
...
# searcharray/searcharray/phrase/middle_out.py
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
# searcharray/searcharray/phrase/posn_diffs.py
def compute_phrase_freqs(term_posns, phrase_freqs, slop=1, width=10):
"""Compute phrase freq using matrix-diff method for docs up to width posns. Skip others.
Parameters
----------
term_posns: list of np.ndarray term positions for a given term across multiple docs
phrase_freqs: np.ndarray, phrase freqs for each doc present in term_posns
Returns
-------
phrase_freqs: np.ndarray, phrase freqs for each doc present in mask
See Also
--------
Colab notebook: https://colab.research.google.com/drive/1NRxeO8Ya8jSlFP5YwZaGh1-43kDH4OXG?authuser=1#scrollTo=5JZV8svpauYB
"""
if len(term_posns[0]) != len(phrase_freqs):
raise ValueError("term_posns and phrase_freqs must be same length")
stacked = stack_term_posns(term_posns, phrase_freqs, width=width)
phrase_freqs = _compute_phrase_freqs(stacked, phrase_freqs, slop=slop)
phrase_freqs[phrase_freqs == -2] = -1
return phrase_freqs
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each term/doc pair. We can just update it.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# its an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
<fim_suffix>
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, to let TF cache know it should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
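# Illustrative usage sketch (not part of the original class; the sample documents are
# assumptions): term, phrase, and boolean matching on an indexed array.
# >>> arr = SearchArray.index(["foo bar baz", "the big dog", "foo bar"])
# >>> arr.match("foo")            # doc 0 and doc 2 contain "foo"
# array([ True, False,  True])
# >>> arr.match(["foo", "bar"])   # phrase "foo bar", same docs
# array([ True, False,  True])
# >>> arr.and_query(["foo", "baz"])   # only doc 0 has both terms
# array([ True, False, False])
# >>> arr.score("foo")            # BM25 scores, one per doc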
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle>"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
""" | """Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/utils/roaringish.py
def encode(self, payload: np.ndarray,
keys: Optional[np.ndarray] = None,
boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Pack a sorted array of integers into compact bit numpy array.
each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs| payload
for later easy intersection of 32+16 msbs, then checking for adjacent
positions
If boundaries are provided, then we consider multiple distinct payloads
being encoded simultaneously, and we return the boundaries of each
"""
cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # Header of bit to use
cols <<= self.payload_msb_bits
if keys is not None:
cols |= keys.astype(np.uint64) << (_64 - self.key_bits)
values = payload % self.payload_lsb_bits # Value to encode
change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1
change_indices_one_doc = np.concatenate([[0], change_indices_one_doc])
if boundaries is not None:
change_indices = snp.merge(change_indices_one_doc, boundaries,
duplicates=snp.DROP)
new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1]
new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]])
else:
change_indices = change_indices_one_doc
new_boundaries = None
# 0 as a position, goes in bit 1,
# 1 as a position, goes in bit 2, etc
values = _1 << values
cols |= values
encoded = cols
if len(encoded) == 0:
return encoded, new_boundaries
reduced = np.bitwise_or.reduceat(encoded, change_indices)
return reduced, new_boundaries
# searcharray/searcharray/utils/roaringish.py
def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
"""
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs >> self.payload_lsb_bits,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs[rhs_idx]
# searcharray/searcharray/indexing.py
def _compute_doc_lens(posns: np.ndarray, doc_ids: np.ndarray, num_docs: int) -> np.ndarray:
"""Given an array of positions, compute the length of each document."""
doc_lens = np.zeros(num_docs, dtype=np.uint32)
# Find where we have posns for each doc
non_empty_doc_lens = -np.diff(posns) + 1
non_empty_idxs = np.argwhere(non_empty_doc_lens > 0).flatten()
non_empty_doc_ids = doc_ids[non_empty_idxs]
non_empty_doc_lens = non_empty_doc_lens[non_empty_idxs]
doc_lens[non_empty_doc_ids] = non_empty_doc_lens
if doc_ids[-1] not in non_empty_doc_ids:
doc_lens[doc_ids[-1]] = posns[-1] + 1
return doc_lens
"""
"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
<fim_suffix>
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs payload's most significant lsb bit set and rhs payload's least significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only count the count of the last bigram (ignoring the ones where priors did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
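# Illustrative sketch (not part of the original module; the helper name and sample
# positions are assumptions): counting the phrase "a b" in one doc from two encoded
# position arrays.
def _compute_phrase_freqs_example():
    # term "a" at positions 0 and 4, term "b" at position 1, all in doc 0
    lhs, _ = encoder.encode(payload=np.asarray([0, 4], dtype=np.uint64),
                            keys=np.asarray([0, 0], dtype=np.uint64))
    rhs, _ = encoder.encode(payload=np.asarray([1], dtype=np.uint64),
                            keys=np.asarray([0], dtype=np.uint64))
    # one adjacent (a, b) bigram in doc 0 -> array([1.])
    return compute_phrase_freqs([lhs, rhs], np.zeros(1))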
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here probably an Ellipsis or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle># Find adjacent matches | # Find adjacent matches | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
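To make the per-doc aggregation in inner_bigram_freqs above easier to follow, here is a minimal standalone numpy sketch of the same pattern; the doc ids and popcount values are invented for illustration, and only the np.diff / np.add.reduceat grouping mirrors the code above.

import numpy as np

# Invented example data: sorted doc ids of matching words and their popcounts
doc_ids = np.array([0, 0, 2, 2, 2, 5], dtype=np.uint64)
bit_counts = np.array([1, 2, 1, 1, 3, 2], dtype=np.uint64)

# Indices where the doc id changes, prefixed with 0, mark group boundaries
transitions = np.argwhere(np.diff(doc_ids) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)          # -> [0, 2, 5]

# Sum popcounts per doc, then scatter them into a dense phrase_freqs array
per_doc = np.add.reduceat(bit_counts, transitions)  # -> [3, 5, 2]
phrase_freqs = np.zeros(7)
phrase_freqs[np.unique(doc_ids)] += per_doc         # docs 0, 2, 5 get 3, 5, 2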
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/utils/roaringish.py
def convert_keys(keys) -> np.ndarray:
"""Convert keys to range or np.ndarray of uint64."""
if isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
# UNFORTUNATE COPY
return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0]
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64)
raise ValueError(f"Unknown type for keys: {type(keys)}")
# searcharray/searcharray/indexing.py
def _compute_doc_lens(posns: np.ndarray, doc_ids: np.ndarray, num_docs: int) -> np.ndarray:
"""Given an array of positions, compute the length of each document."""
doc_lens = np.zeros(num_docs, dtype=np.uint32)
# Find where we have posns for each doc
non_empty_doc_lens = -np.diff(posns) + 1
non_empty_idxs = np.argwhere(non_empty_doc_lens > 0).flatten()
non_empty_doc_ids = doc_ids[non_empty_idxs]
non_empty_doc_lens = non_empty_doc_lens[non_empty_idxs]
doc_lens[non_empty_doc_ids] = non_empty_doc_lens
if doc_ids[-1] not in non_empty_doc_ids:
doc_lens[doc_ids[-1]] = posns[-1] + 1
return doc_lens
# searcharray/searcharray/utils/mat_set.py
def __setitem__(self, index, value):
if isinstance(index, numbers.Integral):
if len(value.shape) == 1:
value = value.reshape(1, -1)
set_rows, set_cols = value.nonzero()
if not (value[set_rows, set_cols] == 1).all():
raise ValueError("This sparse matrix only supports setting 1")
self.set_cols(index, set_cols, overwrite=True)
# Multidimensional indexing
elif isinstance(index, tuple):
row, col = index
if value != 1:
raise ValueError("This sparse matrix only supports setting 1")
self.set_cols(row, np.asarray([col]))
# Multiple rows
elif pd.api.types.is_list_like(index):
if len(index) == len(value):
for idx, val in zip(index, value):
self[idx] = val
elif len(value) == 1:
for idx in index:
self[idx] = value
else:
raise ValueError("Index and value must be same length")
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each term, doc pair. We can just update it.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
<fim_suffix>
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, to let TF cache know it should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle># When other is a scalar value | # When other is a scalar value | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
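As a rough usage sketch of the SearchArray API defined in the row above: the documents and column name are illustrative assumptions, while index, termfreqs, match, and score come from the class shown (default whitespace tokenizer, default BM25 similarity).

import pandas as pd
from searcharray.postings import SearchArray

titles = ["the quick brown fox", "the lazy dog", "quick quick fox"]
df = pd.DataFrame({"title": titles})
df["title_indexed"] = SearchArray.index(titles)   # tokenize and index with ws_tokenizer

arr = df["title_indexed"].array
print(arr.termfreqs("quick"))        # term frequency per document
print(arr.match(["quick", "fox"]))   # boolean mask of docs containing the phrase
print(arr.score("fox"))              # per-doc scores using default_bm25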
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/similarity.py
def __call__(self, term_freqs: np.ndarray, doc_freqs: np.ndarray, doc_lens: np.ndarray,
avg_doc_lens: int, num_docs: int) -> np.ndarray:
"""Calculate similarity scores."""
...
# searcharray/searcharray/phrase/middle_out.py
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
# searcharray/searcharray/phrase/posn_diffs.py
def compute_phrase_freqs(term_posns, phrase_freqs, slop=1, width=10):
"""Compute phrase freq using matrix-diff method for docs up to width posns. Skip others.
Parameters
----------
term_posns: list of np.ndarray term positions for a given term across multiple docs
phrase_freqs: np.ndarray, phrase freqs for each doc present in term_posns
Returns
-------
phrase_freqs: np.ndarray, phrase freqs for each doc present in mask
See Also
--------
Colab notebook: https://colab.research.google.com/drive/1NRxeO8Ya8jSlFP5YwZaGh1-43kDH4OXG?authuser=1#scrollTo=5JZV8svpauYB
"""
if len(term_posns[0]) != len(phrase_freqs):
raise ValueError("term_posns and phrase_freqs must be same length")
stacked = stack_term_posns(term_posns, phrase_freqs, width=width)
phrase_freqs = _compute_phrase_freqs(stacked, phrase_freqs, slop=slop)
phrase_freqs[phrase_freqs == -2] = -1
return phrase_freqs
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each term, doc pair. We can just update it.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
<fim_suffix>
# cache them in the DF cache, to let TF cache know it should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle># For expensive toknes, we compute doc freq first, so we | # For expensive toknes, we compute doc freq first, so we | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/phrase/posn_diffs.py
def compute_phrase_freqs(term_posns, phrase_freqs, slop=1, width=10):
"""Compute phrase freq using matrix-diff method for docs up to width posns. Skip others.
Parameters
----------
term_posns: list of np.ndarray term positions for a given term across multiple docs
phrase_freqs: np.ndarray, phrase freqs for each doc present in term_posns
Returns
-------
phrase_freqs: np.ndarray, phrase freqs for each doc present in mask
See Also
--------
Colab notebook: https://colab.research.google.com/drive/1NRxeO8Ya8jSlFP5YwZaGh1-43kDH4OXG?authuser=1#scrollTo=5JZV8svpauYB
"""
if len(term_posns[0]) != len(phrase_freqs):
raise ValueError("term_posns and phrase_freqs must be same length")
stacked = stack_term_posns(term_posns, phrase_freqs, width=width)
phrase_freqs = _compute_phrase_freqs(stacked, phrase_freqs, slop=slop)
phrase_freqs[phrase_freqs == -2] = -1
return phrase_freqs
# searcharray/searcharray/postings.py
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
# searcharray/searcharray/postings.py
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
<fim_suffix>
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
np.ndarray
The per-document scores
str
An explanation string describing how the query was scored
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
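# Minimal usage sketch (illustrative; the dataframe, column names and the
# top-level import path are assumptions, not taken from this file):
#
#   import numpy as np
#   import pandas as pd
#   from searcharray import SearchArray
#
#   df = pd.DataFrame({"title": ["cat toys", "dog beds"],
#                      "body": ["toys for cats", "beds for dogs"]})
#   df["title"] = SearchArray.index(df["title"])
#   df["body"] = SearchArray.index(df["body"])
#   scores, explain = edismax(df, q="cat toys", qf=["title^10", "body"], mm="2")
#   best_first = df.iloc[np.argsort(scores)[::-1]]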
<fim_middle># otherwise, simple expression | # otherwise, simple expression | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/utils/roaringish.py
def encode(self, payload: np.ndarray,
keys: Optional[np.ndarray] = None,
boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Pack a sorted array of integers into compact bit numpy array.
each returned array represents a single term, with the key in the MSBs, i.e.:
| 32 MSBs | 16 bits   | 16 LSBs |
| key     | posn MSBs | payload |
for later easy intersection of 32+16 msbs, then checking for adjacent
positions
If boundaries are provided, then we consider multiple distinct payloads
being encoded simultaneously, and we return the boundaries of each
"""
cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # Header of bit to use
cols <<= self.payload_msb_bits
if keys is not None:
cols |= keys.astype(np.uint64) << (_64 - self.key_bits)
values = payload % self.payload_lsb_bits # Value to encode
change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1
change_indices_one_doc = np.concatenate([[0], change_indices_one_doc])
if boundaries is not None:
change_indices = snp.merge(change_indices_one_doc, boundaries,
duplicates=snp.DROP)
new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1]
new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]])
else:
change_indices = change_indices_one_doc
new_boundaries = None
# position 0 sets bit 0 (value 1),
# position 1 sets bit 1 (value 2), etc.
values = _1 << values
cols |= values
encoded = cols
if len(encoded) == 0:
return encoded, new_boundaries
reduced = np.bitwise_or.reduceat(encoded, change_indices)
return reduced, new_boundaries
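# Worked example (illustrative, assuming the 32/16/16 key/header/payload split in
# the docstring above): doc_id=5, position=37 encodes to
#   (5 << 32) | ((37 // 16) << 16) | (1 << (37 % 16))
# i.e. the doc id in the upper 32 bits, the position's 16-wide "block" (37 // 16 == 2)
# in the middle bits, and one set payload bit marking 37 % 16 == 5 within that block.
# Positions sharing a doc id and block are OR'd into a single 64-bit word
# (the np.bitwise_or.reduceat above).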
# searcharray/searcharray/utils/roaringish.py
def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
"""
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs >> self.payload_lsb_bits,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs[rhs_idx]
# searcharray/searcharray/indexing.py
def _compute_doc_lens(posns: np.ndarray, doc_ids: np.ndarray, num_docs: int) -> np.ndarray:
"""Given an array of positions, compute the length of each document."""
doc_lens = np.zeros(num_docs, dtype=np.uint32)
# Find where we have posns for each doc
non_empty_doc_lens = -np.diff(posns) + 1
non_empty_idxs = np.argwhere(non_empty_doc_lens > 0).flatten()
non_empty_doc_ids = doc_ids[non_empty_idxs]
non_empty_doc_lens = non_empty_doc_lens[non_empty_idxs]
doc_lens[non_empty_doc_ids] = non_empty_doc_lens
if doc_ids[-1] not in non_empty_doc_ids:
doc_lens[doc_ids[-1]] = posns[-1] + 1
return doc_lens
"""
"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) <fim_suffix>
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs payload's most significant bit set and rhs payload's least significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
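# Toy illustration of the payload bit trick above (ignoring keys/headers): if the
# lhs term sits at position 2 (payload 0b00100) and the rhs term at position 3
# (payload 0b01000), then (lhs & (rhs >> 1)) == 0b00100, and popcounting that
# overlap yields one adjacent pair. adjacent_bigram_freqs covers the wrap-around
# case where a position falls in the last payload bit of one block and the next
# position in the first bit of the following block.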
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only keep the count of the last bigram (ignoring docs where prior bigrams did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
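# Illustrative flat_array layout (one column per posting, rows are attribute arrays):
#   flat_array = np.asarray([[0, 0, 1],    # term ids
#                            [0, 1, 1],    # doc ids
#                            [2, 7, 0]])   # positions
# i.e. term 0 appears at position 2 of doc 0 and position 7 of doc 1, term 1 at
# position 0 of doc 1, sorted by term id and then position.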
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here probably ellipses or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle># ceiling divide | # ceiling divide | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/utils/roaringish.py
def convert_keys(keys) -> np.ndarray:
"""Convert keys to range or np.ndarray of uint64."""
if isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
# UNFORTUNATE COPY
return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0]
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64)
raise ValueError(f"Unknown type for keys: {type(keys)}")
# searcharray/searcharray/indexing.py
def _compute_doc_lens(posns: np.ndarray, doc_ids: np.ndarray, num_docs: int) -> np.ndarray:
"""Given an array of positions, compute the length of each document."""
doc_lens = np.zeros(num_docs, dtype=np.uint32)
# Find where we have posns for each doc
non_empty_doc_lens = -np.diff(posns) + 1
non_empty_idxs = np.argwhere(non_empty_doc_lens > 0).flatten()
non_empty_doc_ids = doc_ids[non_empty_idxs]
non_empty_doc_lens = non_empty_doc_lens[non_empty_idxs]
doc_lens[non_empty_doc_ids] = non_empty_doc_lens
if doc_ids[-1] not in non_empty_doc_ids:
doc_lens[doc_ids[-1]] = posns[-1] + 1
return doc_lens
# searcharray/searcharray/utils/mat_set.py
def __setitem__(self, index, value):
if isinstance(index, numbers.Integral):
if len(value.shape) == 1:
value = value.reshape(1, -1)
set_rows, set_cols = value.nonzero()
if not (value[set_rows, set_cols] == 1).all():
raise ValueError("This sparse matrix only supports setting 1")
self.set_cols(index, set_cols, overwrite=True)
# Multidimensional indexing
elif isinstance(index, tuple):
row, col = index
if value != 1:
raise ValueError("This sparse matrix only supports setting 1")
self.set_cols(row, np.asarray([col]))
# Multiple rows
elif pd.api.types.is_list_like(index):
if len(index) == len(value):
for idx, val in zip(index, value):
self[idx] = val
elif len(value) == 1:
for idx in index:
self[idx] = value
else:
raise ValueError("Index and value must be same length")
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair; we can just update them.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
<fim_suffix>
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first and cache it in the DF cache,
# which lets the TF cache know those term freqs should be cached as well
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
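# Minimal usage sketch (illustrative; column names are assumptions, not from this file):
#   df["body_idx"] = SearchArray.index(df["body"])
#   bm25 = df["body_idx"].array.score("cats")              # single term, default BM25
#   phrase = df["body_idx"].array.score(["cat", "toys"])   # phrase, scored via phrase freqs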
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle># (not looking at positions, maybe we should?) | # (not looking at positions, maybe we should?) | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/similarity.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/postings.py
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
# searcharray/searcharray/indexing.py
def _compute_doc_lens(posns: np.ndarray, doc_ids: np.ndarray, num_docs: int) -> np.ndarray:
"""Given an array of positions, compute the length of each document."""
doc_lens = np.zeros(num_docs, dtype=np.uint32)
# Find where we have posns for each doc
non_empty_doc_lens = -np.diff(posns) + 1
non_empty_idxs = np.argwhere(non_empty_doc_lens > 0).flatten()
non_empty_doc_ids = doc_ids[non_empty_idxs]
non_empty_doc_lens = non_empty_doc_lens[non_empty_idxs]
doc_lens[non_empty_doc_ids] = non_empty_doc_lens
if doc_ids[-1] not in non_empty_doc_ids:
doc_lens[doc_ids[-1]] = posns[-1] + 1
return doc_lens
# searcharray/searcharray/postings.py
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
"""
"""Similarity functions given term stats."""
from typing import Protocol
import numpy as np
class Similarity(Protocol):
"""Similarity function protocol."""
def __call__(self, term_freqs: np.ndarray, doc_freqs: np.ndarray, doc_lens: np.ndarray,
avg_doc_lens: int, num_docs: int) -> np.ndarray:
"""Calculate similarity scores."""
...
def bm25_similarity(k1: float = 1.2, b: float = 0.75) -> Similarity:
"""BM25 similarity function, as in Lucene 9."""
def bm25(term_freqs: np.ndarray, doc_freqs: np.ndarray,
doc_lens: np.ndarray,
avg_doc_lens: int, num_docs: int) -> np.ndarray:
"""Calculate BM25 scores."""
<fim_suffix>
sum_dfs = np.sum(doc_freqs, axis=0)
# Calculate idf
idf = np.log(1 + (num_docs - sum_dfs + 0.5) / (sum_dfs + 0.5))
# Calculate tf
tf = term_freqs / (term_freqs + k1 * (1 - b + b * doc_lens / avg_doc_lens))
return idf * tf
return bm25
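# Minimal sketch of calling the similarity directly (illustrative numbers; in
# practice SearchArray.score supplies these statistics):
#   sim = bm25_similarity(k1=1.2, b=0.75)
#   scores = sim(term_freqs=np.asarray([3.0, 0.0, 1.0]),   # tf per doc
#                doc_freqs=np.asarray([2]),                # one query term, docfreq 2
#                doc_lens=np.asarray([100, 80, 120]),
#                avg_doc_lens=100, num_docs=3)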
def bm25_legacy_similarity(k1: float = 1.2, b: float = 0.75) -> Similarity:
"""BM25 similarity prior to LUCENE-8563 with k1 + 1 in numerator."""
# (freq * (k1 + 1)) / (freq + k1 * (1 - b + b * fieldLength / avgFieldLength))
def bm25(term_freqs: np.ndarray, doc_freqs: np.ndarray,
doc_lens: np.ndarray,
avg_doc_lens: int, num_docs: int) -> np.ndarray:
"""Calculate BM25 scores."""
# Sum doc freqs
sum_dfs = np.sum(doc_freqs, axis=0)
# Calculate idf
idf = np.log(1 + (num_docs - sum_dfs + 0.5) / (sum_dfs + 0.5))
# Calculate tf
tf = (term_freqs * (k1 + 1)) / (term_freqs + k1 * (1 - b + b * doc_lens / avg_doc_lens))
return idf * tf
return bm25
def classic_similarity() -> Similarity:
"""Classic Lucene TF-IDF similarity function."""
def classic(term_freqs: np.ndarray, doc_freqs: np.ndarray,
doc_lens: np.ndarray,
avg_doc_lens: int, num_docs: int) -> np.ndarray:
"""Calculate classic TF-IDF scores."""
# Sum doc freqs
sum_dfs = np.sum(doc_freqs, axis=0)
# Calculate idf as log((docCount+1)/(docFreq+1)) + 1
idf = np.log((num_docs + 1) / (sum_dfs + 1)) + 1
length_norm = 1.0 / np.sqrt(doc_lens)
# Calculate tf
tf = np.sqrt(term_freqs)
return idf * tf * length_norm
return classic
default_bm25 = bm25_similarity()
<fim_middle># Sum doc freqs | # Sum doc freqs | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
# searcharray/searcharray/term_dict.py
def get_term(self, term_id):
try:
return self.id_to_terms[term_id]
except KeyError:
raise TermMissingError(f"Term at {term_id} not present in dictionary. Reindex to add.")
# searcharray/searcharray/solr.py
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair; we can just update them.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
<fim_suffix>
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, compute doc freq first so it lands in the DF cache,
# which tells the TF cache that this term's freqs should also be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
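# Rough idea of the strategy above: try the vectorized position-diff approach with
# progressively wider windows (10, 20, 30, 40 positions); any docs still left at -1
# fall back to the slower scan_merge based phrase_freq_scan.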
<fim_middle># Count number of rows where the term appears | # Count number of rows where the term appears | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/utils/bitcount.py
def bit_count64(arr):
"""Count the number of bits set in each element in the array."""
arr = arr - ((arr >> _1) & s55)
arr = (arr & s33) + ((arr >> _2) & s33)
arr += (arr >> _4)
arr &= s0F
arr *= s01
arr >>= all_but_one_bit
return arr
# searcharray/searcharray/utils/roaringish.py
def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
"""
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs >> self.payload_lsb_bits,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs[rhs_idx]
# searcharray/searcharray/indexing.py
def _compute_doc_lens(posns: np.ndarray, doc_ids: np.ndarray, num_docs: int) -> np.ndarray:
"""Given an array of positions, compute the length of each document."""
doc_lens = np.zeros(num_docs, dtype=np.uint32)
# Find where we have posns for each doc
non_empty_doc_lens = -np.diff(posns) + 1
non_empty_idxs = np.argwhere(non_empty_doc_lens > 0).flatten()
non_empty_doc_ids = doc_ids[non_empty_idxs]
non_empty_doc_lens = non_empty_doc_lens[non_empty_idxs]
doc_lens[non_empty_doc_ids] = non_empty_doc_lens
if doc_ids[-1] not in non_empty_doc_ids:
doc_lens[doc_ids[-1]] = posns[-1] + 1
return doc_lens
"""
"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
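# -np.floor_divide(a, -2) is a ceiling divide by 2, so adjacents becomes
# floor(adjacents / 2): the raw adjacency popcount is halved (rounding down)
# in this same-term branch, e.g. a popcount of 3 counts as 1 bigram match.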
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# Match where lhs has its most significant payload lsb bit set and rhs has its least significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
<fim_suffix>
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only keep the count of the last bigram (ignoring docs where the earlier bigrams did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
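# Sketch of the loop above for a three-term phrase [t0, t1, t2] (illustrative only):
# bigram_freqs(t0, t1) counts docs where t1 directly follows t0 and returns rhs_next,
# an encoded array marking where those matches ended; bigram_freqs(rhs_next, t2) then
# counts where t2 follows a (t0, t1) match, so only the final bigram count survives,
# and `mask` zeroes out docs where any earlier bigram failed to match.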
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here it is probably an ellipsis or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
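# e.g. with max_doc_id = 1_000_000 a docfreq is only cached once it exceeds
# 10_000 docs (1% of the corpus); indexes below 100_000 docs never cache.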
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle># Combine lhs and rhs matches from two strategies | # Combine lhs and rhs matches from two strategies | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/postings.py
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# searcharray/searcharray/postings.py
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
# searcharray/searcharray/phrase/middle_out.py
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here it is probably an ellipsis or a tuple of various things
return np.asarray(list(rng))[key]
"""
"""Roaring-ish bit array for storing sorted integers in numpy array.
See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm
"""
import numpy as np
import sortednp as snp
import logging
import numbers
from typing import Optional, Tuple, List, Union
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
DEFAULT_KEY_MASK = np.uint64(0xFFFFFFF000000000)
DEFAULT_KEY_BITS = np.uint64(28)
DEFAULT_PAYLOAD_MSB_MASK = np.uint64(0x0000000FFFFC0000)
DEFAULT_PAYLOAD_MSB_BITS = np.uint64(18)
DEFAULT_PAYLOAD_LSB_MASK = np.uint64(0x000000000003FFFF)
DEFAULT_PAYLOAD_LSB_BITS = np.uint64(18)
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
_algorithm = snp.GALLOPING_SEARCH
def n_msb_mask(n: np.uint64) -> np.uint64:
"""Return the n most significant bits of num."""
return np.uint64(~(np.uint64(_1 << (_64 - n))) + _1)
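# e.g. n_msb_mask(np.uint64(28)) == 0xFFFFFFF000000000, i.e. DEFAULT_KEY_MASK above.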
def sorted_unique(arr: np.ndarray) -> np.ndarray:
return snp.intersect(arr, arr, duplicates=snp.DROP)
class RoaringishEncoder:
"""An encoder for key->integer sets as a numpy array.
Each returned array represents a single term, with the key as the MSBs, i.e.:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs | payload
(different number of MSBs / payload bits can be specified)
"""
def __init__(self, key_bits: np.uint64 = DEFAULT_KEY_BITS):
payload_bits = _64 - key_bits
self.payload_msb_bits = payload_bits // _2
self.payload_lsb_bits = np.uint64(payload_bits - self.payload_msb_bits)
self.key_bits = key_bits
assert self.key_bits.dtype == np.uint64
# key bits MSB of 64 bits
self.key_mask = n_msb_mask(key_bits)
self.payload_msb_mask = n_msb_mask(np.uint64(self.payload_msb_bits + key_bits)) & ~self.key_mask
assert self.payload_msb_bits.dtype == np.uint64, f"MSB bits dtype was {self.payload_msb_bits.dtype}"
assert self.payload_msb_mask.dtype == np.uint64, f"MSB mask dtype was {self.payload_msb_mask.dtype}"
self.payload_lsb_mask = (_1 << self.payload_lsb_bits) - np.uint64(1)
assert self.payload_lsb_bits.dtype == np.uint64, f"LSB bits dtype was {self.payload_lsb_bits.dtype}"
assert self.payload_lsb_mask.dtype == np.uint64, f"LSB mask dtype was {self.payload_lsb_mask.dtype}"
if key_bits == DEFAULT_KEY_BITS:
assert self.key_mask == DEFAULT_KEY_MASK
assert self.payload_msb_mask == DEFAULT_PAYLOAD_MSB_MASK
assert self.payload_lsb_mask == DEFAULT_PAYLOAD_LSB_MASK
self.max_payload = np.uint64(2**self.payload_lsb_bits - 1)
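# With the default 28 key bits the 36 payload bits split into an 18-bit MSB header
# and 18 LSB position bits, so max_payload == 2**18 - 1 == 262143.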
def validate_payload(self, payload: np.ndarray):
"""Optional validation of payload."""
if np.any(payload > self.max_payload):
raise ValueError(f"Positions must be less than {2**self.payload_lsb_bits}")
def encode(self, payload: np.ndarray,
keys: Optional[np.ndarray] = None,
boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Pack a sorted array of integers into compact bit numpy array.
each returned array represents a single term, with the key as the MSBs, i.e.:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs| payload
for later easy intersection of 32+16 msbs, then checking for adjacent
positions
If boundaries are provided, then we consider multiple distinct payloads
being encoded simultaneously, and we return the boundaries of each
"""
cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # MSB header: which lsb word each position falls into
cols <<= self.payload_msb_bits
if keys is not None:
cols |= keys.astype(np.uint64) << (_64 - self.key_bits)
values = payload % self.payload_lsb_bits # Bit offset within that lsb word (set below via 1 << value)
change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1
change_indices_one_doc = np.concatenate([[0], change_indices_one_doc])
if boundaries is not None:
change_indices = snp.merge(change_indices_one_doc, boundaries,
duplicates=snp.DROP)
new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1]
new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]])
else:
change_indices = change_indices_one_doc
new_boundaries = None
# Position 0 sets the lowest bit (value 1), position 1 the next bit (value 2), etc.
values = _1 << values
cols |= values
encoded = cols
if len(encoded) == 0:
return encoded, new_boundaries
reduced = np.bitwise_or.reduceat(encoded, change_indices)
return reduced, new_boundaries
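# Illustrative example under the default layout (28 key bits, 18 lsb bits):
# encode(payload=[0, 1, 20], keys=[2, 2, 2]) packs into two uint64 words --
# one with header (key=2, msb=0) and lsb bits 0 and 1 set (positions 0 and 1),
# and one with header (key=2, msb=1) and lsb bit 2 set (position 1*18 + 2 == 20).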
def decode(self, encoded: np.ndarray, get_keys: bool = True) -> Union[List[Tuple[np.uint64, np.ndarray]], List[np.ndarray]]:
"""Decode an encoded bit array into keys / payloads."""
keys = (encoded & self.key_mask) >> (_64 - self.key_bits)
msbs = (encoded & self.payload_msb_mask) >> self.payload_msb_bits
to_concat = []
for bit in range(self.payload_lsb_bits):
mask = 1 << bit
lsbs = encoded & mask
set_lsbs = (lsbs != 0)
this_keys = keys[set_lsbs]
payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits)
doc_with_posn = np.dstack([this_keys, payload])[0]
to_concat.append(doc_with_posn)
stacked = np.vstack(to_concat)
# Sort by doc_id, then posn
sorted_payload = stacked[np.lexsort((stacked[:, 1], stacked[:, 0]))]
keys, idx = np.unique(sorted_payload[:, 0], return_index=True)
grouped = np.split(sorted_payload[:, 1], idx[1:])
if get_keys:
return list(zip(keys, grouped))
else:
return grouped
def keys(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
return (encoded & self.key_mask) >> (_64 - self.key_bits)
def keys_unique(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
keys = self.keys(encoded)
intersected = sorted_unique(keys)
return intersected
def payload_msb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload MSBs from encoded."""
return (encoded & self.payload_msb_mask) >> self.payload_msb_bits
def payload_lsb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload LSBs from encoded."""
return encoded & self.payload_lsb_mask
def intersect_rshift(self, lhs: np.ndarray, rhs: np.ndarray,
rshift: np.int64 = _neg1) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
rshift : int how much to shift rhs by to the right
"""
rhs_int = rhs
assert rshift < 0, "rshift must be negative"
rhs_int = rhs[self.payload_msb(rhs) >= np.abs(rshift)]
rshft = rshift.view(np.uint64)
rhs_shifted = (rhs_int >> self.payload_lsb_bits) + rshft
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs_shifted,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs_int[rhs_idx]
def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
"""
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs >> self.payload_lsb_bits,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs[rhs_idx]
def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray:
"""Get list of encoded that have values in keys."""
assert len(keys.shape) == 1
assert len(encoded.shape) == 1
encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits)
_, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True,
duplicates=snp.KEEP_MAX_N,
algorithm=_algorithm)
return encoded[idx_enc]
def convert_keys(keys) -> np.ndarray:
"""Convert keys to range or np.ndarray of uint64."""
if isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
<fim_suffix>
return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0]
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64)
raise ValueError(f"Unknown type for keys: {type(keys)}")
<fim_middle># UNFORTUNATE COPY | # UNFORTUNATE COPY | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/phrase/middle_out.py
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
# searcharray/searcharray/utils/row_viewable_matrix.py
def rowwise_eq(mat: SparseMatSet, other: SparseMatSet) -> Union[bool, np.ndarray]:
"""Check equals on a row-by-row basis."""
if len(mat) != len(other):
return False
row_eq = np.zeros(mat.shape[0], dtype=np.dtype('bool'))
for row_idx in range(len(mat)):
if np.all(mat[row_idx] == other[row_idx]):
row_eq[row_idx] = True
return row_eq
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these were two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
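# Illustrative scalar, assuming whitespace tokenization of "cat sat on the cat":
# Terms({'cat': 2, 'sat': 1, 'on': 1, 'the': 1}, doc_len=5) -- a bag of term
# frequencies plus the document length used later for scoring.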
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
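# Hypothetical usage sketch (the DataFrame and column names are illustrative only):
# df['title_indexed'] = SearchArray.index(df['title'])
# df['title_indexed'].array.termfreqs('cat') # per-document term frequencies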
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we already have positions for every term/doc pair and can simply update them.
# Otherwise a new term was introduced and we fall through to the except below.
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, compute doc freq first so it lands in the DF cache,
# which tells the TF cache that this term's freqs should also be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
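# Usage sketch: score a single term or an already-tokenized phrase, e.g.
# arr.score('cat') or arr.score(['cat', 'hat']); both return one BM25 score
# per document under the default similarity.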
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
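# e.g. or_query(['cat', 'hat', 'mat'], min_should_match=2) keeps only the docs
# matching at least two of the three terms.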
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
<fim_suffix>
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle>if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop) | if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/solr.py
def listify(x):
return x if isinstance(x, list) else [x]
# searcharray/searcharray/phrase/middle_out.py
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
# searcharray/searcharray/phrase/middle_out.py
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors sharing the same large dimensionality
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair. We can just update it.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
<fim_suffix>
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freqs first and cache them in the
# DF cache, so the TF cache knows they should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle>if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token) | if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
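Editor's aside between dataset rows: a minimal, hedged usage sketch of the match/termfreqs dispatch completed in the row above. It assumes only the SearchArray API visible there (SearchArray.index and match); the sample documents and the expected outputs in comments are illustrative, not drawn from the dataset.

from searcharray.postings import SearchArray

# Index three tiny documents with the default whitespace tokenizer.
arr = SearchArray.index(["red shoes", "blue shoes", "red hat"])

# A bare string is looked up as a single term...
print(arr.match("red"))             # expected: [ True False  True]

# ...while a multi-token list goes through phrase_freq, as in the completed block.
print(arr.match(["red", "shoes"]))  # expected: [ True False False]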
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/postings.py
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# searcharray/searcharray/postings.py
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
# searcharray/searcharray/phrase/middle_out.py
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here probably ellipses or a tuple of various things
return np.asarray(list(rng))[key]
"""
"""Roaring-ish bit array for storing sorted integers in numpy array.
See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm
"""
import numpy as np
import sortednp as snp
import logging
import numbers
from typing import Optional, Tuple, List, Union
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
DEFAULT_KEY_MASK = np.uint64(0xFFFFFFF000000000)
DEFAULT_KEY_BITS = np.uint64(28)
DEFAULT_PAYLOAD_MSB_MASK = np.uint64(0x0000000FFFFC0000)
DEFAULT_PAYLOAD_MSB_BITS = np.uint64(18)
DEFAULT_PAYLOAD_LSB_MASK = np.uint64(0x000000000003FFFF)
DEFAULT_PAYLOAD_LSB_BITS = np.uint64(18)
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
_algorithm = snp.GALLOPING_SEARCH
def n_msb_mask(n: np.uint64) -> np.uint64:
"""Return the n most significant bits of num."""
return np.uint64(~(np.uint64(_1 << (_64 - n))) + _1)
def sorted_unique(arr: np.ndarray) -> np.ndarray:
return snp.intersect(arr, arr, duplicates=snp.DROP)
class RoaringishEncoder:
"""An encoder for key->integer sets as a numpy array.
Each returned array represents a single term, with the key packed into the most significant bits, ie:
| key bits (MSBs) | payload MSBs | payload LSBs |
(28 | 18 | 18 bits with the defaults below; a different key / payload split can be specified)
"""
def __init__(self, key_bits: np.uint64 = DEFAULT_KEY_BITS):
payload_bits = _64 - key_bits
self.payload_msb_bits = payload_bits // _2
self.payload_lsb_bits = np.uint64(payload_bits - self.payload_msb_bits)
self.key_bits = key_bits
assert self.key_bits.dtype == np.uint64
# key bits MSB of 64 bits
self.key_mask = n_msb_mask(key_bits)
self.payload_msb_mask = n_msb_mask(np.uint64(self.payload_msb_bits + key_bits)) & ~self.key_mask
assert self.payload_msb_bits.dtype == np.uint64, f"MSB bits dtype was {self.payload_msb_bits.dtype}"
assert self.payload_msb_mask.dtype == np.uint64, f"MSB mask dtype was {self.payload_msb_mask.dtype}"
self.payload_lsb_mask = (_1 << self.payload_lsb_bits) - np.uint64(1)
assert self.payload_lsb_bits.dtype == np.uint64, f"LSB bits dtype was {self.payload_lsb_bits.dtype}"
assert self.payload_lsb_mask.dtype == np.uint64, f"LSB mask dtype was {self.payload_lsb_mask.dtype}"
if key_bits == DEFAULT_KEY_BITS:
assert self.key_mask == DEFAULT_KEY_MASK
assert self.payload_msb_mask == DEFAULT_PAYLOAD_MSB_MASK
assert self.payload_lsb_mask == DEFAULT_PAYLOAD_LSB_MASK
self.max_payload = np.uint64(2**self.payload_lsb_bits - 1)
def validate_payload(self, payload: np.ndarray):
"""Optional validation of payload."""
if np.any(payload > self.max_payload):
raise ValueError(f"Positions must be less than {2**self.payload_lsb_bits}")
def encode(self, payload: np.ndarray,
keys: Optional[np.ndarray] = None,
boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Pack a sorted array of integers into compact bit numpy array.
each returned array represents a single term, with the key in the MSBs, ie:
| key bits (MSBs) | payload MSBs | payload LSBs |
for later easy intersection on the key + payload-MSB prefix, then checking
for adjacent positions
If boundaries are provided, then we consider multiple distinct payloads
being encoded simultaneously, and we return the boundaries of each
"""
cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # Header of bit to use
cols <<= self.payload_msb_bits
if keys is not None:
cols |= keys.astype(np.uint64) << (_64 - self.key_bits)
values = payload % self.payload_lsb_bits # Value to encode
change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1
change_indices_one_doc = np.concatenate([[0], change_indices_one_doc])
if boundaries is not None:
change_indices = snp.merge(change_indices_one_doc, boundaries,
duplicates=snp.DROP)
new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1]
new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]])
else:
change_indices = change_indices_one_doc
new_boundaries = None
# position 0 sets the lowest payload bit (value 1),
# position 1 sets the next bit (value 2), etc.
values = _1 << values
cols |= values
encoded = cols
if len(encoded) == 0:
return encoded, new_boundaries
reduced = np.bitwise_or.reduceat(encoded, change_indices)
return reduced, new_boundaries
def decode(self, encoded: np.ndarray, get_keys: bool = True) -> Union[List[Tuple[np.uint64, np.ndarray]], List[np.ndarray]]:
"""Decode an encoded bit array into keys / payloads."""
keys = (encoded & self.key_mask) >> (_64 - self.key_bits)
msbs = (encoded & self.payload_msb_mask) >> self.payload_msb_bits
to_concat = []
for bit in range(self.payload_lsb_bits):
mask = 1 << bit
lsbs = encoded & mask
set_lsbs = (lsbs != 0)
this_keys = keys[set_lsbs]
payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits)
doc_with_posn = np.dstack([this_keys, payload])[0]
to_concat.append(doc_with_posn)
stacked = np.vstack(to_concat)
# Sort by doc_id, then posn
sorted_payload = stacked[np.lexsort((stacked[:, 1], stacked[:, 0]))]
keys, idx = np.unique(sorted_payload[:, 0], return_index=True)
grouped = np.split(sorted_payload[:, 1], idx[1:])
if get_keys:
return list(zip(keys, grouped))
else:
return grouped
def keys(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
return (encoded & self.key_mask) >> (_64 - self.key_bits)
def keys_unique(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
keys = self.keys(encoded)
intersected = sorted_unique(keys)
return intersected
def payload_msb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload MSBs from encoded."""
return (encoded & self.payload_msb_mask) >> self.payload_msb_bits
def payload_lsb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload LSBs from encoded."""
return encoded & self.payload_lsb_mask
def intersect_rshift(self, lhs: np.ndarray, rhs: np.ndarray,
rshift: np.int64 = _neg1) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
rshift : int, how much to shift rhs to the right (must be negative)
"""
rhs_int = rhs
assert rshift < 0, "rshift must be negative"
rhs_int = rhs[self.payload_msb(rhs) >= np.abs(rshift)]
rshft = rshift.view(np.uint64)
rhs_shifted = (rhs_int >> self.payload_lsb_bits) + rshft
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs_shifted,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs_int[rhs_idx]
def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
"""
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs >> self.payload_lsb_bits,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs[rhs_idx]
def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray:
"""Get list of encoded that have values in keys."""
assert len(keys.shape) == 1
assert len(encoded.shape) == 1
encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits)
_, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True,
duplicates=snp.KEEP_MAX_N,
algorithm=_algorithm)
return encoded[idx_enc]
def convert_keys(keys) -> np.ndarray:
"""Convert keys to range or np.ndarray of uint64."""
<fim_suffix>
raise ValueError(f"Unknown type for keys: {type(keys)}")
<fim_middle>if isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
# UNFORTUNATE COPY
return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0]
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64) | if isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
# UNFORTUNATE COPY
return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0]
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
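Editor's aside: a hedged, self-contained sketch of the bit layout the encoder in the row above works with (28 key bits, 18 payload-MSB bits, 18 one-hot payload-LSB bits by default). It mirrors the shifts in encode() for a single (key, position) pair; it is not library code, and pack_one is a hypothetical helper.

import numpy as np

_36, _18 = np.uint64(36), np.uint64(18)

def pack_one(key: int, posn: int) -> np.uint64:
    """Pack one (doc key, position) pair the way encode() does with the defaults."""
    block = np.uint64(posn) // _18                      # which 18-position "block" the posn falls in
    packed = np.uint64(key) << _36                      # key occupies the top 28 bits (shift by 64 - 28)
    packed |= block << _18                              # payload MSBs sit just above the LSB region
    packed |= np.uint64(1) << (np.uint64(posn) % _18)   # one-hot bit for the posn within its block
    return packed

# Positions 0, 1 and 5 of doc 3 share a block, so they OR into a single word,
# the same collapsing that bitwise_or.reduceat performs inside encode().
word = pack_one(3, 0) | pack_one(3, 1) | pack_one(3, 5)
assert word >> _36 == np.uint64(3)                      # the key is recoverable from the MSBs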
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/utils/roaringish.py
def convert_keys(keys) -> np.ndarray:
"""Convert keys to range or np.ndarray of uint64."""
if isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
# UNFORTUNATE COPY
return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0]
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64)
raise ValueError(f"Unknown type for keys: {type(keys)}")
# searcharray/searcharray/phrase/middle_out.py
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here probably ellipses or a tuple of various things
return np.asarray(list(rng))[key]
# searcharray/searcharray/indexing.py
def build_index_from_terms_list(postings, Terms):
"""Bulid an index from postings that are already tokenized and point at their term frequencies/posns."""
term_dict = TermDict()
term_doc = SparseMatSetBuilder()
doc_lens = []
avg_doc_length = 0
num_postings = 0
posns = PosnBitArrayBuilder()
posns_enc = PosnBitArrayAlreadyEncBuilder()
# COPY 1
# Consume generator (tokenized postings) into list
# its faster this way?
postings = list(postings)
# COPY 2
for doc_id, tokenized in enumerate(postings):
if isinstance(tokenized, dict):
tokenized = Terms(tokenized, doc_len=len(tokenized))
elif not isinstance(tokenized, Terms):
raise TypeError("Expected a Terms or a dict")
if tokenized.encoded:
posns = posns_enc
doc_lens.append(tokenized.doc_len)
avg_doc_length += doc_lens[-1]
terms = []
for token, term_freq in tokenized.terms():
term_id = term_dict.add_term(token)
terms.append(term_id)
positions = tokenized.positions(token)
if positions is not None:
posns.add_posns(doc_id, term_id, positions)
term_doc.append(terms)
posns.ensure_capacity(doc_id)
num_postings += 1
if num_postings > 0:
avg_doc_length /= num_postings
bit_posns = posns.build()
return RowViewableMatrix(term_doc.build()), bit_posns, term_dict, avg_doc_length, np.array(doc_lens)
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors sharing the same large dimensionality
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair. We can just update it.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
<fim_suffix>
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freqs first and cache them in the
# DF cache, so the TF cache knows they should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle>if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases") | if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases") | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/phrase/posn_diffs.py
def compute_phrase_freqs(term_posns, phrase_freqs, slop=1, width=10):
"""Compute phrase freq using matrix-diff method for docs up to width posns. Skip others.
Parameters
----------
term_posns: list of np.ndarray term positions for a given term across multiple docs
phrase_freqs: np.ndarray, phrase freqs for each doc present in term_posns
Returns
-------
phrase_freqs: np.ndarray, phrase freqs for each doc present in mask
See Also
--------
Colab notebook: https://colab.research.google.com/drive/1NRxeO8Ya8jSlFP5YwZaGh1-43kDH4OXG?authuser=1#scrollTo=5JZV8svpauYB
"""
if len(term_posns[0]) != len(phrase_freqs):
raise ValueError("term_posns and phrase_freqs must be same length")
stacked = stack_term_posns(term_posns, phrase_freqs, width=width)
phrase_freqs = _compute_phrase_freqs(stacked, phrase_freqs, slop=slop)
phrase_freqs[phrase_freqs == -2] = -1
return phrase_freqs
# searcharray/searcharray/postings.py
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
# searcharray/searcharray/postings.py
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
<fim_suffix>
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
Tuple[np.ndarray, str]
The per-document scores and an explain string
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle>if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1]) | if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1]) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
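Editor's aside: a hedged, assert-style sketch of how the mm parsing shown in the row above behaves for a few common Solr-style specs. The expected values simply follow the branches of parse_min_should_match as written; the import path assumes the searcharray.solr module named in the row's filename.

from searcharray.solr import parse_min_should_match

assert parse_min_should_match(4, "2") == 2       # absolute clause count
assert parse_min_should_match(4, "-1") == 3      # negative: all but one clause must match
assert parse_min_should_match(4, "75%") == 3     # percentage of clauses, truncated
assert parse_min_should_match(4, "100%") == 4    # edismax() maps q_op="AND" to "100%"
assert parse_min_should_match(4, "2<75%") == 3   # conditional: with more than 2 clauses, require 75%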
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/similarity.py
def bm25_legacy_similarity(k1: float = 1.2, b: float = 0.75) -> Similarity:
"""BM25 similarity prior to LUCENE-8563 with k1 + 1 in numerator."""
# (freq * (k1 + 1)) / (freq + k1 * (1 - b + b * fieldLength / avgFieldLength))
def bm25(term_freqs: np.ndarray, doc_freqs: np.ndarray,
doc_lens: np.ndarray,
avg_doc_lens: int, num_docs: int) -> np.ndarray:
"""Calculate BM25 scores."""
# Sum doc freqs
sum_dfs = np.sum(doc_freqs, axis=0)
# Calculate idf
idf = np.log(1 + (num_docs - sum_dfs + 0.5) / (sum_dfs + 0.5))
# Calculate tf
tf = (term_freqs * (k1 + 1)) / (term_freqs + k1 * (1 - b + b * doc_lens / avg_doc_lens))
return idf * tf
return bm25
# searcharray/searcharray/similarity.py
def bm25_similarity(k1: float = 1.2, b: float = 0.75) -> Similarity:
"""BM25 similarity function, as in Lucene 9."""
def bm25(term_freqs: np.ndarray, doc_freqs: np.ndarray,
doc_lens: np.ndarray,
avg_doc_lens: int, num_docs: int) -> np.ndarray:
"""Calculate BM25 scores."""
# Sum doc freqs
sum_dfs = np.sum(doc_freqs, axis=0)
# Calculate idf
idf = np.log(1 + (num_docs - sum_dfs + 0.5) / (sum_dfs + 0.5))
# Calculate tf
tf = term_freqs / (term_freqs + k1 * (1 - b + b * doc_lens / avg_doc_lens))
return idf * tf
return bm25
# searcharray/searcharray/phrase/middle_out.py
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only keep the count of the last bigram (ignoring docs where earlier bigrams did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of Solr's Java code for parsing the mm spec:
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
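# Illustrative examples (not part of the original module; hand-derived from the
# parsing rules above) showing how a few common mm specs resolve:
assert parse_min_should_match(3, "2") == 2       # absolute count
assert parse_min_should_match(4, "75%") == 3     # percentage of clauses
assert parse_min_should_match(5, "-1") == 4      # all clauses but one
assert parse_min_should_match(4, "2<75%") == 3   # more than 2 clauses: apply 75%
assert parse_min_should_match(2, "2<75%") == 2   # 2 or fewer clauses: require all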
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
Tuple[np.ndarray, str]
The per-document scores and a Solr-style explain string
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
<fim_suffix>
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle>if mm is None:
mm = "1" | if mm is None:
mm = "1" | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
# searcharray/searcharray/term_dict.py
def get_term(self, term_id):
try:
return self.id_to_terms[term_id]
except KeyError:
raise TermMissingError(f"Term at {term_id} not present in dictionary. Reindex to add.")
# searcharray/searcharray/utils/mat_set.py
def build(self):
return SparseMatSet(cols=np.asarray(self.cols, dtype=np.uint32),
rows=np.asarray(self.rows, dtype=np.uint32))
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions is also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
return same_postings and self.doc_len == other.doc_len
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two vectors of the same large dimensional vector sparse
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
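# Illustrative sketch (not part of the original module): a tiny hand-built Terms
# scalar, exercising the bag-of-words accessors defined above.
_example_doc = Terms({"cat": 2, "hat": 1}, doc_len=3)
assert _example_doc.termfreq("cat") == 2
assert len(_example_doc) == 2   # two distinct terms in the doc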
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair, so we can just update them.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also, this is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# its an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
<fim_suffix>
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, letting the TF cache know they should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
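# Illustrative usage sketch (not part of the original class): given
#   arr = SearchArray.index(["cat hat", "just a cat"])
# arr.score("cat") returns one BM25-style score per document for the single term,
# while arr.score(["cat", "hat"]) scores the phrase "cat hat" (the list form is
# routed through phrase_freq via termfreqs above).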
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle>if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches | if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/similarity.py
def bm25_legacy_similarity(k1: float = 1.2, b: float = 0.75) -> Similarity:
"""BM25 similarity prior to LUCENE-8563 with k1 + 1 in numerator."""
# (freq * (k1 + 1)) / (freq + k1 * (1 - b + b * fieldLength / avgFieldLength))
def bm25(term_freqs: np.ndarray, doc_freqs: np.ndarray,
doc_lens: np.ndarray,
avg_doc_lens: int, num_docs: int) -> np.ndarray:
"""Calculate BM25 scores."""
# Sum doc freqs
sum_dfs = np.sum(doc_freqs, axis=0)
# Calculate idf
idf = np.log(1 + (num_docs - sum_dfs + 0.5) / (sum_dfs + 0.5))
# Calculate tf
tf = (term_freqs * (k1 + 1)) / (term_freqs + k1 * (1 - b + b * doc_lens / avg_doc_lens))
return idf * tf
return bm25
# searcharray/searcharray/similarity.py
def bm25_similarity(k1: float = 1.2, b: float = 0.75) -> Similarity:
"""BM25 similarity function, as in Lucene 9."""
def bm25(term_freqs: np.ndarray, doc_freqs: np.ndarray,
doc_lens: np.ndarray,
avg_doc_lens: int, num_docs: int) -> np.ndarray:
"""Calculate BM25 scores."""
# Sum doc freqs
sum_dfs = np.sum(doc_freqs, axis=0)
# Calculate idf
idf = np.log(1 + (num_docs - sum_dfs + 0.5) / (sum_dfs + 0.5))
# Calculate tf
tf = term_freqs / (term_freqs + k1 * (1 - b + b * doc_lens / avg_doc_lens))
return idf * tf
return bm25
# searcharray/searcharray/phrase/middle_out.py
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only keep the count of the last bigram (ignoring docs where earlier bigrams did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of Solr's Java code for parsing the mm spec:
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
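# Illustrative example (not part of the original module): a "field^boost" entry
# maps to a float boost, and a bare field name maps to None (i.e. a boost of 1).
assert parse_field_boosts(["title^10", "body"]) == {"title": 10.0, "body": None}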
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
Tuple[np.ndarray, str]
The per-document scores and a Solr-style explain string
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
<fim_suffix>
return qf_scores, explain
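# Illustrative usage sketch (not part of the original module; the column names
# "title" and "body" are made up for the example). Assuming two SearchArray
# columns have been indexed as shown in searcharray.postings:
#
#   df = pd.DataFrame({"title": ["cat in the hat", "red dog"],
#                      "body": ["the cat sat on the hat", "dogs and cats"]})
#   df["title"] = SearchArray.index(df["title"])
#   df["body"] = SearchArray.index(df["body"])
#   scores, explain = edismax(df, q="cat hat",
#                             qf=["title^10", "body"], pf=["title"], mm="2")
#
# `scores` holds one BM25-style score per row and `explain` is a Solr-like string
# describing how the query was decomposed.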
<fim_middle>if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx] | if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx] | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/phrase/posn_diffs.py
def compute_phrase_freqs(term_posns, phrase_freqs, slop=1, width=10):
"""Compute phrase freq using matrix-diff method for docs up to width posns. Skip others.
Parameters
----------
term_posns: list of np.ndarray term positions for a given term across multiple docs
phrase_freqs: np.ndarray, phrase freqs for each doc present in term_posns
Returns
-------
phrase_freqs: np.ndarray, phrase freqs for each doc present in mask
See Also
--------
Colab notebook: https://colab.research.google.com/drive/1NRxeO8Ya8jSlFP5YwZaGh1-43kDH4OXG?authuser=1#scrollTo=5JZV8svpauYB
"""
if len(term_posns[0]) != len(phrase_freqs):
raise ValueError("term_posns and phrase_freqs must be same length")
stacked = stack_term_posns(term_posns, phrase_freqs, width=width)
phrase_freqs = _compute_phrase_freqs(stacked, phrase_freqs, slop=slop)
phrase_freqs[phrase_freqs == -2] = -1
return phrase_freqs
# searcharray/searcharray/postings.py
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
# searcharray/searcharray/postings.py
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of Solr's Java code for parsing the mm spec:
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
<fim_suffix>
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
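# Illustrative note (not part of the original module): term_centric stays True
# only if every qf field's tokenizer produces the same number of tokens for q.
# For example, if q = "wi-fi router" and the "title" tokenizer yields
# ["wi", "fi", "router"] while the "body" tokenizer yields ["wi-fi", "router"],
# the token counts differ (3 vs 2) and edismax falls back to field-centric scoring.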
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
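# Illustrative worked example (not part of the original module): for
# q = "cat hat" with qf = ["title^2", "body"], each term takes the maximum of its
# per-field scores, the per-term maxima are summed, and docs matching fewer than
# mm terms are zeroed. E.g. if a doc scores title:cat=1.0, body:cat=0.4,
# title:hat=0.0, body:hat=0.3, then with the ^2 boost on title:
#   cat -> max(1.0 * 2, 0.4) = 2.0,  hat -> max(0.0 * 2, 0.3) = 0.3,
#   qf score = 2.0 + 0.3 = 2.3 (kept only if at least mm of the 2 terms matched).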
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
Tuple[np.ndarray, str]
The per-document scores and a Solr-style explain string
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle>if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result | if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/utils/roaringish.py
def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray:
"""Get list of encoded that have values in keys."""
assert len(keys.shape) == 1
assert len(encoded.shape) == 1
encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits)
_, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True,
duplicates=snp.KEEP_MAX_N,
algorithm=_algorithm)
return encoded[idx_enc]
# searcharray/searcharray/postings.py
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
# searcharray/searcharray/utils/mat_set.py
def __getitem__(self, key):
# Iterate keys
beg_keys = self.rows[:-1][key]
end_keys = self.rows[1:][key]
if not isinstance(beg_keys, np.ndarray):
beg_keys = np.asarray([beg_keys])
end_keys = np.asarray([end_keys])
cols = [self.cols[beg:end] for beg, end in zip(beg_keys, end_keys)]
rows = [0] + [len(row) for row in cols]
rows = np.asarray(rows).flatten()
rows = np.cumsum(rows)
try:
cols = np.concatenate(cols)
except ValueError:
cols = np.asarray([], dtype=np.uint32)
return SparseMatSet(cols, rows)
"""
"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs lsb set and rhs lsb's most significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only keep the count of the last bigram (ignoring docs where earlier bigrams did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
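# Illustrative sketch (not part of the original module). Assuming, as in
# PosnBitArrayBuilder.build below, that encoder.encode(keys=..., payload=...)
# returns a single roaringish-encoded array when no boundaries are passed, a
# bigram's phrase frequencies could be computed along these lines:
#
#   doc_ids = np.asarray([0, 0, 1], dtype=np.uint32)       # docs holding term A
#   posns_a = np.asarray([0, 7, 3], dtype=np.uint32)       # positions of term A
#   posns_b = np.asarray([1, 9, 4], dtype=np.uint32)       # positions of term B
#   lhs = encoder.encode(keys=doc_ids, payload=posns_a)
#   rhs = encoder.encode(keys=doc_ids, payload=posns_b)
#   freqs = compute_phrase_freqs([lhs, rhs], np.zeros(2))  # one slot per doc id
#   # doc 0 has "A B" adjacent at positions 0->1; doc 1 at 3->4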
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
    # Here the key is probably an Ellipsis or a tuple of various things
return np.asarray(list(rng))[key]
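# A few illustrative calls showing how index_range dispatches on the key type
# (the values below are chosen for this sketch, not taken from the library):
def _index_range_examples():
    assert index_range(range(10), None) == range(10)
    assert index_range(range(10), slice(2, 5)) == range(2, 5)
    assert (index_range(range(10), np.asarray([1, 3])) == np.asarray([1, 3])).all()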
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
<fim_suffix>
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
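        # For scale (illustrative numbers only): with max_doc_id = 200_000, a term
        # must appear in more than 2_000 docs before its doc freq is cached here.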
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle>if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids] | if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids] | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/utils/roaringish.py
def convert_keys(keys) -> np.ndarray:
"""Convert keys to range or np.ndarray of uint64."""
if isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
# UNFORTUNATE COPY
return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0]
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64)
raise ValueError(f"Unknown type for keys: {type(keys)}")
# searcharray/searcharray/indexing.py
def _compute_doc_lens(posns: np.ndarray, doc_ids: np.ndarray, num_docs: int) -> np.ndarray:
"""Given an array of positions, compute the length of each document."""
doc_lens = np.zeros(num_docs, dtype=np.uint32)
    # Find where we have posns for each doc
non_empty_doc_lens = -np.diff(posns) + 1
non_empty_idxs = np.argwhere(non_empty_doc_lens > 0).flatten()
non_empty_doc_ids = doc_ids[non_empty_idxs]
non_empty_doc_lens = non_empty_doc_lens[non_empty_idxs]
doc_lens[non_empty_doc_ids] = non_empty_doc_lens
if doc_ids[-1] not in non_empty_doc_ids:
doc_lens[doc_ids[-1]] = posns[-1] + 1
return doc_lens
# searcharray/searcharray/utils/mat_set.py
def __setitem__(self, index, value):
if isinstance(index, numbers.Integral):
if len(value.shape) == 1:
value = value.reshape(1, -1)
set_rows, set_cols = value.nonzero()
if not (value[set_rows, set_cols] == 1).all():
raise ValueError("This sparse matrix only supports setting 1")
self.set_cols(index, set_cols, overwrite=True)
# Multidimensional indexing
elif isinstance(index, tuple):
row, col = index
if value != 1:
raise ValueError("This sparse matrix only supports setting 1")
self.set_cols(row, np.asarray([col]))
# Multiple rows
elif pd.api.types.is_list_like(index):
if len(index) == len(value):
for idx, val in zip(index, value):
self[idx] = val
elif len(value) == 1:
for idx in index:
self[idx] = value
else:
raise ValueError("Index and value must be same length")
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
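    # e.g. (illustrative values): with a term_dict mapping {"cat": 0, "hat": 1} and
    # postings {"cat": 2, "hat": 1}, tf_to_dense returns array([2., 1.]).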
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
        # Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
        # Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
        # Assume we have positions for each term/doc pair, so we can just update them.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
        # it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
<fim_suffix>
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
        # For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, to let TF cache know it should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
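    # A usage sketch of the search API above (DataFrame/column names are hypothetical):
    #   df["title_indexed"] = SearchArray.index(df["title"])
    #   arr = df["title_indexed"].array
    #   arr.match("cat")                 # bool mask: docs containing "cat"
    #   arr.and_query(["cat", "hat"])    # docs containing both terms
    #   arr.score(["cat", "hat"])        # BM25-style score of the phrase "cat hat"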
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle>return np.array([], dtype=bool) | return np.array([], dtype=bool) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/solr.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/postings.py
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
# searcharray/searcharray/postings.py
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
# searcharray/searcharray/postings.py
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
"""
"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
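# A few illustrative mm specs (assuming behavior mirrors the Solr rules translated
# above); the helper below exists only as a usage sketch, not as library API.
def _mm_spec_examples():
    assert parse_min_should_match(3, "2") == 2       # absolute clause count
    assert parse_min_should_match(4, "75%") == 3     # percentage of clauses
    assert parse_min_should_match(2, "3<90%") == 2   # <= 3 clauses: all required
    assert parse_min_should_match(5, "3<90%") == 4   # > 3 clauses: 90% required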
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
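# Illustrative only: caret-delimited boosts parse into a field -> boost mapping,
# where None means "no explicit boost".
def _field_boost_examples():
    assert parse_field_boosts(["title^2", "body"]) == {"title": 2.0, "body": None}
    assert parse_field_boosts([]) == {}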
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
<fim_suffix>
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
    np.ndarray, str
        The per-row scores and a human-readable explain string describing how the query was scored
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle>max_scores = np.zeros(len(frame)) | max_scores = np.zeros(len(frame)) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/phrase/posn_diffs.py
def stack_term_posns(term_posns: List[List[np.ndarray]], phrase_freqs: np.ndarray, width: int = 10):
# Pad for easy difference computation
keep_term_posns = []
# keep_mask = np.ones(len(self), dtype=bool)
for term_posn in term_posns:
this_term_posns = vstack_with_mask(term_posn, phrase_freqs, width=width)
keep_term_posns.append(this_term_posns)
return keep_term_posns
# searcharray/searcharray/utils/row_viewable_matrix.py
def rowwise_eq(mat: SparseMatSet, other: SparseMatSet) -> Union[bool, np.ndarray]:
"""Check equals on a row-by-row basis."""
if len(mat) != len(other):
return False
row_eq = np.zeros(mat.shape[0], dtype=np.dtype('bool'))
for row_idx in range(len(mat)):
if np.all(mat[row_idx] == other[row_idx]):
row_eq[row_idx] = True
return row_eq
# searcharray/searcharray/utils/mat_set.py
def __getitem__(self, key):
# Iterate keys
beg_keys = self.rows[:-1][key]
end_keys = self.rows[1:][key]
if not isinstance(beg_keys, np.ndarray):
beg_keys = np.asarray([beg_keys])
end_keys = np.asarray([end_keys])
cols = [self.cols[beg:end] for beg, end in zip(beg_keys, end_keys)]
rows = [0] + [len(row) for row in cols]
rows = np.asarray(rows).flatten()
rows = np.cumsum(rows)
try:
cols = np.concatenate(cols)
except ValueError:
cols = np.asarray([], dtype=np.uint32)
return SparseMatSet(cols, rows)
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two vectors of the same large dimensional vector sparse
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
        # Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
        # Assume we have positions for each term/doc pair, so we can just update them.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
        # it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
        # For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, to let TF cache know it should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
<fim_suffix>
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle>mask = mask & curr_mask | mask = mask & curr_mask | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>searcharray/searcharray/postings.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# searcharray/searcharray/term_dict.py
def get_term_id(self, term):
try:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
# searcharray/searcharray/term_dict.py
def get_term(self, term_id):
try:
return self.id_to_terms[term_id]
except KeyError:
raise TermMissingError(f"Term at {term_id} not present in dictionary. Reindex to add.")
# searcharray/searcharray/utils/mat_set.py
def build(self):
return SparseMatSet(cols=np.asarray(self.cols, dtype=np.uint32),
rows=np.asarray(self.rows, dtype=np.uint32))
"""
"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest, surface ERROR-level logs on stdout
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
return same_postings and self.doc_len == other.doc_len
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two vectors of the same large dimensional vector sparse
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
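# Hedged usage sketch (illustrative values, not part of the original module):
# a Terms scalar is a bag of term frequencies plus optional per-term positions.
#   doc = Terms({"quick": 1, "brown": 1, "fox": 2}, doc_len=4)
#   doc.termfreq("fox")   # -> 2
#   len(doc)              # -> 3 distinct terms
#   Terms({"fox": 2}, doc_len=4, posns={"fox": [1, 3]})  # with positions attached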
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
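# Hedged sketch: a tokenizer is any callable from string -> list of tokens, so a
# lowercasing variant can simply wrap ws_tokenizer (the name below is illustrative):
def lower_ws_tokenizer(string):
    return [tok.lower() for tok in ws_tokenizer(string)]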
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
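# Hedged usage sketch (illustrative data, not from the library docs):
#   arr = SearchArray.index(["the quick fox", "the slow dog"])
#   arr.termfreqs("the")   # -> per-document counts, one entry per indexed string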
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair, so we can just update them.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
It is also slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# its an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
<fim_suffix>
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
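# Hedged note on how the two frequency accessors relate (for an unsliced array):
#   arr.termfreqs("fox")   # np.ndarray of per-document counts
#   arr.docfreq("fox")     # int; should equal int((arr.termfreqs("fox") > 0).sum())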
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str
    What to search for (already tokenized). A list is treated as a phrase.
similarity : Similarity, optional
    How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first so it lands in the DF cache,
# letting the TF cache know those term frequencies should be cached as well
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
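# Hedged sketch: similarity is any callable accepting the keyword arguments used
# above, so a trivial TF-only scorer could look like this (illustrative only):
#   def tf_only(term_freqs, doc_freqs, doc_lens, avg_doc_lens, num_docs):
#       return term_freqs
#   scores = arr.score("fox", similarity=tf_only)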
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
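# Hedged usage sketch (illustrative terms):
#   both = arr.and_query(["quick", "fox"])                        # every term present
#   either = arr.or_query(["quick", "fox"], min_should_match=1)   # at least one term present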
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle>return matches | return matches | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
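For reference, a minimal end-to-end sketch of the indexing and search API exercised throughout this file. The column name and example strings are illustrative, and the import path is assumed from the filename above:

import pandas as pd
from searcharray.postings import SearchArray

df = pd.DataFrame({"title": ["the quick brown fox", "the slow brown dog", "a quick fox jumps"]})
df["title_indexed"] = SearchArray.index(df["title"])

arr = df["title_indexed"].array
fox_mask = arr.match("fox")                  # boolean mask of docs containing "fox"
both_mask = arr.and_query(["quick", "fox"])  # docs containing both terms
phrase_mask = arr.match(["quick", "fox"])    # phrase match backed by phrase_freq
df["bm25"] = arr.score("fox")                # BM25 scores using the default similarity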