import pytest import pytest_asyncio from mock import AsyncMock, patch from azure.cosmos.aio import CosmosClient, DatabaseProxy from api.dependencies.database import Database from models.domain.request_action import RequestAction from models.domain.resource import Resource from models.domain.user_resource import UserResource from models.domain.shared_service import SharedService from tests_ma.test_api.test_routes.test_resource_helpers import FAKE_CREATE_TIMESTAMP from models.domain.authentication import User from models.domain.operation import Operation, OperationStep, Status from models.domain.resource_template import ( Pipeline, PipelineStep, PipelineStepProperty, ResourceTemplate, ResourceType, ) from models.domain.user_resource_template import UserResourceTemplate from models.schemas.user_resource_template import ( UserResourceTemplateInCreate, UserResourceTemplateInResponse, ) from models.schemas.workspace_template import WorkspaceTemplateInCreate from models.schemas.workspace_service_template import WorkspaceServiceTemplateInCreate from models.schemas.shared_service_template import SharedServiceTemplateInCreate @pytest.fixture def input_workspace_template(): return WorkspaceTemplateInCreate( name="my-tre-workspace", version="0.0.1", current=True, json_schema={ "$schema": "http://json-schema.org/draft-07/schema", "$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace.json", "type": "object", "title": "My Workspace Template", "description": "This is a test workspace template schema.", "required": [], "properties": { "updateable_property": { "type": "string", "title": "Test updateable property", "updateable": True, }, "fixed_property": { "type": "string", "title": "Test fixed property", "updateable": False, }, "supply_secret": { "type": "boolean", "title": "Choose to supply a secret", "updateable": True, }, "prop_with_nested_secret": { "type": "object", "title": "Property containing a nested secret val", "properties": { "nested_secret": { "type": "string", "title": "Nested Secret", "sensitive": True, } }, }, }, "allOf": [ { "if": { "properties": {"supply_secret": {"const": True}}, "required": ["supply_secret"], }, "then": { "properties": { "secret": { "type": "string", "title": "Secret", "sensitive": True, } } }, } ], }, customActions=[ {"name": "my-custom-action", "description": "This is a test custom action"} ], ) @pytest.fixture def input_workspace_service_template(): return WorkspaceServiceTemplateInCreate( name="my-tre-workspace-service", version="0.0.1", current=True, json_schema={ "$schema": "http://json-schema.org/draft-07/schema", "$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace_service.json", "type": "object", "title": "My Workspace Service Template", "description": "This is a test workspace service template schema.", "required": [], "properties": {}, }, customActions=[ {"name": "my-custom-action", "description": "This is a test custom action"} ], ) @pytest.fixture def input_user_resource_template(): return UserResourceTemplateInCreate( name="my-tre-user-resource", version="0.0.1", current=True, json_schema={ "$schema": "http://json-schema.org/draft-07/schema", "$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/user_resource.json", "type": "object", "title": "My User Resource Template", "description": "These is a test user resource template schema", "required": [], "properties": {}, }, customActions=[ {"name": "my-custom-action", "description": "This is a test custom action"} 
], ) @pytest.fixture def input_shared_service_template(): return SharedServiceTemplateInCreate( name="my-tre-shared-service", version="0.0.1", current=True, json_schema={ "$schema": "http://json-schema.org/draft-07/schema", "$id": "https://github.com/microsoft/AzureTRE/templates/shared_services/mysharedservice/shared_service.json", "type": "object", "title": "My Shared Service Template", "description": "This is a test shared service template schema.", "required": [], "properties": {}, }, ) @pytest.fixture def basic_resource_template(input_workspace_template): return ResourceTemplate( id="1234-5678", name=input_workspace_template.name, description=input_workspace_template.json_schema["description"], version=input_workspace_template.name, resourceType=ResourceType.Workspace, current=True, required=input_workspace_template.json_schema["required"], authorizedRoles=input_workspace_template.json_schema["authorizedRoles"] if "authorizedRoles" in input_workspace_template.json_schema else [], properties=input_workspace_template.json_schema["properties"], allOf=input_workspace_template.json_schema["allOf"], customActions=input_workspace_template.customActions, ) @pytest.fixture def basic_workspace_service_template(input_workspace_template): return ResourceTemplate( id="1234-5678", name=input_workspace_template.name, description=input_workspace_template.json_schema["description"], version=input_workspace_template.name, resourceType=ResourceType.WorkspaceService, current=True, required=input_workspace_template.json_schema["required"], authorizedRoles=input_workspace_template.json_schema["authorizedRoles"] if "authorizedRoles" in input_workspace_template.json_schema else [], properties=input_workspace_template.json_schema["properties"], customActions=input_workspace_template.customActions, ) @pytest.fixture def basic_user_resource_template(input_user_resource_template): return UserResourceTemplate( id="1234-5678", name=input_user_resource_template.name, parentWorkspaceService="parent-workspace-service-name", description=input_user_resource_template.json_schema["description"], version=input_user_resource_template.version, resourceType=ResourceType.UserResource, current=True, required=input_user_resource_template.json_schema["required"], authorizedRoles=input_user_resource_template.json_schema["authorizedRoles"] if "authorizedRoles" in input_user_resource_template.json_schema else [], properties=input_user_resource_template.json_schema["properties"], customActions=input_user_resource_template.customActions, ) @pytest.fixture def basic_shared_service_template(input_shared_service_template): return ResourceTemplate( id="1234-5678", name=input_shared_service_template.name, description=input_shared_service_template.json_schema["description"], version=input_shared_service_template.name, resourceType=ResourceType.SharedService, current=True, required=input_shared_service_template.json_schema["required"], authorizedRoles=input_shared_service_template.json_schema["authorizedRoles"] if "authorizedRoles" in input_shared_service_template.json_schema else [], properties=input_shared_service_template.json_schema["properties"], actions=input_shared_service_template.customActions, ) @pytest.fixture def user_resource_template_in_response(input_user_resource_template): return UserResourceTemplateInResponse( id="1234-5678", name=input_user_resource_template.name, parentWorkspaceService="parent-workspace-service-name", description=input_user_resource_template.json_schema["description"], 
version=input_user_resource_template.version, resourceType=ResourceType.UserResource, current=True, required=input_user_resource_template.json_schema["required"], authorizedRoles=input_user_resource_template.json_schema["authorizedRoles"] if "authorizedRoles" in input_user_resource_template.json_schema else [], properties=input_user_resource_template.json_schema["properties"], customActions=input_user_resource_template.customActions, system_properties={}, ) @pytest.fixture def multi_step_resource_template(basic_shared_service_template) -> ResourceTemplate: return ResourceTemplate( id="123", name="template1", description="description", version="0.1.0", resourceType=ResourceType.Workspace, current=True, required=[], properties={}, customActions=[], pipeline=Pipeline( install=[ PipelineStep( stepId="pre-step-1", stepTitle="Title for pre-step-1", resourceTemplateName=basic_shared_service_template.name, resourceType=basic_shared_service_template.resourceType, resourceAction="upgrade", properties=[ PipelineStepProperty( name="display_name", type="string", value="new name" ) ], ), PipelineStep(stepId="main"), PipelineStep( stepId="post-step-1", stepTitle="Title for post-step-1", resourceTemplateName=basic_shared_service_template.name, resourceType=basic_shared_service_template.resourceType, resourceAction="upgrade", properties=[ PipelineStepProperty( name="display_name", type="string", value="old name" ) ], ), ], uninstall=[ PipelineStep( stepId="pre-step-1", stepTitle="Title for pre-step-1", resourceTemplateName=basic_shared_service_template.name, resourceType=basic_shared_service_template.resourceType, resourceAction="upgrade", properties=[ PipelineStepProperty( name="display_name", type="string", value="new name" ) ], ), PipelineStep(stepId="main"), PipelineStep( stepId="post-step-1", stepTitle="Title for post-step-1", resourceTemplateName=basic_shared_service_template.name, resourceType=basic_shared_service_template.resourceType, resourceAction="upgrade", properties=[ PipelineStepProperty( name="display_name", type="string", value="old name" ) ], ), ], ), ) @pytest.fixture def test_user(): return User(id="user-id", name="test user", email="test@user.com") @pytest.fixture def basic_shared_service(test_user, basic_shared_service_template): id = "59b5c8e7-5c42-4fcb-a7fd-294cfc27aa76" return SharedService( id=id, templateName=basic_shared_service_template.name, templateVersion=basic_shared_service_template.version, etag="", properties={ "display_name": "shared_service_resource name", }, resourcePath=f"/shared-services/{id}", updatedWhen=FAKE_CREATE_TIMESTAMP, user=test_user, ) @pytest.fixture def user_resource_multi(test_user, multi_step_resource_template): id = "resource-id" return UserResource( id=id, templateName=multi_step_resource_template.name, templateVersion=multi_step_resource_template.version, etag="", properties={}, resourcePath=f"/workspaces/foo/workspace-services/bar/user-resources/{id}", updatedWhen=FAKE_CREATE_TIMESTAMP, user=test_user, ) @pytest.fixture def multi_step_operation( test_user, basic_shared_service_template, basic_shared_service ): return Operation( id="op-guid-here", resourceId="59b5c8e7-5c42-4fcb-a7fd-294cfc27aa76", action=RequestAction.Install, user=test_user, resourcePath="/workspaces/59b5c8e7-5c42-4fcb-a7fd-294cfc27aa76", createdWhen=FAKE_CREATE_TIMESTAMP, updatedWhen=FAKE_CREATE_TIMESTAMP, steps=[ OperationStep( id="random-uuid-1", templateStepId="pre-step-1", stepTitle="Title for pre-step-1", resourceAction="upgrade", 
resourceTemplateName=basic_shared_service_template.name, resourceType=basic_shared_service_template.resourceType, resourceId=basic_shared_service.id, status=Status.AwaitingUpdate, message="This resource is waiting to be updated", updatedWhen=FAKE_CREATE_TIMESTAMP, sourceTemplateResourceId="59b5c8e7-5c42-4fcb-a7fd-294cfc27aa76", ), OperationStep( id="random-uuid-2", templateStepId="main", stepTitle="Main step for 59b5c8e7-5c42-4fcb-a7fd-294cfc27aa76", resourceAction="install", resourceType=ResourceType.Workspace, resourceTemplateName="template1", resourceId="59b5c8e7-5c42-4fcb-a7fd-294cfc27aa76", status=Status.AwaitingDeployment, message="This resource is waiting to be deployed", updatedWhen=FAKE_CREATE_TIMESTAMP, sourceTemplateResourceId="59b5c8e7-5c42-4fcb-a7fd-294cfc27aa76", ), OperationStep( id="random-uuid-3", templateStepId="post-step-1", stepTitle="Title for post-step-1", resourceAction="upgrade", resourceType=basic_shared_service_template.resourceType, resourceTemplateName=basic_shared_service_template.name, resourceId=basic_shared_service.id, status=Status.AwaitingUpdate, message="This resource is waiting to be updated", updatedWhen=FAKE_CREATE_TIMESTAMP, sourceTemplateResourceId="59b5c8e7-5c42-4fcb-a7fd-294cfc27aa76", ), ], ) @pytest.fixture def primary_resource() -> Resource: return Resource( id="123", name="test resource", isEnabled=True, templateName="template name", templateVersion="7", resourceType="workspace", _etag="", properties={ "display_name": "test_resource name", "address_prefix": ["172.0.0.1", "192.168.0.1"], "fqdn": ["*.pypi.org", "files.pythonhosted.org", "security.ubuntu.com"], "my_protocol": "MyCoolProtocol", }, ) @pytest.fixture def primary_user_resource() -> Resource: return Resource( id="123", name="test resource", isEnabled=True, templateName="template name", templateVersion="7", resourceType="user-resource", _etag="", properties={ "display_name": "test_resource name", "address_prefix": ["172.0.0.1", "192.168.0.1"], "fqdn": ["*.pypi.org", "files.pythonhosted.org", "security.ubuntu.com"], "my_protocol": "MyCoolProtocol", }, ) @pytest.fixture def primary_workspace_service_resource() -> Resource: return Resource( id="123", name="test resource", isEnabled=True, templateName="template name", templateVersion="7", resourceType="workspace-service", _etag="", properties={ "display_name": "test_workspace_service_resource name", "address_prefix": ["172.0.0.1", "192.168.0.1"], "fqdn": ["*.pypi.org", "files.pythonhosted.org", "security.ubuntu.com"], "my_protocol": "MyCoolProtocol", }, ) @pytest.fixture def resource_ws_parent() -> Resource: return Resource( id="234", name="ws test resource", isEnabled=True, templateName="ws template name", templateVersion="8", resourceType="workspace", _etag="", properties={ "display_name": "ImTheParentWS", "address_prefix": ["172.1.1.1", "192.168.1.1"], "fqdn": ["*.pypi.org", "security.ubuntu.com"], "my_protocol": "MyWSCoolProtocol", }, ) @pytest.fixture def resource_ws_svc_parent() -> Resource: return Resource( id="345", name="ws svc test resource", isEnabled=True, templateName="svc template name", templateVersion="9", resourceType="workspace-service", _etag="", properties={ "display_name": "ImTheParentWSSvc", "address_prefix": ["172.2.2.2", "192.168.2.2"], "fqdn": ["*.pypi.org", "files.pythonhosted.org"], "my_protocol": "MyWSSvcCoolProtocol", }, ) @pytest.fixture def resource_to_update() -> Resource: return Resource( id="123", name="Firewall", isEnabled=True, templateName="template name", templateVersion="7", resourceType="workspace", 
_etag="", properties={}, ) @pytest.fixture def pipeline_step() -> PipelineStep: return PipelineStep( properties=[ PipelineStepProperty( name="rule_collections", type="array", arraySubstitutionAction="overwrite", arrayMatchField="name", value={ "name": "arc-web_app_subnet_nexus_api", "action": "Allow", "rules": [ { "name": "nexus-package-sources-api", "description": "Deployed by {{ resource.id }}", "protocols": [ {"port": "443", "type": "Https"}, { "port": "80", "type": "{{ resource.properties.my_protocol }}", }, ], "target_fqdns": "{{ resource.properties.fqdn }}", "source_addresses": "{{ resource.properties.address_prefix }}", } ], }, ) ] ) @pytest.fixture def simple_pipeline_step() -> PipelineStep: return PipelineStep( properties=[ PipelineStepProperty( name="just_text", type="string", value="Updated by {{resource.id}}" ), PipelineStepProperty( name="just_text_2", type="string", value="No substitution, just a fixed string here", ), PipelineStepProperty( name="just_text_3", type="string", value="Multiple substitutions -> {{resource.id}} and {{resource.templateName}}", ), ] ) @pytest_asyncio.fixture(autouse=True) async def no_database(): with patch('api.dependencies.database.get_credential_async', return_value=AsyncMock()), \ patch('api.dependencies.database.CosmosDBManagementClient', return_value=AsyncMock()), \ patch('api.dependencies.database.CosmosClient', return_value=AsyncMock(spec=CosmosClient)) as cosmos_client_mock: cosmos_client_mock.return_value.get_database_client.return_value = AsyncMock(spec=DatabaseProxy) yield Database()
# End of file: AzureTRE/api_app/tests_ma/conftest.py (repo: AzureTRE)
import random from unittest.mock import AsyncMock import uuid import pytest from mock import patch from fastapi import status from models.domain.resource import ResourceHistoryItem from tests_ma.test_api.conftest import create_admin_user, create_test_user from .test_workspaces import FAKE_CREATE_TIMESTAMP, FAKE_UPDATE_TIMESTAMP, OPERATION_ID, sample_resource_operation from db.errors import EntityDoesNotExist from models.domain.shared_service import SharedService from resources import strings from services.authentication import get_current_admin_user, get_current_tre_user_or_tre_admin from azure.cosmos.exceptions import CosmosAccessConditionFailedError pytestmark = pytest.mark.asyncio SHARED_SERVICE_ID = 'abcad738-7265-4b5f-9eae-a1a62928772e' ETAG = "some-etag-value" @pytest.fixture def shared_service_input(): return { "templateName": "test-shared-service", "properties": { "display_name": "display" } } def sample_shared_service(shared_service_id=SHARED_SERVICE_ID): return SharedService( id=shared_service_id, templateName="tre-shared-service-base", templateVersion="0.1.0", etag="", properties={ 'display_name': 'A display name', 'description': 'desc here', 'overview': 'overview here', 'private_field_1': 'value_1', 'private_field_2': 'value_2' }, resourcePath=f'/shared-services/{shared_service_id}', updatedWhen=FAKE_CREATE_TIMESTAMP, user=create_admin_user() ) def sample_resource_history(history_length, shared_service_id=SHARED_SERVICE_ID) -> ResourceHistoryItem: resource_history = [] user = create_test_user() for version in range(history_length): resource_history_item = ResourceHistoryItem( id=str(uuid.uuid4()), resourceId=shared_service_id, isEnabled=True, resourceVersion=version, templateVersion="template_version", properties={ 'display_name': 'initial display name', 'description': 'initial description', 'computed_prop': 'computed_val' }, updatedWhen=FAKE_CREATE_TIMESTAMP, user=user ) resource_history.append(resource_history_item) return resource_history class TestSharedServiceRoutesThatDontRequireAdminRigths: @pytest.fixture(autouse=True, scope='class') def log_in_with_non_admin_user(self, app, non_admin_user): with patch('services.aad_authentication.AzureADAuthorization._get_user_from_token', return_value=non_admin_user()): app.dependency_overrides[get_current_tre_user_or_tre_admin] = non_admin_user yield app.dependency_overrides = {} # [GET] /shared-services @patch("api.routes.shared_services.SharedServiceRepository.get_active_shared_services", return_value=None) @patch("api.routes.shared_services.enrich_resource_with_available_upgrades", return_value=None) async def test_get_shared_services_returns_list_of_shared_services_for_user(self, _, get_active_shared_services_mock, app, client): shared_services = [sample_shared_service()] get_active_shared_services_mock.return_value = shared_services response = await client.get(app.url_path_for(strings.API_GET_ALL_SHARED_SERVICES)) assert response.status_code == status.HTTP_200_OK assert response.json()["sharedServices"][0]["id"] == sample_shared_service().id # check that as a user we only get the restricted resource model assert 'private_field_1' not in response.json()["sharedServices"][0]["properties"] assert 'private_field_2' not in response.json()["sharedServices"][0]["properties"] # [GET] /shared-services/<shared-service-id> @patch("api.dependencies.shared_services.SharedServiceRepository.get_shared_service_by_id", return_value=sample_shared_service()) @patch("api.routes.shared_services.enrich_resource_with_available_upgrades", 
return_value=None) async def test_get_shared_service_returns_shared_service_result_for_user(self, _, get_shared_service_mock, app, client): shared_service = sample_shared_service(shared_service_id=str(uuid.uuid4())) get_shared_service_mock.return_value = shared_service response = await client.get( app.url_path_for(strings.API_GET_SHARED_SERVICE_BY_ID, shared_service_id=SHARED_SERVICE_ID)) assert response.status_code == status.HTTP_200_OK obj = response.json()["sharedService"] assert obj["id"] == shared_service.id # check that as a user we only get the restricted resource model assert 'private_field_1' not in obj["properties"] assert 'private_field_2' not in obj["properties"] class TestSharedServiceRoutesThatRequireAdminRights: @pytest.fixture(autouse=True, scope='class') def _prepare(self, app, admin_user): with patch('services.aad_authentication.AzureADAuthorization._get_user_from_token', return_value=admin_user()): app.dependency_overrides[get_current_tre_user_or_tre_admin] = admin_user app.dependency_overrides[get_current_admin_user] = admin_user yield app.dependency_overrides = {} # [GET] /shared-services @patch("api.routes.shared_services.SharedServiceRepository.get_active_shared_services", return_value=None) @patch("api.routes.shared_services.enrich_resource_with_available_upgrades", return_value=None) async def test_get_shared_services_returns_list_of_shared_services_for_admin_user(self, _, get_active_shared_services_mock, app, client): shared_services = [sample_shared_service()] get_active_shared_services_mock.return_value = shared_services response = await client.get(app.url_path_for(strings.API_GET_ALL_SHARED_SERVICES)) assert response.status_code == status.HTTP_200_OK assert response.json()["sharedServices"][0]["id"] == sample_shared_service().id # check that as a user we only get the restricted resource model assert response.json()["sharedServices"][0]["properties"]["private_field_1"] == "value_1" assert response.json()["sharedServices"][0]["properties"]["private_field_2"] == "value_2" # [GET] /shared-services/{shared_service_id} @patch("api.dependencies.shared_services.SharedServiceRepository.get_shared_service_by_id", return_value=sample_shared_service()) @patch("api.routes.shared_services.enrich_resource_with_available_upgrades", return_value=None) async def test_get_shared_service_returns_shared_service_result(self, _, get_shared_service_mock, app, client): shared_service = sample_shared_service(shared_service_id=str(uuid.uuid4())) get_shared_service_mock.return_value = shared_service response = await client.get( app.url_path_for(strings.API_GET_SHARED_SERVICE_BY_ID, shared_service_id=SHARED_SERVICE_ID)) assert response.status_code == status.HTTP_200_OK obj = response.json()["sharedService"] assert obj["id"] == shared_service.id # check that as admin we DO get the full model assert obj["properties"]["private_field_1"] == "value_1" assert obj["properties"]["private_field_2"] == "value_2" # [GET] /shared-services/{shared_service_id} @patch("api.routes.shared_services.SharedServiceRepository.get_active_shared_services") @patch("api.dependencies.shared_services.SharedServiceRepository.get_shared_service_by_id", side_effect=EntityDoesNotExist) async def test_get_shared_service_raises_404_if_not_found(self, get_shared_service_mock, _, app, client): get_shared_service_mock.return_value = sample_shared_service(SHARED_SERVICE_ID) response = await client.get( app.url_path_for(strings.API_GET_SHARED_SERVICE_BY_ID, shared_service_id=SHARED_SERVICE_ID)) assert response.status_code == 
status.HTTP_404_NOT_FOUND # [PATCH] /shared-services/{shared_service_id} @patch("api.dependencies.shared_services.SharedServiceRepository.get_shared_service_by_id", side_effect=EntityDoesNotExist) async def test_patch_shared_service_returns_404_if_does_not_exist(self, _, app, client): response = await client.patch(app.url_path_for(strings.API_UPDATE_SHARED_SERVICE, shared_service_id=SHARED_SERVICE_ID), json='{"enabled": true}') assert response.status_code == status.HTTP_404_NOT_FOUND # [PATCH] /shared-services/{shared_service_id} @patch("api.dependencies.shared_services.SharedServiceRepository.get_shared_service_by_id", return_value=sample_shared_service()) @patch("api.dependencies.shared_services.SharedServiceRepository.patch_shared_service", side_effect=CosmosAccessConditionFailedError) async def test_patch_shared_service_returns_409_if_bad_etag(self, _, __, app, client): shared_service_patch = {"isEnabled": True} response = await client.patch(app.url_path_for(strings.API_UPDATE_SHARED_SERVICE, shared_service_id=SHARED_SERVICE_ID), json=shared_service_patch, headers={"etag": ETAG}) assert response.status_code == status.HTTP_409_CONFLICT assert response.text == strings.ETAG_CONFLICT # [PATCH] /shared-services/{shared_service_id} @patch("api.dependencies.shared_services.SharedServiceRepository.get_shared_service_by_id", side_effect=EntityDoesNotExist) async def test_patch_shared_service_returns_422_if_invalid_id(self, get_shared_service_mock, app, client): shared_service_id = "IAmNotEvenAGUID!" get_shared_service_mock.return_value = sample_shared_service(shared_service_id) response = await client.patch(app.url_path_for(strings.API_UPDATE_SHARED_SERVICE, shared_service_id=shared_service_id), json={"enabled": True}) assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY # [PATCH] /shared-services/{shared_service_id} @patch("api.routes.shared_services.ResourceHistoryRepository.save_item", return_value=AsyncMock()) @patch("api.routes.shared_services.SharedServiceRepository.get_timestamp", return_value=FAKE_UPDATE_TIMESTAMP) @patch("api.dependencies.shared_services.SharedServiceRepository.get_shared_service_by_id", return_value=sample_shared_service(SHARED_SERVICE_ID)) @patch("api.routes.shared_services.ResourceTemplateRepository.get_template_by_name_and_version", return_value=None) @patch("api.routes.shared_services.SharedServiceRepository.update_item_with_etag", return_value=sample_shared_service()) @patch("api.routes.shared_services.send_resource_request_message", return_value=sample_resource_operation(resource_id=SHARED_SERVICE_ID, operation_id=OPERATION_ID)) async def test_patch_shared_service_patches_shared_service(self, _, update_item_mock, __, ___, ____, _____, app, client): shared_service_patch = {"isEnabled": False} modified_shared_service = sample_shared_service() modified_shared_service.isEnabled = False modified_shared_service.resourceVersion = 1 modified_shared_service.updatedWhen = FAKE_UPDATE_TIMESTAMP modified_shared_service.user = create_admin_user() response = await client.patch(app.url_path_for(strings.API_UPDATE_SHARED_SERVICE, shared_service_id=SHARED_SERVICE_ID), json=shared_service_patch, headers={"etag": ETAG}) update_item_mock.assert_called_once_with(modified_shared_service, ETAG) assert response.status_code == status.HTTP_202_ACCEPTED # [PATCH] /shared-services/{shared_service_id} @patch("api.routes.shared_services.ResourceHistoryRepository.save_item", return_value=AsyncMock()) @patch("api.routes.shared_services.SharedServiceRepository.get_timestamp", 
return_value=FAKE_UPDATE_TIMESTAMP) @patch("api.dependencies.shared_services.SharedServiceRepository.get_shared_service_by_id", return_value=sample_shared_service(SHARED_SERVICE_ID)) @patch("api.routes.shared_services.ResourceTemplateRepository.get_template_by_name_and_version", return_value=sample_shared_service()) @patch("api.routes.shared_services.SharedServiceRepository.update_item_with_etag", return_value=sample_shared_service()) @patch("api.routes.shared_services.send_resource_request_message", return_value=sample_resource_operation(resource_id=SHARED_SERVICE_ID, operation_id=OPERATION_ID)) async def test_patch_shared_service_with_upgrade_minor_version_patches_shared_service(self, _, update_item_mock, __, ___, ____, _____, app, client): shared_service_patch = {"templateVersion": "0.2.0"} modified_shared_service = sample_shared_service() modified_shared_service.isEnabled = True modified_shared_service.resourceVersion = 1 modified_shared_service.updatedWhen = FAKE_UPDATE_TIMESTAMP modified_shared_service.user = create_admin_user() modified_shared_service.templateVersion = "0.2.0" response = await client.patch(app.url_path_for(strings.API_UPDATE_SHARED_SERVICE, shared_service_id=SHARED_SERVICE_ID), json=shared_service_patch, headers={"etag": ETAG}) update_item_mock.assert_called_once_with(modified_shared_service, ETAG) assert response.status_code == status.HTTP_202_ACCEPTED # [PATCH] /shared-services/{shared_service_id} @patch("api.routes.shared_services.ResourceHistoryRepository.save_item", return_value=AsyncMock()) @patch("api.routes.shared_services.SharedServiceRepository.get_timestamp", return_value=FAKE_UPDATE_TIMESTAMP) @patch("api.dependencies.shared_services.SharedServiceRepository.get_shared_service_by_id", return_value=sample_shared_service(SHARED_SERVICE_ID)) @patch("api.routes.shared_services.ResourceTemplateRepository.get_template_by_name_and_version", return_value=sample_shared_service()) @patch("api.routes.shared_services.SharedServiceRepository.update_item_with_etag", return_value=sample_shared_service()) @patch("api.routes.shared_services.send_resource_request_message", return_value=sample_resource_operation(resource_id=SHARED_SERVICE_ID, operation_id=OPERATION_ID)) async def test_patch_shared_service_with_upgrade_major_version_and_force_update_patches_shared_service(self, _, update_item_mock, __, ___, ____, _____, app, client): shared_service_patch = {"templateVersion": "2.0.0"} modified_shared_service = sample_shared_service() modified_shared_service.isEnabled = True modified_shared_service.resourceVersion = 1 modified_shared_service.updatedWhen = FAKE_UPDATE_TIMESTAMP modified_shared_service.user = create_admin_user() modified_shared_service.templateVersion = "2.0.0" response = await client.patch(app.url_path_for(strings.API_UPDATE_SHARED_SERVICE, shared_service_id=SHARED_SERVICE_ID) + "?force_version_update=True", json=shared_service_patch, headers={"etag": ETAG}) update_item_mock.assert_called_once_with(modified_shared_service, ETAG) assert response.status_code == status.HTTP_202_ACCEPTED # [PATCH] /shared-services/{shared_service_id} @patch("api.routes.shared_services.ResourceHistoryRepository.save_item", return_value=AsyncMock()) @patch("api.routes.shared_services.SharedServiceRepository.get_timestamp", return_value=FAKE_UPDATE_TIMESTAMP) @patch("api.dependencies.shared_services.SharedServiceRepository.get_shared_service_by_id", return_value=sample_shared_service(SHARED_SERVICE_ID)) 
@patch("api.routes.shared_services.ResourceTemplateRepository.get_template_by_name_and_version", return_value=None) @patch("api.routes.shared_services.SharedServiceRepository.update_item_with_etag", return_value=sample_shared_service()) @patch("api.routes.shared_services.send_resource_request_message", return_value=sample_resource_operation(resource_id=SHARED_SERVICE_ID, operation_id=OPERATION_ID)) async def test_patch_shared_service_with_upgrade_major_version_returns_bad_request(self, _, update_item_mock, __, ___, ____, _____, app, client): shared_service_patch = {"templateVersion": "2.0.0"} modified_shared_service = sample_shared_service() modified_shared_service.isEnabled = True modified_shared_service.resourceVersion = 1 modified_shared_service.updatedWhen = FAKE_UPDATE_TIMESTAMP modified_shared_service.user = create_admin_user() response = await client.patch(app.url_path_for(strings.API_UPDATE_SHARED_SERVICE, shared_service_id=SHARED_SERVICE_ID), json=shared_service_patch, headers={"etag": ETAG}) assert response.status_code == status.HTTP_400_BAD_REQUEST assert response.text == 'Attempt to upgrade from 0.1.0 to 2.0.0 denied. major version upgrade is not allowed.' # [PATCH] /shared-services/{shared_service_id} @patch("api.routes.shared_services.ResourceHistoryRepository.save_item", return_value=AsyncMock()) @patch("api.routes.shared_services.SharedServiceRepository.get_timestamp", return_value=FAKE_UPDATE_TIMESTAMP) @patch("api.dependencies.shared_services.SharedServiceRepository.get_shared_service_by_id", return_value=sample_shared_service(SHARED_SERVICE_ID)) @patch("api.routes.shared_services.ResourceTemplateRepository.get_template_by_name_and_version", return_value=None) @patch("api.routes.shared_services.SharedServiceRepository.update_item_with_etag", return_value=sample_shared_service()) @patch("api.routes.shared_services.send_resource_request_message", return_value=sample_resource_operation(resource_id=SHARED_SERVICE_ID, operation_id=OPERATION_ID)) async def test_patch_shared_service_with_downgrade_version_returns_bad_request(self, _, update_item_mock, __, ___, ____, _____, app, client): shared_service_patch = {"templateVersion": "0.0.1"} modified_shared_service = sample_shared_service() modified_shared_service.isEnabled = True modified_shared_service.resourceVersion = 1 modified_shared_service.updatedWhen = FAKE_UPDATE_TIMESTAMP modified_shared_service.user = create_admin_user() response = await client.patch(app.url_path_for(strings.API_UPDATE_SHARED_SERVICE, shared_service_id=SHARED_SERVICE_ID), json=shared_service_patch, headers={"etag": ETAG}) assert response.status_code == status.HTTP_400_BAD_REQUEST assert response.text == 'Attempt to downgrade from 0.1.0 to 0.0.1 denied. version downgrade is not allowed.' 
# [GET] /shared-services/{shared_service_id}/history @patch("api.routes.shared_services.ResourceHistoryRepository.get_resource_history_by_resource_id") @patch("api.dependencies.shared_services.SharedServiceRepository.get_shared_service_by_id") async def test_get_shared_service_history_returns_shared_service_history_result(self, get_shared_service_mock, get_resource_history_mock, app, client): sample_guid = str(uuid.uuid4()) sample_history_length = random.randint(1, 10) shared_service = sample_shared_service(shared_service_id=sample_guid) shared_service_history = sample_resource_history(history_length=sample_history_length, shared_service_id=sample_guid) get_shared_service_mock.return_value = shared_service get_resource_history_mock.return_value = shared_service_history response = await client.get( app.url_path_for(strings.API_GET_RESOURCE_HISTORY, shared_service_id=SHARED_SERVICE_ID)) assert response.status_code == status.HTTP_200_OK obj = response.json()["resource_history"] assert len(obj) == sample_history_length for item in obj: assert item["resourceId"] == shared_service.id # [GET] /shared-services/{shared_service_id}/history @patch("api.routes.shared_services.ResourceHistoryRepository.get_resource_history_by_resource_id") @patch("api.dependencies.shared_services.SharedServiceRepository.get_shared_service_by_id", return_value=sample_shared_service()) async def test_get_shared_service_history_returns_empty_list_when_no_history(self, _, get_resource_history_mock, app, client): get_resource_history_mock.return_value = [] response = await client.get( app.url_path_for(strings.API_GET_RESOURCE_HISTORY, shared_service_id=SHARED_SERVICE_ID)) assert response.status_code == status.HTTP_200_OK obj = response.json()["resource_history"] assert len(obj) == 0 # [PATCH] /shared-services/{shared_service_id} @patch("api.dependencies.shared_services.SharedServiceRepository.get_shared_service_by_id", return_value=sample_shared_service(SHARED_SERVICE_ID)) async def test_patch_shared_service_with_invalid_field_returns_422(self, _, app, client): shared_service_patch = {"fakeField": "someValue", "templateVersion": "0.2.0"} response = await client.patch(app.url_path_for(strings.API_UPDATE_SHARED_SERVICE, shared_service_id=SHARED_SERVICE_ID), json=shared_service_patch, headers={"etag": ETAG}) assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY assert response.text == "[{'loc': ('body', 'fakeField'), 'msg': 'extra fields not permitted', 'type': 'value_error.extra'}]"
# End of file: AzureTRE/api_app/tests_ma/test_api/test_routes/test_shared_services.py (repo: AzureTRE)
import pytest
import pytest_asyncio
from mock import patch

from models.domain.user_resource_template import UserResourceTemplate
from db.repositories.resource_templates import ResourceTemplateRepository
from db.errors import EntityDoesNotExist, InvalidInput
from models.domain.resource import ResourceType
from models.domain.resource_template import ResourceTemplate
from models.schemas.workspace_template import WorkspaceTemplateInCreate

pytestmark = pytest.mark.asyncio


@pytest_asyncio.fixture
async def resource_template_repo():
    with patch('api.dependencies.database.Database.get_container_proxy', return_value=None):
        resource_template_repo = await ResourceTemplateRepository().create()
        yield resource_template_repo


def sample_resource_template_as_dict(name: str, version: str = "1.0", resource_type: ResourceType = ResourceType.Workspace) -> ResourceTemplate:
    return ResourceTemplate(
        id="a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb",
        name=name,
        description="test",
        version=version,
        resourceType=resource_type,
        current=False,
        properties={},
        customActions=[],
        required=[]
    ).dict()


@patch('db.repositories.resource_templates.ResourceTemplateRepository.save_item')
@patch('uuid.uuid4')
async def test_create_workspace_template_succeeds_without_required(uuid_mock, save_item_mock, resource_template_repo):
    uuid_mock.return_value = "1234"
    expected_type = ResourceType.Workspace

    input_workspace_template = WorkspaceTemplateInCreate(
        name="my-tre-workspace",
        version="0.0.1",
        current=True,
        json_schema={
            "title": "My Workspace Template",
            "description": "This is a test workspace template schema.",
            "properties": {
                "updateable_property": {
                    "type": "string",
                    "title": "Test updateable property",
                    "updateable": True,
                },
            },
        },
        customActions=[],
    )

    returned_template = await resource_template_repo.create_template(input_workspace_template, expected_type)

    expected_resource_template = ResourceTemplate(
        id="1234",
        name=input_workspace_template.name,
        title=input_workspace_template.json_schema["title"],
        description=input_workspace_template.json_schema["description"],
        version=input_workspace_template.version,
        resourceType=expected_type,
        properties=input_workspace_template.json_schema["properties"],
        customActions=input_workspace_template.customActions,
        required=[],
        authorizedRoles=[],
        current=input_workspace_template.current
    )

    save_item_mock.assert_called_once_with(expected_resource_template)
    assert expected_resource_template == returned_template


@patch('db.repositories.resource_templates.ResourceTemplateRepository.query')
async def test_get_by_name_and_version_queries_db(query_mock, resource_template_repo):
    expected_query = 'SELECT * FROM c WHERE c.resourceType = "workspace" AND c.name = "test" AND c.version = "1.0"'
    query_mock.return_value = [sample_resource_template_as_dict(name="test", version="1.0")]

    await resource_template_repo.get_template_by_name_and_version(name="test", version="1.0", resource_type=ResourceType.Workspace)

    query_mock.assert_called_once_with(query=expected_query)


@patch('db.repositories.resource_templates.ResourceTemplateRepository.query')
async def test_get_by_name_and_version_returns_matching_template(query_mock, resource_template_repo):
    template_name = "test"
    template_version = "1.0"
    workspace_templates_in_db = [sample_resource_template_as_dict(name=template_name, version=template_version)]
    query_mock.return_value = workspace_templates_in_db

    template = await resource_template_repo.get_template_by_name_and_version(name=template_name, version=template_version, resource_type=ResourceType.Workspace)

    assert template.name == template_name


@patch('db.repositories.resource_templates.ResourceTemplateRepository.query')
async def test_get_by_name_and_version_raises_entity_does_not_exist_if_no_template_found(query_mock, resource_template_repo):
    template_name = "test"
    template_version = "1.0"
    query_mock.return_value = []

    with pytest.raises(EntityDoesNotExist):
        await resource_template_repo.get_template_by_name_and_version(name=template_name, version=template_version, resource_type=ResourceType.Workspace)


@patch('db.repositories.resource_templates.ResourceTemplateRepository.query')
async def test_get_current_by_name_queries_db(query_mock, resource_template_repo):
    template_name = "template1"
    expected_query = 'SELECT * FROM c WHERE c.resourceType = "workspace" AND c.name = "template1" AND c.current = true'
    query_mock.return_value = [sample_resource_template_as_dict(name="test")]

    await resource_template_repo.get_current_template(template_name=template_name, resource_type=ResourceType.Workspace)

    query_mock.assert_called_once_with(query=expected_query)


@patch('db.repositories.resource_templates.ResourceTemplateRepository.query')
async def test_get_current_by_name_returns_matching_template(query_mock, resource_template_repo):
    template_name = "template1"
    query_mock.return_value = [sample_resource_template_as_dict(name=template_name)]

    template = await resource_template_repo.get_current_template(template_name=template_name, resource_type=ResourceType.Workspace)

    assert template.name == template_name


@patch('db.repositories.resource_templates.ResourceTemplateRepository.query')
async def test_get_current_by_name_raises_entity_does_not_exist_if_no_template_found(query_mock, resource_template_repo):
    query_mock.return_value = []

    with pytest.raises(EntityDoesNotExist):
        await resource_template_repo.get_current_template(template_name="template1", resource_type=ResourceType.Workspace)


@patch('db.repositories.resource_templates.ResourceTemplateRepository.query')
async def test_get_templates_information_returns_unique_template_names(query_mock, resource_template_repo):
    query_mock.return_value = [
        {"name": "template1", "title": "title1", "description": "description1"},
        {"name": "template2", "title": "title2", "description": "description2"}
    ]

    result = await resource_template_repo.get_templates_information(ResourceType.Workspace)

    assert len(result) == 2
    assert result[0].name == "template1"
    assert result[1].name == "template2"


@patch('db.repositories.resource_templates.ResourceTemplateRepository.query')
async def test_get_templates_information_returns_only_templates_user_can_access(query_mock, resource_template_repo):
    query_mock.return_value = [
        # Will get filtered out as don't have admin role
        {"name": "template1", "title": "title1", "description": "description1", "authorizedRoles": ["admin"]},
        # Will get included as authorizedRoles=[] means any role is accepted
        {"name": "template2", "title": "title2", "description": "description2", "authorizedRoles": []},
        # Will get included as have test role
        {"name": "template3", "title": "title3", "description": "description3", "authorizedRoles": ["test"]}
    ]

    result = await resource_template_repo.get_templates_information(ResourceType.Workspace, ["test"])

    assert len(result) == 2
    assert result[0].name == "template2"
    assert result[1].name == "template3"


@patch('db.repositories.resource_templates.ResourceTemplateRepository.save_item')
@patch('uuid.uuid4')
async def test_create_workspace_template_item_calls_create_item_with_the_correct_parameters(uuid_mock, save_item_mock, resource_template_repo, input_workspace_template):
    uuid_mock.return_value = "1234"

    returned_template = await resource_template_repo.create_template(input_workspace_template, ResourceType.Workspace)

    expected_resource_template = ResourceTemplate(
        id="1234",
        name=input_workspace_template.name,
        title=input_workspace_template.json_schema["title"],
        description=input_workspace_template.json_schema["description"],
        version=input_workspace_template.version,
        resourceType=ResourceType.Workspace,
        properties=input_workspace_template.json_schema["properties"],
        allOf=input_workspace_template.json_schema["allOf"],
        customActions=input_workspace_template.customActions,
        required=input_workspace_template.json_schema["required"],
        current=input_workspace_template.current
    )

    save_item_mock.assert_called_once_with(expected_resource_template)
    assert expected_resource_template == returned_template


@patch('db.repositories.resource_templates.ResourceTemplateRepository.save_item')
@patch('uuid.uuid4')
async def test_create_item_created_with_the_expected_type(uuid_mock, save_item_mock, resource_template_repo, input_workspace_template):
    uuid_mock.return_value = "1234"
    expected_type = ResourceType.WorkspaceService

    returned_template = await resource_template_repo.create_template(input_workspace_template, expected_type)

    expected_resource_template = ResourceTemplate(
        id="1234",
        name=input_workspace_template.name,
        title=input_workspace_template.json_schema["title"],
        description=input_workspace_template.json_schema["description"],
        version=input_workspace_template.version,
        resourceType=expected_type,
        properties=input_workspace_template.json_schema["properties"],
        allOf=input_workspace_template.json_schema["allOf"],
        customActions=input_workspace_template.customActions,
        required=input_workspace_template.json_schema["required"],
        current=input_workspace_template.current
    )

    save_item_mock.assert_called_once_with(expected_resource_template)
    assert expected_resource_template == returned_template


@patch('db.repositories.resource_templates.ResourceTemplateRepository.save_item')
@patch('uuid.uuid4')
async def test_create_item_with_pipeline_succeeds(uuid_mock, save_item_mock, resource_template_repo, input_user_resource_template):
    uuid_mock.return_value = "1234"
    expected_type = ResourceType.UserResource

    # add the pipeline block
    pipeline = {
        "upgrade": [],
        "install": [],
        "uninstall": []
    }
    input_user_resource_template.json_schema["pipeline"] = pipeline

    returned_template = await resource_template_repo.create_template(input_user_resource_template, expected_type)

    expected_resource_template = UserResourceTemplate(
        id="1234",
        name=input_user_resource_template.name,
        title=input_user_resource_template.json_schema["title"],
        description=input_user_resource_template.json_schema["description"],
        version=input_user_resource_template.version,
        resourceType=expected_type,
        properties=input_user_resource_template.json_schema["properties"],
        customActions=input_user_resource_template.customActions,
        required=input_user_resource_template.json_schema["required"],
        current=input_user_resource_template.current,
        pipeline=pipeline,
        parentWorkspaceService=""
    )

    save_item_mock.assert_called_once_with(expected_resource_template)
    assert expected_resource_template == returned_template


@pytest.mark.parametrize(
    "pipeline",
    [
        {
            "install": [{"stepId": "1"}, {"stepId": "1"}],
            "upgrade": [{"stepId": "main"}, {"stepId": "2"}],
        },
        {
            "install": [{"stepId": "main"}, {"stepId": "1"}],
            "upgrade": [{"stepId": "main"}, {"stepId": "1"}],
        },
        {
            "install": [{"stepId": "main"}, {"stepId": "1"}],
            "upgrade": [{"stepId": "main"}, {"stepId": "main"}],
        },
    ],
)
async def test_create_template_with_pipeline_that_has_duplicated_step_id_fails_with_invalid_input_error(resource_template_repo, input_user_resource_template, pipeline):
    input_user_resource_template.json_schema["pipeline"] = pipeline

    with pytest.raises(InvalidInput):
        await resource_template_repo.create_template(input_user_resource_template, ResourceType.UserResource)


@patch('db.repositories.resource_templates.ResourceTemplateRepository.save_item')
async def test_create_template_with_pipeline_without_duplicated_step_id_succeeds(_, resource_template_repo, input_user_resource_template):
    input_user_resource_template.json_schema["pipeline"] = {
        "install": [{"stepId": "main"}, {"stepId": "1"}],
        "upgrade": [{"stepId": "main"}, {"stepId": "2"}],
    }

    created = await resource_template_repo.create_template(input_user_resource_template, ResourceType.UserResource)

    assert created.pipeline


@patch('db.repositories.resource_templates.ResourceTemplateRepository.save_item')
async def test_create_template_with_null_pipeline_creates_template_without_pipeline(_, resource_template_repo, input_user_resource_template):
    input_user_resource_template.json_schema["pipeline"] = None

    created = await resource_template_repo.create_template(input_user_resource_template, ResourceType.UserResource)

    assert created.pipeline is None
# End of file: AzureTRE/api_app/tests_ma/test_db/test_repositories/test_resource_templates_repository.py (repo: AzureTRE)
from fastapi import HTTPException, status import pytest import pytest_asyncio import time from resources import strings from services.airlock import validate_user_allowed_to_access_storage_account, get_required_permission, \ validate_request_status, cancel_request, delete_review_user_resource, check_email_exists from models.domain.airlock_request import AirlockRequest, AirlockRequestStatus, AirlockRequestType, AirlockReview, AirlockReviewDecision, AirlockActions, AirlockReviewUserResource from tests_ma.test_api.conftest import create_workspace_owner_user, create_workspace_researcher_user, get_required_roles from mock import AsyncMock, patch, MagicMock from models.domain.events import AirlockNotificationData, AirlockNotificationUserData, StatusChangedData, \ AirlockNotificationRequestData, AirlockNotificationWorkspaceData, AirlockFile from services.airlock import save_and_publish_event_airlock_request, \ update_and_publish_event_airlock_request, get_airlock_requests_by_user_and_workspace, get_allowed_actions from db.repositories.airlock_requests import AirlockRequestRepository from models.domain.workspace import Workspace from tests_ma.test_api.conftest import create_test_user, create_workspace_airlock_manager_user from azure.eventgrid import EventGridEvent from api.routes.airlock import create_airlock_review, create_cancel_request, create_submit_request from services.aad_authentication import AzureADAuthorization WORKSPACE_ID = "abc000d3-82da-4bfc-b6e9-9a7853ef753e" AIRLOCK_REQUEST_ID = "5dbc15ae-40e1-49a5-834b-595f59d626b7" AIRLOCK_REVIEW_ID = "96d909c5-e913-4c05-ae53-668a702ba2e5" USER_RESOURCE_ID = "cce59042-1dee-42dc-9388-6db846feeb3b" WORKSPACE_SERVICE_ID = "30f2fefa-e7bb-4e5b-93aa-e50bb037502a" CURRENT_TIME = time.time() ALL_ROLES = AzureADAuthorization.WORKSPACE_ROLES_DICT.keys() @pytest_asyncio.fixture async def airlock_request_repo_mock(no_database): _ = no_database airlock_request_repo_mock = await AirlockRequestRepository().create() yield airlock_request_repo_mock def sample_workspace(): return Workspace( id=WORKSPACE_ID, templateName='template name', templateVersion='1.0', etag='', properties={ "client_id": "12345", "display_name": "my research workspace", "description": "for science!"}, resourcePath="test") def sample_airlock_request(status=AirlockRequestStatus.Draft): airlock_request = AirlockRequest( id=AIRLOCK_REQUEST_ID, workspaceId=WORKSPACE_ID, type=AirlockRequestType.Import, reviewUserResources={"user-guid-here": sample_airlock_user_resource_object()}, files=[AirlockFile( name="data.txt", size=5 )], businessJustification="some test reason", status=status, createdWhen=CURRENT_TIME, createdBy=AirlockNotificationUserData( name="John Doe", email="john@example.com" ), updatedWhen=CURRENT_TIME, updatedBy=AirlockNotificationUserData( name="Test User", email="test@user.com" ) ) return airlock_request def sample_airlock_user_resource_object(): return AirlockReviewUserResource( workspaceId=WORKSPACE_ID, workspaceServiceId=WORKSPACE_SERVICE_ID, userResourceId=USER_RESOURCE_ID ) def sample_status_changed_event(new_status="draft", previous_status=None): status_changed_event = EventGridEvent( event_type="statusChanged", data=StatusChangedData(request_id=AIRLOCK_REQUEST_ID, new_status=new_status, previous_status=previous_status, type=AirlockRequestType.Import, workspace_id=WORKSPACE_ID[-4:]).__dict__, subject=f"{AIRLOCK_REQUEST_ID}/statusChanged", data_version="2.0" ) return status_changed_event def sample_airlock_notification_event(status="draft"): status_changed_event = 
EventGridEvent( event_type="airlockNotification", data=AirlockNotificationData( event_type="status_changed", recipient_emails_by_role={"workspace_researcher": ["researcher@outlook.com"], "workspace_owner": ["owner@outlook.com"], "airlock_manager": ["manager@outlook.com"]}, request=AirlockNotificationRequestData( id=AIRLOCK_REQUEST_ID, created_when=CURRENT_TIME, created_by=AirlockNotificationUserData( name="John Doe", email="john@example.com" ), updated_when=CURRENT_TIME, updated_by=AirlockNotificationUserData( name="Test User", email="test@user.com" ), request_type=AirlockRequestType.Import, files=[AirlockFile( name="data.txt", size=5 )], status=status, business_justification="some test reason" ), workspace=AirlockNotificationWorkspaceData( id=WORKSPACE_ID, display_name="my research workspace", description="for science!" )), subject=f"{AIRLOCK_REQUEST_ID}/airlockNotification", data_version="4.0" ) return status_changed_event def sample_airlock_review(review_decision=AirlockReviewDecision.Approved): airlock_review = AirlockReview( id=AIRLOCK_REVIEW_ID, reviewDecision=review_decision, decisionExplanation="test explanation" ) return airlock_review def test_validate_user_is_allowed_to_access_sa_blocks_access_as_expected(): airlock_manager_user = create_workspace_airlock_manager_user() draft_airlock_request = sample_airlock_request() with pytest.raises(HTTPException) as ex: validate_user_allowed_to_access_storage_account( user=airlock_manager_user, airlock_request=draft_airlock_request ) assert ex.value.status_code == status.HTTP_403_FORBIDDEN researcher_user = create_workspace_researcher_user() review_airlock_request = sample_airlock_request(AirlockRequestStatus.InReview) with pytest.raises(HTTPException) as ex: validate_user_allowed_to_access_storage_account( user=researcher_user, airlock_request=review_airlock_request ) assert ex.value.status_code == status.HTTP_403_FORBIDDEN def test_validate_user_is_allowed_to_access_grants_access_to_user_with_a_valid_role(): ws_owner_user = create_workspace_owner_user() draft_airlock_request = sample_airlock_request(AirlockRequestStatus.InReview) assert (validate_user_allowed_to_access_storage_account( user=ws_owner_user, airlock_request=draft_airlock_request) is None) researcher_user = create_workspace_researcher_user() review_airlock_request = sample_airlock_request(AirlockRequestStatus.Approved) assert ( validate_user_allowed_to_access_storage_account( user=researcher_user, airlock_request=review_airlock_request ) is None) @pytest.mark.parametrize('airlock_status', [AirlockRequestStatus.ApprovalInProgress, AirlockRequestStatus.RejectionInProgress, AirlockRequestStatus.BlockingInProgress]) def test_validate_request_status_raises_error_for_in_progress_request(airlock_status): airlock_request = sample_airlock_request(airlock_status) with pytest.raises(HTTPException) as ex: validate_request_status(airlock_request) assert ex.value.status_code == status.HTTP_400_BAD_REQUEST assert ex.value.detail == strings.AIRLOCK_REQUEST_IN_PROGRESS def test_validate_request_status_raises_error_for_canceled_request(): airlock_request = sample_airlock_request(AirlockRequestStatus.Cancelled) with pytest.raises(HTTPException) as ex: validate_request_status(airlock_request) assert ex.value.status_code == status.HTTP_400_BAD_REQUEST assert ex.value.detail == strings.AIRLOCK_REQUEST_IS_CANCELED @pytest.mark.parametrize('airlock_status', [AirlockRequestStatus.Failed, AirlockRequestStatus.Rejected, AirlockRequestStatus.Blocked]) def 
test_validate_request_status_raises_error_for_unaccessible_request(airlock_status): airlock_request = sample_airlock_request(airlock_status) with pytest.raises(HTTPException) as ex: validate_request_status(airlock_request) assert ex.value.status_code == status.HTTP_400_BAD_REQUEST assert ex.value.detail == strings.AIRLOCK_REQUEST_UNACCESSIBLE @pytest.mark.parametrize('airlock_status', [AirlockRequestStatus.Submitted, AirlockRequestStatus.InReview, AirlockRequestStatus.ApprovalInProgress, AirlockRequestStatus.Approved, AirlockRequestStatus.RejectionInProgress, AirlockRequestStatus.Rejected, AirlockRequestStatus.Cancelled, AirlockRequestStatus.BlockingInProgress, AirlockRequestStatus.Blocked]) def test_get_required_permission_return_read_only_permissions_for_non_draft_requests(airlock_status): airlock_request = sample_airlock_request(airlock_status) permissions = get_required_permission(airlock_request) assert permissions.write is False assert permissions.delete is False assert permissions.read is True assert permissions.list is True def test_get_required_permission_return_read_and_write_permissions_for_draft_requests(): airlock_request = sample_airlock_request(AirlockRequestStatus.Draft) permissions = get_required_permission(airlock_request) assert permissions.write is True assert permissions.delete is True assert permissions.list is True assert permissions.read is True @pytest.mark.asyncio @patch("event_grid.helpers.EventGridPublisherClient", return_value=AsyncMock()) @patch("services.aad_authentication.AzureADAuthorization.get_workspace_role_assignment_details", return_value={"WorkspaceResearcher": ["researcher@outlook.com"], "WorkspaceOwner": ["owner@outlook.com"], "AirlockManager": ["manager@outlook.com"]}) @patch('services.airlock.get_timestamp', return_value=CURRENT_TIME) async def test_save_and_publish_event_airlock_request_saves_item(_, __, event_grid_publisher_client_mock, airlock_request_repo_mock): airlock_request_mock = sample_airlock_request() airlock_request_repo_mock.save_item = AsyncMock(return_value=None) status_changed_event_mock = sample_status_changed_event() airlock_notification_event_mock = sample_airlock_notification_event() event_grid_sender_client_mock = event_grid_publisher_client_mock.return_value event_grid_sender_client_mock.send = AsyncMock() await save_and_publish_event_airlock_request( airlock_request=airlock_request_mock, airlock_request_repo=airlock_request_repo_mock, user=create_test_user(), workspace=sample_workspace()) airlock_request_repo_mock.save_item.assert_called_once_with(airlock_request_mock) assert event_grid_sender_client_mock.send.call_count == 2 # Since the eventgrid object has the update time attribute which differs, we only compare the data that was sent actual_status_changed_event = event_grid_sender_client_mock.send.await_args_list[0].args[0][0] assert actual_status_changed_event.data == status_changed_event_mock.data actual_airlock_notification_event = event_grid_sender_client_mock.send.await_args_list[1].args[0][0] assert actual_airlock_notification_event.data == airlock_notification_event_mock.data @pytest.mark.asyncio @patch("services.aad_authentication.AzureADAuthorization.get_workspace_role_assignment_details", return_value={"WorkspaceResearcher": ["researcher@outlook.com"], "WorkspaceOwner": ["owner@outlook.com"], "AirlockManager": ["manager@outlook.com"]}) async def test_save_and_publish_event_airlock_request_raises_503_if_save_to_db_fails(_, airlock_request_repo_mock): airlock_request_mock = sample_airlock_request() 
airlock_request_repo_mock.save_item = AsyncMock(side_effect=Exception) with pytest.raises(HTTPException) as ex: await save_and_publish_event_airlock_request( airlock_request=airlock_request_mock, airlock_request_repo=airlock_request_repo_mock, user=create_test_user(), workspace=sample_workspace()) assert ex.value.status_code == status.HTTP_503_SERVICE_UNAVAILABLE @pytest.mark.asyncio @patch("event_grid.helpers.EventGridPublisherClient", return_value=AsyncMock()) @patch("services.aad_authentication.AzureADAuthorization.get_workspace_role_assignment_details", return_value={"WorkspaceResearcher": ["researcher@outlook.com"], "WorkspaceOwner": ["owner@outlook.com"], "AirlockManager": ["manager@outlook.com"]}) async def test_save_and_publish_event_airlock_request_raises_503_if_publish_event_fails(_, event_grid_publisher_client_mock, airlock_request_repo_mock): airlock_request_mock = sample_airlock_request() airlock_request_repo_mock.save_item = AsyncMock(return_value=None) # When eventgrid fails, it deletes the saved request airlock_request_repo_mock.delete_item = AsyncMock(return_value=None) event_grid_sender_client_mock = event_grid_publisher_client_mock.return_value event_grid_sender_client_mock.send = AsyncMock(side_effect=Exception) with pytest.raises(HTTPException) as ex: await save_and_publish_event_airlock_request( airlock_request=airlock_request_mock, airlock_request_repo=airlock_request_repo_mock, user=create_test_user(), workspace=sample_workspace()) assert ex.value.status_code == status.HTTP_503_SERVICE_UNAVAILABLE @pytest.mark.asyncio @pytest.mark.parametrize('role_assignment_details_mock_return', [{}, {"AirlockManager": ["owner@outlook.com"]}, {"WorkspaceResearcher": [], "AirlockManager": ["owner@outlook.com"]}, {"WorkspaceResearcher": ["researcher@outlook.com"], "owner_emails": []}, {"WorkspaceResearcher": ["researcher@outlook.com"]}]) async def test_check_email_exists_raises_417_if_email_not_present(role_assignment_details_mock_return): role_assignment_details = role_assignment_details_mock_return with pytest.raises(HTTPException) as ex: check_email_exists(role_assignment_details) assert ex.value.status_code == status.HTTP_417_EXPECTATION_FAILED @pytest.mark.asyncio @pytest.mark.parametrize('email_mock_return', [{}, {"AirlockManager": ["owner@outlook.com"]}, {"WorkspaceResearcher": [], "AirlockManager": ["owner@outlook.com"]}, {"WorkspaceResearcher": ["researcher@outlook.com"], "owner_emails": []}, {"WorkspaceResearcher": ["researcher@outlook.com"]}]) @patch("services.aad_authentication.AzureADAuthorization.get_workspace_role_assignment_details") async def test_save_and_publish_event_airlock_request_raises_417_if_email_not_present(get_workspace_role_assignment_details_patched, email_mock_return): get_workspace_role_assignment_details_patched.return_value = email_mock_return airlock_request_mock = sample_airlock_request() with pytest.raises(HTTPException) as ex: await save_and_publish_event_airlock_request( airlock_request=airlock_request_mock, airlock_request_repo=None, user=create_test_user(), workspace=sample_workspace()) assert ex.value.status_code == status.HTTP_417_EXPECTATION_FAILED @pytest.mark.asyncio @patch("event_grid.helpers.EventGridPublisherClient", return_value=AsyncMock()) @patch("services.aad_authentication.AzureADAuthorization.get_workspace_role_assignment_details", return_value={"WorkspaceResearcher": ["researcher@outlook.com"], "WorkspaceOwner": ["owner@outlook.com"], "AirlockManager": ["manager@outlook.com"]}) async def 
test_update_and_publish_event_airlock_request_updates_item(_, event_grid_publisher_client_mock, airlock_request_repo_mock): airlock_request_mock = sample_airlock_request() updated_airlock_request_mock = sample_airlock_request(status=AirlockRequestStatus.Submitted) status_changed_event_mock = sample_status_changed_event(new_status="submitted", previous_status="draft") airlock_notification_event_mock = sample_airlock_notification_event(status="submitted") airlock_request_repo_mock.update_airlock_request = AsyncMock(return_value=updated_airlock_request_mock) event_grid_sender_client_mock = event_grid_publisher_client_mock.return_value event_grid_sender_client_mock.send = AsyncMock() actual_updated_airlock_request = await update_and_publish_event_airlock_request( airlock_request=airlock_request_mock, airlock_request_repo=airlock_request_repo_mock, updated_by=create_test_user(), new_status=AirlockRequestStatus.Submitted, workspace=sample_workspace()) airlock_request_repo_mock.update_airlock_request.assert_called_once() assert (actual_updated_airlock_request == updated_airlock_request_mock) assert event_grid_sender_client_mock.send.call_count == 2 # Since the eventgrid object has the update time attribute which differs, we only compare the data that was sent actual_status_changed_event = event_grid_sender_client_mock.send.await_args_list[0].args[0][0] assert actual_status_changed_event.data == status_changed_event_mock.data actual_airlock_notification_event = event_grid_sender_client_mock.send.await_args_list[1].args[0][0] assert actual_airlock_notification_event.data == airlock_notification_event_mock.data @pytest.mark.asyncio @patch("services.airlock.send_status_changed_event") @patch("services.airlock.send_airlock_notification_event") @patch("services.aad_authentication.AzureADAuthorization.get_workspace_role_assignment_details") async def test_update_and_publish_event_airlock_request_sends_status_changed_event(_, send_airlock_notification_event_mock, send_status_changed_event_mock, airlock_request_repo_mock): new_status = AirlockRequestStatus.Submitted airlock_request_repo_mock.update_airlock_request = AsyncMock() await update_and_publish_event_airlock_request( airlock_request=sample_airlock_request(), airlock_request_repo=airlock_request_repo_mock, updated_by=create_test_user(), new_status=new_status, workspace=sample_workspace()) assert send_status_changed_event_mock.call_count == 1 assert send_airlock_notification_event_mock.call_count == 1 @pytest.mark.asyncio @patch("services.aad_authentication.AzureADAuthorization.get_workspace_role_assignment_details", return_value={"WorkspaceResearcher": ["researcher@outlook.com"], "WorkspaceOwner": ["owner@outlook.com"], "AirlockManager": ["manager@outlook.com"]}) async def test_update_and_publish_event_airlock_request_raises_400_if_status_update_invalid(_, airlock_request_repo_mock): airlock_request_mock = sample_airlock_request() with pytest.raises(HTTPException) as ex: await update_and_publish_event_airlock_request( airlock_request=airlock_request_mock, airlock_request_repo=airlock_request_repo_mock, updated_by=create_test_user(), new_status=AirlockRequestStatus.Approved, workspace=sample_workspace()) assert ex.value.status_code == status.HTTP_400_BAD_REQUEST @pytest.mark.asyncio @patch("event_grid.helpers.EventGridPublisherClient", return_value=AsyncMock()) @patch("services.aad_authentication.AzureADAuthorization.get_workspace_role_assignment_details", return_value={"WorkspaceResearcher": ["researcher@outlook.com"], "WorkspaceOwner": 
["owner@outlook.com"], "AirlockManager": ["manager@outlook.com"]}) async def test_update_and_publish_event_airlock_request_raises_503_if_publish_event_fails(_, event_grid_publisher_client_mock, airlock_request_repo_mock): airlock_request_mock = sample_airlock_request() updated_airlock_request_mock = sample_airlock_request(status=AirlockRequestStatus.Submitted) airlock_request_repo_mock.update_airlock_request = AsyncMock(return_value=updated_airlock_request_mock) event_grid_sender_client_mock = event_grid_publisher_client_mock.return_value event_grid_sender_client_mock.send = AsyncMock(side_effect=Exception) with pytest.raises(HTTPException) as ex: await update_and_publish_event_airlock_request( airlock_request=airlock_request_mock, airlock_request_repo=airlock_request_repo_mock, updated_by=create_test_user(), new_status=AirlockRequestStatus.Submitted, workspace=sample_workspace()) assert ex.value.status_code == status.HTTP_503_SERVICE_UNAVAILABLE @pytest.mark.asyncio @patch("services.airlock.send_status_changed_event") @patch("services.airlock.send_airlock_notification_event") @patch("services.aad_authentication.AzureADAuthorization.get_workspace_role_assignment_details") async def test_update_and_publish_event_airlock_request_without_status_change_should_not_send_status_changed_event(_, send_airlock_notification_event_mock, send_status_changed_event_mock, airlock_request_repo_mock): new_status = None airlock_request_repo_mock.update_airlock_request = AsyncMock() await update_and_publish_event_airlock_request( airlock_request=sample_airlock_request(), airlock_request_repo=airlock_request_repo_mock, updated_by=create_test_user(), new_status=new_status, workspace=sample_workspace()) assert send_status_changed_event_mock.call_count == 0 assert send_airlock_notification_event_mock.call_count == 0 @pytest.mark.asyncio async def test_get_airlock_requests_by_user_and_workspace_with_status_filter_calls_repo(airlock_request_repo_mock): workspace = sample_workspace() user = create_workspace_airlock_manager_user() airlock_request_repo_mock.get_airlock_requests = AsyncMock() await get_airlock_requests_by_user_and_workspace(user=user, workspace=workspace, airlock_request_repo=airlock_request_repo_mock, status=AirlockRequestStatus.InReview) airlock_request_repo_mock.get_airlock_requests.assert_called_once_with(workspace_id=workspace.id, creator_user_id=None, type=None, status=AirlockRequestStatus.InReview, order_by=None, order_ascending=True) @pytest.mark.asyncio @pytest.mark.parametrize("action, required_roles, airlock_request_repo_mock", [ (AirlockActions.Review, get_required_roles(endpoint=create_airlock_review), airlock_request_repo_mock), (AirlockActions.Cancel, get_required_roles(endpoint=create_cancel_request), airlock_request_repo_mock), (AirlockActions.Submit, get_required_roles(endpoint=create_submit_request), airlock_request_repo_mock)]) async def test_get_allowed_actions_requires_same_roles_as_endpoint(action, required_roles, airlock_request_repo_mock): airlock_request_repo_mock.validate_status_update = MagicMock(return_value=True) user = create_test_user() for role in required_roles: user.roles = [role] allowed_actions = get_allowed_actions(request=sample_airlock_request(), user=user, airlock_request_repo=airlock_request_repo_mock) assert action in allowed_actions @pytest.mark.asyncio @pytest.mark.parametrize("action, endpoint_roles, airlock_request_repo_mock", [ (AirlockActions.Review, get_required_roles(endpoint=create_airlock_review), airlock_request_repo_mock), (AirlockActions.Cancel, 
get_required_roles(endpoint=create_cancel_request), airlock_request_repo_mock), (AirlockActions.Submit, get_required_roles(endpoint=create_submit_request), airlock_request_repo_mock)]) async def test_get_allowed_actions_does_not_return_actions_that_are_forbidden_to_the_user_role(action, endpoint_roles, airlock_request_repo_mock): airlock_request_repo_mock.validate_status_update = MagicMock(return_value=True) user = create_test_user() forbidden_roles = [role for role in ALL_ROLES if role not in endpoint_roles] for forbidden_role in forbidden_roles: user.roles = [forbidden_role] allowed_actions = get_allowed_actions(request=sample_airlock_request(), user=user, airlock_request_repo=airlock_request_repo_mock) assert action not in allowed_actions @pytest.mark.asyncio @patch("services.airlock.delete_review_user_resource") @patch("services.airlock.update_and_publish_event_airlock_request") async def test_cancel_request_deletes_review_resource(_, delete_review_user_resource, airlock_request_repo_mock): await cancel_request( airlock_request=sample_airlock_request(), user=create_test_user(), airlock_request_repo=airlock_request_repo_mock, workspace=sample_workspace(), user_resource_repo=AsyncMock(), workspace_service_repo=AsyncMock(), resource_template_repo=AsyncMock(), operations_repo=AsyncMock(), resource_history_repo=AsyncMock()) delete_review_user_resource.assert_called_once() @pytest.mark.asyncio @patch("services.airlock.disable_user_resource") @patch("services.airlock.send_uninstall_message") @patch("services.airlock.update_and_publish_event_airlock_request") async def test_delete_review_user_resource_disables_the_resource_before_deletion(_, __, disable_user_resource): await delete_review_user_resource(user_resource=AsyncMock(), user_resource_repo=AsyncMock(), workspace_service_repo=AsyncMock(), resource_template_repo=AsyncMock(), operations_repo=AsyncMock(), resource_history_repo=AsyncMock(), user=create_test_user()) disable_user_resource.assert_called_once()
AzureTRE/api_app/tests_ma/test_services/test_airlock.py/0
{ "file_path": "AzureTRE/api_app/tests_ma/test_services/test_airlock.py", "repo_id": "AzureTRE", "token_count": 11025 }
103
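A quick way to exercise just the airlock service tests above is a targeted pytest run from the repository root; the file path comes from this entry's metadata, and the `-k` filter is only an illustrative example.

```bash
# Run only the airlock service unit tests; narrow further with -k if needed.
pytest api_app/tests_ma/test_services/test_airlock.py -q
pytest api_app/tests_ma/test_services/test_airlock.py -k "save_and_publish" -q
```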
.PHONY: pip-install install-cli build-package

help: ## show this help
	@grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) \
	| awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%s\033[0m|%s\n", $$1, $$2}' \
	| column -t -s '|'

pip-install: ## install required dependencies
	pip install -r requirements.txt

install-cli: ## install CLI (note, run `source <(_TRE_COMPLETE=bash_source tre)` to set up bash completion)
	sudo rm -rf build dist tre.egg-info
	sudo pip install .

build-package: ## build package
	./scripts/build.sh
AzureTRE/cli/Makefile/0
{ "file_path": "AzureTRE/cli/Makefile", "repo_id": "AzureTRE", "token_count": 209 }
104
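A typical local session against the CLI Makefile above might look like the following (the working directory is assumed to be the `cli` folder of the repo):

```bash
cd cli
make help          # list the targets and their descriptions
make pip-install   # install the Python requirements
make install-cli   # install the tre CLI
source <(_TRE_COMPLETE=bash_source tre)   # optional: enable bash completion, as noted in the Makefile
```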
import logging import click from tre.api_client import ApiClient from tre.output import output, output_option, query_option from .contexts import WorkspaceServiceTemplateContext, pass_workspace_service_template_context from .user_resource_templates.user_resource_templates import user_resource_templates from .user_resource_templates.user_resource_template import user_resource_template def template_name_completion(ctx: click.Context, param: click.Parameter, incomplete: str): log = logging.getLogger(__name__) client = ApiClient.get_api_client_from_config() response = client.call_api(log, 'GET', '/api/workspace-service-templates') if response.is_success: names = [workspace["name"] for workspace in response.json()["templates"]] return [name for name in names if name.startswith(incomplete)] @click.group(name="workspace-service-template", invoke_without_command=True, help="Perform actions on an workspace-service-template") @click.argument('template_name', required=True, shell_complete=template_name_completion) @click.pass_context def workspace_service_template(ctx: click.Context, template_name) -> None: ctx.obj = WorkspaceServiceTemplateContext(template_name) @click.command(name="show", help="Show template") @output_option() @query_option() @pass_workspace_service_template_context def workspace_service_template_show(workspace_service_template_context: WorkspaceServiceTemplateContext, output_format, query) -> None: log = logging.getLogger(__name__) template_name = workspace_service_template_context.template_name if template_name is None: raise click.UsageError('Missing template name') client = ApiClient.get_api_client_from_config() response = client.call_api( log, 'GET', f'/api/workspace-service-templates/{template_name}', ) output(response, output_format=output_format, query=query, default_table_query=r"{id: id, name:name, title: title, version:version, description:description}") workspace_service_template.add_command(workspace_service_template_show) workspace_service_template.add_command(user_resource_template) workspace_service_template.add_command(user_resource_templates)
AzureTRE/cli/tre/commands/workspace_service_templates/workspace_service_template.py/0
{ "file_path": "AzureTRE/cli/tre/commands/workspace_service_templates/workspace_service_template.py", "repo_id": "AzureTRE", "token_count": 694 }
105
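For context, the click group above can be invoked roughly as follows; the template name is a placeholder, and the exact spelling of the output flag is an assumption based on the `output_option()` decorator rather than something confirmed by this file:

```bash
# Show a single workspace service template (template name is an example).
tre workspace-service-template my-tre-workspace-service show
# Assumed output flag provided by output_option():
tre workspace-service-template my-tre-workspace-service show --output table
```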
import click from tre.commands.workspaces.contexts import WorkspaceContext class WorkspaceServiceContext(object): def __init__(self, workspace_id: str, workspace_service_id: str): self.workspace_id = workspace_id self.workspace_service_id = workspace_service_id @staticmethod def add_service_id_to_context_obj(ctx: click.Context, workspace_service_id: str) -> "WorkspaceServiceContext": workspace_context = ctx.find_object(WorkspaceContext) return WorkspaceServiceContext(workspace_context.workspace_id, workspace_service_id) pass_workspace_service_context = click.make_pass_decorator(WorkspaceServiceContext) class WorkspaceServiceOperationContext(object): def __init__(self, workspace_id: str, workspace_service_id: str, operation_id: str): self.workspace_id = workspace_id self.workspace_service_id = workspace_service_id self.operation_id = operation_id @staticmethod def add_operation_id_to_context_obj(ctx: click.Context, operation_id: str) -> "WorkspaceServiceOperationContext": workspace_service_context = ctx.find_object(WorkspaceServiceContext) return WorkspaceServiceOperationContext(workspace_service_context.workspace_id, workspace_service_context.workspace_service_id, operation_id) pass_workspace_service_operation_context = click.make_pass_decorator(WorkspaceServiceOperationContext)
AzureTRE/cli/tre/commands/workspaces/workspace_services/contexts.py/0
{ "file_path": "AzureTRE/cli/tre/commands/workspaces/workspace_services/contexts.py", "repo_id": "AzureTRE", "token_count": 461 }
106
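As a minimal sketch (not taken from the repo) of how these context classes are normally consumed, a parent `workspace-service` group would build the combined context object and a child command would receive it via the pass decorator; the group and command names below are illustrative only, and the real CLI commands live elsewhere in the codebase:

```python
import click

from tre.commands.workspaces.workspace_services.contexts import (
    WorkspaceServiceContext,
    pass_workspace_service_context,
)


@click.group(name="workspace-service", invoke_without_command=True)
@click.argument("service_id", required=True)
@click.pass_context
def workspace_service(ctx: click.Context, service_id: str) -> None:
    # Combine the parent WorkspaceContext (set by the enclosing workspace group)
    # with this service id into a single context object.
    ctx.obj = WorkspaceServiceContext.add_service_id_to_context_obj(ctx, service_id)


@click.command(name="show")
@pass_workspace_service_context
def workspace_service_show(workspace_service_context: WorkspaceServiceContext) -> None:
    # The pass decorator looks the WorkspaceServiceContext up from the click context chain.
    click.echo(f"{workspace_service_context.workspace_id}/{workspace_service_context.workspace_service_id}")


workspace_service.add_command(workspace_service_show)
```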
# This file is maintained automatically by "terraform init". # Manual edits may be lost in future updates. provider "registry.terraform.io/azure/azapi" { version = "1.9.0" constraints = "1.9.0" hashes = [ "h1:zaLH2Owmj61RX2G1Cy6VDy8Ttfzx+lDsSCyiu5cXkm4=", "zh:349569471fbf387feaaf8b88da1690669e201147c342f905e5eb03df42b3cf87", "zh:54346d5fb78cbad3eb7cfd96e1dd7ce4f78666cabaaccfec6ee9437476330018", "zh:64b799da915ea3a9a58ac7a926c6a31c59fd0d911687804d8e815eda88c5580b", "zh:9336ed9e112555e0fda8af6be9ba21478e30117d79ba662233311d9560d2b7c6", "zh:a8aace9897b28ea0b2dbd7a3be3df033e158af40412c9c7670be0956f216ed7e", "zh:ab23df7de700d9e785009a4ca9ceb38ae1ab894a13f5788847f15d018556f415", "zh:b4f13f0b13560a67d427c71c85246f8920f98987120341830071df4535842053", "zh:e58377bf36d8a14d28178a002657865ee17446182dac03525fd43435e41a1b5c", "zh:ea5db4acc6413fd0fe6b35981e58cdc9850f5f3118031cc3d2581de511aee6aa", "zh:f0b32c06c6bd4e4af2c02a62be07b947766aeeb09289a03f21aba16c2fd3c60f", "zh:f1518e766a90c257d7eb36d360dafaf311593a4a9352ff8db0bcfe0ed8cf45ae", "zh:fa89e84cff0776b5b61ff27049b1d8ed52040bd58c81c4628890d644a6fb2989", ] } provider "registry.terraform.io/hashicorp/azurerm" { version = "3.74.0" constraints = ">= 3.8.0, >= 3.16.0, 3.74.0" hashes = [ "h1:ETVZfmulZQ435+lgFCkZRpfVOLyAxfDOwbPXFg3aLLQ=", "zh:0424c70152f949da1ec52ba96d20e5fd32fd22d9bd9203ce045d5f6aab3d20fc", "zh:16dbf581d10f8e7937185bcdcceb4f91d08c919e452fb8da7580071288c8c397", "zh:3019103bc2c3b4e185f5c65696c349697644c968f5c085af5505fed6d01c4241", "zh:49bb56ebaed6653fdb913c2b2bb74fc8b5399e7258d1e89084f72c44ea1130dd", "zh:85547666517f899d88620bd23a000a8f43c7dc93587c350eb1ea17bcb3e645c7", "zh:8bed8b646ff1822d8764de68b56b71e5dd971a4b77eba80d47f400a530800bea", "zh:8bfa6c70c004ba05ebce47f74f49ce872c28a68a18bb71b281a9681bcbbdbfa1", "zh:a2ae9e38fda0695fb8aa810e4f1ce4b104bfda651a87923b307bb1728680d8b6", "zh:beac1efe32f99072c892095f5ff46e40d6852b66679a03bc3acbe1b90fb1f653", "zh:d8a6ca20e49ebe7ea5688d91233d571e2c2ccc3e41000c39a7d7031df209ea8e", "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", "zh:f937b5fdf49b072c0347408d0a1c5a5d822dae1a23252915930e5a82d1d8ce8b", ] } provider "registry.terraform.io/hashicorp/http" { version = "3.2.1" constraints = "~> 3.2.0" hashes = [ "h1:DfxMa1zM/0NCFWN5PAxivSHJMNkOAFZvDYQkO72ZQmw=", "zh:088b3b3128034485e11dff8da16e857d316fbefeaaf5bef24cceda34c6980641", "zh:09ed1f2462ea4590b112e048c4af556f0b6eafc7cf2c75bb2ac21cd87ca59377", "zh:39c6b0b4d3f0f65e783c467d3f634e2394820b8aef907fcc24493f21dcf73ca3", "zh:47aab45327daecd33158a36c1a36004180a518bf1620cdd5cfc5e1fe77d5a86f", "zh:4d70a990aa48116ab6f194eef393082c21cf58bece933b63575c63c1d2b66818", "zh:65470c43fda950c7e9ac89417303c470146de984201fff6ef84299ea29e02d30", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:842b4dd63e438f5cd5fdfba1c09b8fdf268e8766e6690988ee24e8b25bfd9e8d", "zh:a167a057f7e2d80c78d4b4057538588131fceb983d5c93b07675ad9eb1aa5790", "zh:d0ba69b62b6db788cfe3cf8f7dc6e9a0eabe2927dc119d7fe3fe6573ee559e66", "zh:e28d24c1d5ff24b1d1cc6f0074a1f41a6974f473f4ff7a37e55c7b6dca68308a", "zh:fde8a50554960e5366fd0e1ca330a7c1d24ae6bbb2888137a5c83d83ce14fd18", ] } provider "registry.terraform.io/hashicorp/local" { version = "2.4.0" constraints = ">= 2.2.0, ~> 2.4.0" hashes = [ "h1:R97FTYETo88sT2VHfMgkPU3lzCsZLunPftjSI5vfKe8=", "zh:53604cd29cb92538668fe09565c739358dc53ca56f9f11312b9d7de81e48fab9", "zh:66a46e9c508716a1c98efbf793092f03d50049fa4a83cd6b2251e9a06aca2acf", "zh:70a6f6a852dd83768d0778ce9817d81d4b3f073fab8fa570bff92dcb0824f732", 
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:82a803f2f484c8b766e2e9c32343e9c89b91997b9f8d2697f9f3837f62926b35", "zh:9708a4e40d6cc4b8afd1352e5186e6e1502f6ae599867c120967aebe9d90ed04", "zh:973f65ce0d67c585f4ec250c1e634c9b22d9c4288b484ee2a871d7fa1e317406", "zh:c8fa0f98f9316e4cfef082aa9b785ba16e36ff754d6aba8b456dab9500e671c6", "zh:cfa5342a5f5188b20db246c73ac823918c189468e1382cb3c48a9c0c08fc5bf7", "zh:e0e2b477c7e899c63b06b38cd8684a893d834d6d0b5e9b033cedc06dd7ffe9e2", "zh:f62d7d05ea1ee566f732505200ab38d94315a4add27947a60afa29860822d3fc", "zh:fa7ce69dde358e172bd719014ad637634bbdabc49363104f4fca759b4b73f2ce", ] } provider "registry.terraform.io/hashicorp/random" { version = "3.4.3" constraints = ">= 3.0.0, ~> 3.4.0" hashes = [ "h1:xZGZf18JjMS06pFa4NErzANI98qi59SEcBsOcS2P2yQ=", "zh:41c53ba47085d8261590990f8633c8906696fa0a3c4b384ff6a7ecbf84339752", "zh:59d98081c4475f2ad77d881c4412c5129c56214892f490adf11c7e7a5a47de9b", "zh:686ad1ee40b812b9e016317e7f34c0d63ef837e084dea4a1f578f64a6314ad53", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:84103eae7251384c0d995f5a257c72b0096605048f757b749b7b62107a5dccb3", "zh:8ee974b110adb78c7cd18aae82b2729e5124d8f115d484215fd5199451053de5", "zh:9dd4561e3c847e45de603f17fa0c01ae14cae8c4b7b4e6423c9ef3904b308dda", "zh:bb07bb3c2c0296beba0beec629ebc6474c70732387477a65966483b5efabdbc6", "zh:e891339e96c9e5a888727b45b2e1bb3fcbdfe0fd7c5b4396e4695459b38c8cb1", "zh:ea4739860c24dfeaac6c100b2a2e357106a89d18751f7693f3c31ecf6a996f8d", "zh:f0c76ac303fd0ab59146c39bc121c5d7d86f878e9a69294e29444d4c653786f8", "zh:f143a9a5af42b38fed328a161279906759ff39ac428ebcfe55606e05e1518b93", ] } provider "registry.terraform.io/hashicorp/template" { version = "2.2.0" constraints = ">= 2.2.0" hashes = [ "h1:94qn780bi1qjrbC3uQtjJh3Wkfwd5+tTtJHOb7KTg9w=", "zh:01702196f0a0492ec07917db7aaa595843d8f171dc195f4c988d2ffca2a06386", "zh:09aae3da826ba3d7df69efeb25d146a1de0d03e951d35019a0f80e4f58c89b53", "zh:09ba83c0625b6fe0a954da6fbd0c355ac0b7f07f86c91a2a97849140fea49603", "zh:0e3a6c8e16f17f19010accd0844187d524580d9fdb0731f675ffcf4afba03d16", "zh:45f2c594b6f2f34ea663704cc72048b212fe7d16fb4cfd959365fa997228a776", "zh:77ea3e5a0446784d77114b5e851c970a3dde1e08fa6de38210b8385d7605d451", "zh:8a154388f3708e3df5a69122a23bdfaf760a523788a5081976b3d5616f7d30ae", "zh:992843002f2db5a11e626b3fc23dc0c87ad3729b3b3cff08e32ffb3df97edbde", "zh:ad906f4cebd3ec5e43d5cd6dc8f4c5c9cc3b33d2243c89c5fc18f97f7277b51d", "zh:c979425ddb256511137ecd093e23283234da0154b7fa8b21c2687182d9aea8b2", ] }
AzureTRE/core/terraform/.terraform.lock.hcl/0
{ "file_path": "AzureTRE/core/terraform/.terraform.lock.hcl", "repo_id": "AzureTRE", "token_count": 3943 }
107
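If the provider set or the target platforms change, a lock file like the one above is typically regenerated rather than edited by hand. A hedged example of doing that for several platforms (assuming it is run from the `core/terraform` directory) is:

```bash
terraform providers lock \
  -platform=linux_amd64 \
  -platform=darwin_amd64 \
  -platform=darwin_arm64
```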
terraform {
  # In modules we should only specify the min version
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = ">= 3.8"
    }
  }
}
AzureTRE/core/terraform/appgateway/main.tf/0
{ "file_path": "AzureTRE/core/terraform/appgateway/main.tf", "repo_id": "AzureTRE", "token_count": 76 }
108
# The zones defined in this file aren't used by the core system,
# but are a preparation for shared/workspace services deployment.
resource "azurerm_private_dns_zone" "non_core" {
  for_each            = local.private_dns_zone_names_non_core
  name                = module.terraform_azurerm_environment_configuration.private_links[each.key]
  resource_group_name = azurerm_resource_group.core.name
  tags                = local.tre_core_tags

  lifecycle {
    ignore_changes = [tags]
  }
}

# Since shared services are in the core network, their DNS link can exist only once and must be defined here.
resource "azurerm_private_dns_zone_virtual_network_link" "mysql" {
  resource_group_name   = azurerm_resource_group.core.name
  virtual_network_id    = module.network.core_vnet_id
  private_dns_zone_name = azurerm_private_dns_zone.non_core["privatelink.mysql.database.azure.com"].name
  name                  = azurerm_private_dns_zone.non_core["privatelink.mysql.database.azure.com"].name
  registration_enabled  = false
  tags                  = local.tre_core_tags

  lifecycle {
    ignore_changes = [tags]
  }
}

# Once the app gateway deployment is complete, we can include the DNS zone required for Nexus,
# which depends on the FQDN of the app gateway.
resource "azurerm_private_dns_zone" "nexus" {
  name                = "nexus-${module.appgateway.app_gateway_fqdn}"
  resource_group_name = azurerm_resource_group.core.name
  tags                = local.tre_core_tags

  lifecycle {
    ignore_changes = [tags]
  }
}
AzureTRE/core/terraform/dns_zones_non_core.tf/0
{ "file_path": "AzureTRE/core/terraform/dns_zones_non_core.tf", "repo_id": "AzureTRE", "token_count": 575 }
109
output "core_resource_group_name" { value = azurerm_resource_group.core.name } output "core_resource_group_location" { value = azurerm_resource_group.core.location } output "log_analytics_name" { value = module.azure_monitor.log_analytics_workspace_name } output "azure_tre_fqdn" { value = module.appgateway.app_gateway_fqdn } output "app_gateway_name" { value = module.appgateway.app_gateway_name } output "static_web_storage" { value = module.appgateway.static_web_storage } output "keyvault_name" { value = azurerm_key_vault.kv.name } output "keyvault_uri" { value = azurerm_key_vault.kv.vault_uri } output "service_bus_resource_id" { value = azurerm_servicebus_namespace.sb.id } output "service_bus_namespace_fqdn" { value = local.service_bus_namespace_fqdn } output "service_bus_workspace_queue" { value = azurerm_servicebus_queue.workspacequeue.name } output "service_bus_deployment_status_queue" { value = azurerm_servicebus_queue.service_bus_deployment_status_update_queue.name } output "state_store_resource_id" { value = azurerm_cosmosdb_account.tre_db_account.id } output "cosmosdb_mongo_resource_id" { value = azurerm_cosmosdb_account.mongo.id } output "state_store_endpoint" { value = azurerm_cosmosdb_account.tre_db_account.endpoint } output "cosmosdb_mongo_endpoint" { value = azurerm_cosmosdb_account.mongo.connection_strings[0] sensitive = true } output "state_store_account_name" { value = azurerm_cosmosdb_account.tre_db_account.name } output "cosmosdb_mongo_account_name" { value = azurerm_cosmosdb_account.mongo.name } output "app_insights_connection_string" { value = module.azure_monitor.app_insights_connection_string sensitive = true } # Make admin deployment values available in private.env output for easier local debugging output "mgmt_storage_account_name" { value = var.mgmt_storage_account_name } output "mgmt_resource_group_name" { value = var.mgmt_resource_group_name } output "terraform_state_container_name" { value = var.terraform_state_container_name } output "registry_server" { value = local.docker_registry_server } output "event_grid_status_changed_topic_endpoint" { value = module.airlock_resources.event_grid_status_changed_topic_endpoint } output "event_grid_airlock_notification_topic_endpoint" { value = module.airlock_resources.event_grid_airlock_notification_topic_endpoint } output "event_grid_status_changed_topic_resource_id" { value = module.airlock_resources.event_grid_status_changed_topic_resource_id } output "event_grid_airlock_notification_topic_resource_id" { value = module.airlock_resources.event_grid_airlock_notification_topic_resource_id } output "service_bus_step_result_queue" { value = module.airlock_resources.service_bus_step_result_queue }
AzureTRE/core/terraform/outputs.tf/0
{ "file_path": "AzureTRE/core/terraform/outputs.tf", "repo_id": "AzureTRE", "token_count": 1021 }
110
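Once the core deployment has been applied, the outputs defined above can be read back individually; for example (run from the `core/terraform` directory):

```bash
terraform output -raw azure_tre_fqdn
terraform output -raw keyvault_name
# All outputs as JSON, e.g. to pick out the Cosmos DB endpoint:
terraform output -json | jq -r '.state_store_endpoint.value'
```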
#!/bin/bash set -euo pipefail # Use this for debug only # set -o xtrace # AZURE_CORE_OUTPUT=jsonc # force CLI output to JSON for the script (user can still change default for interactive usage in the dev container) function show_usage() { cat << USAGE Utility script for creating an application administrator for TRE. This is mandatory and is used to manage AAD Application creation within TRE. This script is called when you run "make auth" and the environment variable AUTO_WORKSPACE_APP_REGISTRATION determines the permission this identity has. You must be logged in using Azure CLI with sufficient privileges to modify Azure Active Directory to run this script. Usage: $0 --name "MYTRE" --application-permission "Application.ReadWrite.OwnedBy" [--admin-consent] Options: -n,--name Required. The prefix for the app (registration) names e.g., "TRE". -a,--admin-consent Optional, but recommended. Grants admin consent for the app registrations, when this flag is set. Requires directory admin privileges to the Azure AD in question. -p,--application-permission The API Permission that this identity will be granted. -r,--reset-password Optional, switch to automatically reset the password. Default 0 USAGE exit 2 } if ! command -v az &> /dev/null; then echo "This script requires Azure CLI" 1>&2 exit 1 fi # Get the directory that this script is in DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" declare grantAdminConsent=0 declare resetPassword=0 declare currentUserId="" declare spId="" declare msGraphUri="" declare appName="" declare applicationPermission="Application.ReadWrite.OwnedBy" # Initialize parameters specified from command line while [[ $# -gt 0 ]]; do case "$1" in -n|--name) appName=$2 shift 2 ;; -a|--admin-consent) grantAdminConsent=1 shift 1 ;; -p|--application-permission) applicationPermission=$2 shift 2 ;; -r|--reset-password) resetPassword=$2 shift 2 ;; *) echo "Invalid option: $1." show_usage ;; esac done ################################### # CHECK INCOMMING PARAMETERS # ################################### if [[ $(az account list --only-show-errors -o json | jq 'length') -eq 0 ]]; then echo "Please run az login -t <tenant> --allow-no-subscriptions" exit 1 fi if [[ -z "$appName" ]]; then echo "Please specify the application name" 1>&2 show_usage fi appName="$appName Application Admin" currentUserId=$(az ad signed-in-user show --query 'id' --output tsv --only-show-errors) msGraphUri="$(az cloud show --query endpoints.microsoftGraphResourceId --output tsv)/v1.0" tenant=$(az rest -m get -u "${msGraphUri}/domains" -o json | jq -r '.value[] | select(.isDefault == true) | .id') echo -e "\e[96mCreating the Application Admin in the \"${tenant}\" Azure AD tenant.\e[0m" # Load in helper functions # shellcheck disable=SC1091 source "${DIR}/get_existing_app.sh" # shellcheck disable=SC1091 source "${DIR}/grant_admin_consent.sh" # shellcheck disable=SC1091 source "${DIR}/wait_for_new_app_registration.sh" # shellcheck disable=SC1091 source "${DIR}/get_msgraph_access.sh" # shellcheck disable=SC1091 source "${DIR}/create_or_update_service_principal.sh" # Get an existing object if it's been created before. 
appObjectId="" existingApp=$(get_existing_app --name "${appName}") || null if [ -n "${existingApp}" ]; then appObjectId=$(echo "${existingApp}" | jq -r '.id') fi # Get the Required Resource Scope/Role msGraphAppId="00000003-0000-0000-c000-000000000000" msGraphObjectId=$(az ad sp show --id ${msGraphAppId} --query "id" --output tsv --only-show-errors) # split permissions into array IFS=',' read -ra applicationPermissions <<< "${applicationPermission}" applicationPermissionIds=() roleApplicationPermissions=() for permission in "${applicationPermissions[@]}"; do # access each element of array applicationPermissionIds+=("$(az ad sp show --id "${msGraphAppId}" --query "appRoles[?value=='${permission}'].id" --output tsv --only-show-errors)") roleApplicationPermissions+=("$(get_msgraph_role "${permission}")") done printf -v roleApplicationPermissionsJson '%s,' "${roleApplicationPermissions[@]}" appDefinition=$(jq -c . << JSON { "displayName": "${appName}", "signInAudience": "AzureADMyOrg", "requiredResourceAccess": [ { "resourceAppId": "${msGraphAppId}", "resourceAccess": [ ${roleApplicationPermissionsJson%,} ] }] } JSON ) # Is the app already registered? if [[ -n ${appObjectId} ]]; then echo "Updating \"${appName}\" with ObjectId \"${appObjectId}\"" az rest --method PATCH --uri "${msGraphUri}/applications/${appObjectId}" --headers Content-Type=application/json --body "${appDefinition}" appId=$(az ad app show --id "${appObjectId}" --query "appId" --output tsv --only-show-errors) echo "App registration with ID \"${appId}\" updated" else echo "Creating a new app registration, \"${appName}\"" appId=$(az rest --method POST --uri "${msGraphUri}/applications" --headers Content-Type=application/json --body "${appDefinition}" --output tsv --query "appId") # Poll until the app registration is found in the listing. wait_for_new_app_registration "${appId}" fi # Make the current user an owner of the application. az ad app owner add --id "${appId}" --owner-object-id "$currentUserId" --only-show-errors # Create a Service Principal for the app. spPassword=$(create_or_update_service_principal "${appId}" "${resetPassword}") spId=$(az ad sp list --filter "appId eq '${appId}'" --query '[0].id' --output tsv --only-show-errors) # Grant admin consent on the required resource accesses (Graph API) if [[ $grantAdminConsent -eq 1 ]]; then echo "Granting admin consent for '${appName}' app (service principal ID ${spId}) - NOTE: Directory admin privileges required for this step" wait_for_new_service_principal "${spId}" for applicationPermissionId in "${applicationPermissionIds[@]}"; do # access each element of array grant_admin_consent "${spId}" "$msGraphObjectId" "${applicationPermissionId}" done fi # Set outputs in configuration file yq -i ".authentication.application_admin_client_id |= \"${appId}\"" config.yaml yq -i ".authentication.application_admin_client_secret |= \"${spPassword}\"" config.yaml echo "application_admin_client_id=\"${appId}\"" echo "application_admin_client_secret=\"${spPassword}\"" if [[ $grantAdminConsent -eq 0 ]]; then echo "NOTE: Make sure the API permissions of the app registrations have admin consent granted." echo "Run this script with flag -a to grant admin consent or configure the registrations in Azure Portal." echo "See APP REGISTRATIONS in documentation for more information." fi
AzureTRE/devops/scripts/aad/create_application_administrator.sh/0
{ "file_path": "AzureTRE/devops/scripts/aad/create_application_administrator.sh", "repo_id": "AzureTRE", "token_count": 2445 }
111
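A typical invocation, mirroring the usage text embedded in the script above (the tenant id and app name are placeholders):

```bash
az login -t <aad-tenant-id> --allow-no-subscriptions
./devops/scripts/aad/create_application_administrator.sh \
  --name "MYTRE" \
  --application-permission "Application.ReadWrite.OwnedBy" \
  --admin-consent \
  --reset-password 1
```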
#!/bin/bash

# This script is designed to be `source`d to create reusable helper functions

function construct_tre_url() {
  tre_id=$1
  location=$2
  azure_environment=$3

  declare -A cloudapp_endpoint_suffixes=( ["AzureCloud"]="cloudapp.azure.com" ["AzureUSGovernment"]="cloudapp.usgovcloudapi.net" )
  domain=${cloudapp_endpoint_suffixes[${azure_environment}]}

  echo https://"${tre_id}"."${location}"."${domain}"
}
AzureTRE/devops/scripts/construct_tre_url.sh/0
{ "file_path": "AzureTRE/devops/scripts/construct_tre_url.sh", "repo_id": "AzureTRE", "token_count": 145 }
112
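Because the file is designed to be sourced, usage looks like this (values are examples; the domain suffix is looked up from the associative array above):

```bash
source devops/scripts/construct_tre_url.sh
construct_tre_url "mytre" "westeurope" "AzureCloud"
# -> https://mytre.westeurope.cloudapp.azure.com
```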
#!/bin/bash set -o errexit set -o pipefail set -o nounset # Uncomment this line to see each command for debugging (careful: this will show secrets!) # set -o xtrace register_template() { local template_dir=$1 local bundle_name bundle_name=$(basename "$template_dir") local bundle_type=$2 local parent_bundle=$3 if [ -f "$template_dir"/porter.yaml ]; then if [ "$bundle_type" == "$parent_bundle" ]; then echo "Registering $bundle_type bundle $bundle_name" make "${bundle_type%s}"_bundle BUNDLE="$bundle_name" else echo "Registering user resource bundle $bundle_name for workspace service $parent_bundle" make user_resource_bundle BUNDLE="$bundle_name" WORKSPACE_SERVICE="$parent_bundle" fi fi } find ./templates -mindepth 1 -maxdepth 1 -type d | while read -r template_type_dir; do template_type=$(basename "$template_type_dir") echo "Registering $template_type" find "$template_type_dir" -mindepth 1 -maxdepth 1 -type d | while read -r template_dir; do template_name=$(basename "$template_dir") echo "Registering $template_name $template_type template" register_template "$template_dir" "$template_type" "$template_type" if [[ "$template_type" == "workspace_services" ]] && [ -d "$template_dir/user_resources" ]; then echo "Registering user resources for $template_name" find "$template_dir/user_resources" -mindepth 1 -maxdepth 1 -type d | while read -r user_resource_template_dir; do register_template "$user_resource_template_dir" "user_resource" "$template_name" done fi done done
AzureTRE/devops/scripts/publish_and_register_all_bundles.sh/0
{ "file_path": "AzureTRE/devops/scripts/publish_and_register_all_bundles.sh", "repo_id": "AzureTRE", "token_count": 563 }
113
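Depending on the template type it discovers, the script above ends up running make targets of the following shape (the bundle names here are examples, not a fixed list):

```bash
make workspace_bundle BUNDLE=base
make workspace_service_bundle BUNDLE=guacamole
make user_resource_bundle BUNDLE=guacamole-azure-linuxvm WORKSPACE_SERVICE=guacamole
make shared_service_bundle BUNDLE=firewall
```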
# Introduction to Authentication and Authorization

[Microsoft Entra ID](https://learn.microsoft.com/en-us/entra/fundamentals/whatis) is the backbone of authentication and authorization in the Trusted Research Environment. Microsoft Entra ID holds the identities of all TRE/workspace users, including administrators, and connects those identities with applications which define the permissions for each user role.

It is common that the Azure Administrator is not also the Microsoft Entra ID Administrator, so this step may have to be carried out by a different individual/team. We have automated this into a simple command, but should you wish, you can run these steps manually.

This page describes the automated Auth setup for TRE.

## Pre-requisites

The automation utilises a `make` command, which reads a few environment variables and creates the Microsoft Entra ID assets. The following values need to be in place before you run the creation process (`/config.yaml`):

| Key | Description |
| ----------- | ----------- |
| TRE_ID | This is used to build up the name of the identities. |
| AAD_TENANT_ID | The tenant id of where your Microsoft Entra ID identities will be placed. This can be different to the tenant where your Azure resources are created. |
| LOCATION | Where your Azure assets will be provisioned (e.g. westeurope). This is used to add a redirect URI from the Swagger UI to the API Application. |
| AUTO_WORKSPACE_APP_REGISTRATION | Default of `false`. Setting this to true grants the `Application.ReadWrite.All` and `Directory.Read.All` permissions to the *Application Admin* identity. This identity is used to manage other Microsoft Entra ID applications that it owns, e.g. Workspaces. If you do not set this, the identity will have `Application.ReadWrite.OwnedBy`. Further information can be found [here](./identities/application_admin.md). |
| AUTO_WORKSPACE_GROUP_CREATION | Default of `false`. Setting this to true grants the `Group.ReadWrite.All` permission to the *Application Admin* identity. This identity can then create security groups aligned to each application role. Microsoft Entra ID licencing implications need to be considered, as group assignment is a [premium feature](https://docs.microsoft.com/en-us/azure/architecture/multitenant-identity/app-roles#roles-using-azure-ad-app-roles). |

## Create Authentication assets

You can build all of the identity assets by running the following at the command line:

```bash
make auth
```

This will create five identities and, if successful, will write the outputs to the authentication section in the `config.yaml` file. If you are building locally, these values will be used when building your TRE. If you are setting this up for CI/CD, then these values will be needed by your Build Orchestrator.

The authentication section in your `config.yaml` file should contain:

| Variable | Description |
| -------- | ----------- |
| `APPLICATION_ADMIN_CLIENT_ID` | This client will administer Microsoft Entra ID Applications for TRE. |
| `APPLICATION_ADMIN_CLIENT_SECRET` | This client will administer Microsoft Entra ID Applications for TRE. |
| `TEST_ACCOUNT_CLIENT_ID` | This will be created by default, but can be disabled by editing `/devops/scripts/create_aad_assets.sh`. This is the user that will run the tests for you. |
| `TEST_ACCOUNT_CLIENT_SECRET` | This will be created by default, but can be disabled by editing `/devops/scripts/create_aad_assets.sh`. This is the user that will run the tests for you. |
| `API_CLIENT_ID` | API application (client) ID. |
| `API_CLIENT_SECRET` | API application client secret. |
| `SWAGGER_UI_CLIENT_ID` | Swagger (OpenAPI) UI application (client) ID. |
| `WORKSPACE_API_CLIENT_ID` | Each workspace is secured behind its own AD Application. |
| `WORKSPACE_API_CLIENT_SECRET` | Each workspace is secured behind its own AD Application. This is the secret for that application. |

### Using a separate Microsoft Entra ID tenant

!!! caution
    This section is only relevant if you are setting up a separate Microsoft Entra ID tenant for use. This is only recommended for development environments when you don't have the required permissions to register applications in Microsoft Entra ID. Using a separate Microsoft Entra ID tenant will prevent you from using certain Microsoft Entra ID integrated services. For production deployments, work with your Microsoft Entra ID administrator to perform the required registration.

1. Create a Microsoft Entra ID tenant

    To create a new Microsoft Entra ID tenant, [follow the steps here](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-create-new-tenant)

1. Follow the steps outlined above. `make auth` should log on to the correct tenant. Make sure you log back on to the correct tenant before running `make all`.

## App registrations

App registrations (represented by service principals) define the various access permissions to the TRE system. There are a total of five main applications of interest.

| Microsoft Entra ID Application | Description |
| ----------- | ----------- |
| TRE API application | This is the main application and is used to secure access to the [TRE API](../tre-developers/api.md). |
| TRE UX | This is the client application that will authenticate to the TRE/Workspace APIs. |
| Application Admin | There are times when workspace services need to update the Microsoft Entra ID Application. For example, Guacamole needs to add a redirect URI to the Workspace Microsoft Entra ID Application. This identity is used to manage Microsoft Entra ID Applications. |
| Automation App | This application is created so that you can run the tests or any CI/CD capability without the need to divulge a user password. This is particularly important if your tenant is MFA enabled. |
| Workspace API | Typically you would have an application securing one or more workspaces that are created by TRE. |

Some of the applications require **admin consent** to allow them to validate users against Microsoft Entra ID. Check the Microsoft Docs on [Configure the admin consent workflow](https://learn.microsoft.com/en-us/entra/identity/enterprise-apps/configure-admin-consent-workflow) for how to request admin consent and handle admin consent requests.

We strongly recommend that you use `make auth` to create the Microsoft Entra ID assets, as this has been tested extensively. Should you wish to create these manually via the [Azure Portal](https://learn.microsoft.com/en-gb/entra/identity-platform/quickstart-register-app), more information can be found [here](./identities/auth-manual.md).

### Enabling users

For a user to gain access to the system, they have to:

1. Have an identity in Microsoft Entra ID
1. Be linked with an app registration and assigned a role

When these requirements are met, the user can sign in using their credentials and use their privileges to use the API, log in to the workspace environment etc. based on their specific roles.
![User linked with app registrations](../assets/aad-user-linked-with-app-regs.png) The users can also be linked via the Enterprise application view: ![Adding users to Enterprise application](../assets/adding-users-to-enterprise-application.png)
AzureTRE/docs/tre-admins/auth.md/0
{ "file_path": "AzureTRE/docs/tre-admins/auth.md", "repo_id": "AzureTRE", "token_count": 1790 }
114
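After `make auth` completes, the generated values can be checked directly in `config.yaml` with `yq` (which the auth scripts already use). The `application_admin_client_id` key is written by the script shown earlier; the other key names are assumptions based on the variables in the table above, lower-cased:

```bash
yq '.authentication.application_admin_client_id' config.yaml
yq '.authentication.api_client_id' config.yaml            # assumed key name
yq '.authentication.workspace_api_client_id' config.yaml  # assumed key name
```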
# Installing base workspace

## Publishing and registering the base workspace bundle

Run the following in a terminal to build, publish and register the base workspace bundle:

```cmd
make workspace_bundle BUNDLE=base
```

This will prepare the template for use with your TRE.

## Creating a base workspace

Now that we have published and registered a base workspace bundle, we can use the deployed API to create a base workspace.

!!! info
    All routes are auth protected. Click the green **Authorize** button to receive a token for the Swagger client.

As explained in the [auth guide](../auth.md), every workspace has a corresponding app registration which, if you haven't run `make auth`, can be created using the helper script `./devops/scripts/aad/create_workspace_application.sh`. For example:

```bash
./devops/scripts/aad/create_workspace_application.sh \
    --name "${TRE_ID} - workspace 1" \
    --admin-consent \
    --ux-clientid "${SWAGGER_UI_CLIENT_ID}" \
    --automation-clientid "${TEST_ACCOUNT_CLIENT_ID}" \
    --application-admin-clientid "${APPLICATION_ADMIN_CLIENT_ID}"
```

!!! caution
    If you're using a separate tenant for Microsoft Entra ID app registrations to the one where you've deployed the TRE infrastructure resources, ensure you've signed into that tenant in the `az cli` before running the above command. See **Using a separate Microsoft Entra ID tenant** in [Setup Auth configuration](setup-auth-entities.md) for more details.

Running the script will report `workspace_api_client_id` and `workspace_api_client_secret` for the generated app. Add these under the authentication section in `/config.yaml` so that automated testing will work. You also need to use `workspace_api_client_id` in the POST body below.

### Create workspace using the API

Go to `https://<azure_tre_fqdn>/api/docs` and use POST `/api/workspaces` with the sample body to create a base workspace.

```json
{
  "templateName": "tre-workspace-base",
  "properties": {
    "display_name": "manual-from-swagger",
    "description": "workspace for team X",
    "client_id": "<WORKSPACE_API_CLIENT_ID>",
    "client_secret": "<WORKSPACE_API_CLIENT_SECRET>",
    "address_space_size": "medium"
  }
}
```

The API will return an `operation` object with a `Location` header to query the operation status, as well as the `resourceId` and `resourcePath` properties to query the resource under creation. You can also follow the progress in the Azure portal as various resources come up.

Workspace level operations can now be carried out using the workspace API, at `/api/workspaces/<workspace_id>/docs/`.

## Next steps

* [Installing a workspace service & user resources](./installing-workspace-service-and-user-resource.md)
AzureTRE/docs/tre-admins/setup-instructions/installing-base-workspace.md/0
{ "file_path": "AzureTRE/docs/tre-admins/setup-instructions/installing-base-workspace.md", "repo_id": "AzureTRE", "token_count": 790 }
115
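The same request can be sent without the Swagger UI, e.g. with curl; `TOKEN` and `TRE_FQDN` are placeholders for a bearer token and the TRE FQDN you already have, and the body matches the sample above:

```bash
curl -X POST "https://${TRE_FQDN}/api/workspaces" \
  -H "Authorization: Bearer ${TOKEN}" \
  -H "Content-Type: application/json" \
  -d '{
        "templateName": "tre-workspace-base",
        "properties": {
          "display_name": "manual-from-curl",
          "description": "workspace for team X",
          "client_id": "<WORKSPACE_API_CLIENT_ID>",
          "client_secret": "<WORKSPACE_API_CLIENT_SECRET>",
          "address_space_size": "medium"
        }
      }'
```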
# TRE API The TRE API is a service that users can interact with to request changes to workspaces e.g., to create, update, delete workspaces and workspace services inside each workspace. This page is a guide for a developer looking to make a change to the API and debug it. ## Repository folder structure ```text api_app ├── api - API implementation │ ├── dependencies - Dependencies for routes definition │ ├── errors - Definitions of error handlers │ └── routes - Web routes (API endpoints) │ ├── core - Application configuration, startup events, logging │ ├── db - Database related implementation │ ├── migrations - Manually written alembic migrations │ └── repositories - All CRUD features │ ├── models - Pydantic models for this application │ ├── domain - Main models that are used almost everywhere │ └── schemas - Schemas for using in web routes │ ├── resources - Strings that are used e.g., in web responses │ ├── services - Logic that is not only CRUD related (authentication, logging, tracing, etc) │ ├── tests_ma - Unit tests │ └── main.py - FastAPI application creation and configuration ``` ## Unit tests The unit tests are written with pytest and located in folder `/api_app/tests_ma/`. Run all unit tests with the following command in the root folder of the repository: ```cmd pytest --ignore=e2e_tests ``` Please refer to a different [guide on running E2E tests locally](end-to-end-tests.md). ## Local debugging To set up local debugging, first run (if you haven't done so already) ```cmd az login make setup-local-debugging ``` Next, go to "Run and Debug" panel in VSCode, and select TRE API. [![VSCode API debugging screenshot](../assets/api_local_debugging_vscode_screenshot.png)](../assets/api_local_debugging_vscode_screenshot.png) You will see a log similar to this: [![Local API debugging screenshot](../assets/api_local_debugging_log.png)](../assets/api_local_debugging_log.png) Now, you should be able to open Swagger UI for your local instance on [http://localhost:8000/api/docs](http://localhost:8000/api/docs). ## Cloud instance On Azure Portal, find an App Service instance named `app-${TRE_ID}`. ### API logs in LogAnalytics To find logs in LogAnalytics, go to your resource group, then to LogAnalytics instance, which is named like `log-${TRE_ID}`. There, you can run a query like ```cmd AppTraces | where AppRoleName == "api" | order by TimeGenerated desc ``` ### API logs using deployment center Check that the version you are debugging/troubleshooting is the same one deployed on the App Service. You can check this in Deployment Center, or follow the logs as generated by the container in the logs tabs. ![Deployment Center](../assets/api_deployment_center.png) ### Deploying a cloud instance To deploy a new version of the API to your TRE deployment, do this: 1. Update the version in `api_app/_version.py` 2. Run ```cmd make build-and-push-api make deploy-core ``` ## Using Swagger UI Swagger UI lets you send requests to the API. To send a request, click on the row with the request, then `Try out`, then `Execute`. See screenshot: [![Swagger UI Send Request](../assets/api_swagger_send_request.png)](../assets/api_swagger_send_request.png) ### Authorization Swagger UI is accessible under `https://${TRE_ID}.${LOCATION}.cloudapp.azure.com/api/docs`. To start using it, click Authorize button, then click Authorize (leave the field `client_secret` empty). 
See screenshot: [![Swagger UI Authorize](../assets/api_swagger_ui_authorize.png)](../assets/api_swagger_ui_authorize.png) If you see an error message saying something like: ```text The redirect URI 'https://tanyagts8.westeurope.cloudapp.azure.com/api/docs/oauth2-redirect' specified in the request does not match the redirect URIs configured for the application '558602a8-764a-453c-8efa-4dc3ddd61570'. ``` Then you'll need to update the redirect URI (see below). Otherwise, after you sign in on Azure Portal, the lock icon on Authorize button should look "locked". Then you can start executing queries. See also [setup instructions](../tre-admins/setup-instructions/deploying-azure-tre/#validate-the-deployment). All workspace operations are executed using a different URL: `https://${TRE_ID}.${LOCATION}.cloudapp.azure.com/api/workspaces/${WORKSPACE_ID}/docs`. See also [instructions on installing base workspace](../tre-admins/setup-instructions/installing-base-workspace). ### Updating the redirect URI If you get the following error: [![Swagger UI Auth Error](../assets/api_swagger_ui_auth_error.png)](../assets/api_swagger_ui_auth_error.png) You should run: ```cmd make auth ``` Alternatively, in Azure Portal you can add the redirect URL to the App Registration. Under Microsoft Entra ID, find App Registrations, and find the App Registration with the ID shown in the error message. There, go to Redirect URL and add the URL given to you by the error message (it will have a form of `https://${TRE_ID}.westeurope.cloudapp.azure.com/api/docs/oauth2-redirect`). ## Troubleshooting ### Wrong docker image version If this happens, you will see a log similar to this: `2022-05-10T05:34:48.844Z ERROR - DockerApiException: Docker API responded with status code=NotFound, response={"message":"manifest for tborisdevtreacr.azurecr.io/microsoft/azuretre/api:0.2.24 not found: manifest unknown: manifest tagged by \"0.2.24\" is not found"}` To fix, run `make build-and-push-api` from your branch and restart the instance. ### Investigating /api/health response The endpoint `/api/health` tracks health of not only API, but other components of the system too, and can help to narrow down any problems with your deployment: ```json { "services": [ { "service": "Cosmos DB", "status": "OK", "message": "" }, { "service": "Service Bus", "status": "OK", "message": "" }, { "service": "Resource Processor", "status": "Not OK", "message": "Resource Processor is not responding" } ] } ``` In this case, next step is to look at logs of Resource Processor. See also [Resource Processor docs](resource-processor.md).
AzureTRE/docs/tre-developers/api.md/0
{ "file_path": "AzureTRE/docs/tre-developers/api.md", "repo_id": "AzureTRE", "token_count": 2011 }
116
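A quick way to check the health endpoint described above from a terminal (whether a bearer token is required depends on how your deployment is configured):

```bash
curl -s "https://${TRE_ID}.${LOCATION}.cloudapp.azure.com/api/health" | jq
```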
# Guacamole User Resource Service bundle (Windows)

This is a User Resource Service template. It defines a Windows 10/Server 2019 VM for TRE researchers, accessed through a [Guacamole server](https://guacamole.apache.org/). It blocks all inbound and outbound traffic to the internet and allows only RDP connections from within the vnet.

## Prerequisites

- [A base workspace bundle installed](../workspaces/base.md)
- [A guacamole workspace service bundle installed](../workspace-services/guacamole.md)
AzureTRE/docs/tre-templates/user-resources/guacamole-windows-vm.md/0
{ "file_path": "AzureTRE/docs/tre-templates/user-resources/guacamole-windows-vm.md", "repo_id": "AzureTRE", "token_count": 138 }
117
# Unrestricted workspace

The unrestricted workspace template allows unrestricted access to the Internet from inside the workspace virtual network. This is useful for working on open data sets where data exfiltration is not a concern.

This workspace template builds upon the base workspace template by adding additional firewall rules and disabling the airlock.
AzureTRE/docs/tre-templates/workspaces/unrestricted.md/0
{ "file_path": "AzureTRE/docs/tre-templates/workspaces/unrestricted.md", "repo_id": "AzureTRE", "token_count": 71 }
118
# Terms and Definitions Trusted Research Environments (TRE) enforce a secure boundary around distinct workspaces to enable information governance controls to be enforced. ![Concepts](../assets/treconcepts_updated.png) A Trusted Research Environment (typically one per organization, or one per department in large organizations) consist of: - One **Composition Service** (API, deployment engine etc. used to manage and deploy workspaces, workspace services and user resources) - One set of **Shared Services** used by all workspaces - A number of **Workspaces**, where each workspace is its own security boundary, and in turn contains **Workspace Services** and User Resources ## The Composition Service The Composition Service offers an abstraction over the lower-level Azure resources to allow for TRE users to provision resources in terms of workspaces and workspace services. The Composition Service reconciles the desired state with the actual state by invoking Azure resource deployments. The composition service is fronted by an API that helps the TRE Admin, TRE Workspace Owners and TRE Researchers create and manage the **workspaces** and **workspace services**. ## Shared Services A service provides one or more capabilities to you as a user of the TRE or to the TRE itself. Depending on the type of the service it is scoped to the environment and shared across all workspaces (Shared Service) or scoped to a specific workspace (Workspace Service). The types of services required for a research project varies greatly why extensibility is a key aspect of the Azure TRE solution. New services can be developed by you and your organization to fit your needs. **Shared Services** are services and resource shared by all workspaces. These services are created once, when the TRE is deployed and managed by the TRE Administrator. Examples of shared services are: - Firewall - Package Mirror - Git Mirror ## Workspace A **workspace** is a set of resources on a network, with inbound traffic restricted to authorised users, and outbound access restricted to defined network locations. The workspace is a security boundary and there should be zero transfer of data out from the workspace unless explicitly configured. Data transfer is not restricted within a workspace. The workspace itself contains only the bare essentials to provide this functionality, such as virtual network(s), storage etc. Workspaces can be enhanced with one or more building blocks called **workspace services** like Azure ML, Guacamole etc. to allow functionality such as development of machine learning models, data engineering, data analysis and software development. Multiple workspaces can be created within a single Trusted Research Environment to enable the required separation for your projects. Each workspace has [workspace users](../azure-tre-overview/user-roles.md): a **workspace owner** (normally only one), and one or more **workspace researchers** that can access the data and workspace services in the workspace. The workspace owner is also considered a workspace researcher. ## Workspace Service A workspace service is a service, created as a building block, with pre-configured set of resources that can be applied to a workspace. Examples of Workspace Services are: - Guacamole (Virtual Desktops) - Azure Machine Learning Unlike shared services, a workspace service is only accessible to the workspace users. 
Some workspace services, such as Guacamole, allow users to add on user-specific resources (user resources). All workspace services can be deployed to all workspaces. ## User Resource A user resource is a resource that is only available to a particular researcher. For example, a virtual machine exposed by Guacamole. User resources can be deployed to workspaces with a compatible workspace service. For example, Guacamole VMs can only be deployed to workspaces where the Guacamole workspace service is deployed. ## Templates In order to deploy resources (workspaces, workspace services, user resources), the resources have to be defined in templates. A template contains everything needed to create an instance of the resource, for example a base workspace template or a Guacamole workspace service template. The templates describe the Porter bundles used, and the input parameters needed to deploy them. To use a template and deploy a resource, the template first needs to be registered in the TRE. This is done using the TRE API, as illustrated in the sketch below. !!! tip Once a template is registered it can be used multiple times to deploy multiple workspaces, workspace services etc. If you want to author your own workspace, workspace service, or user resource template, consult the [template authoring guide](../tre-workspace-authors/authoring-workspace-templates.md).
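The sketch below is illustrative only: the endpoint path, payload fields and authentication flow are assumptions used to make the registration step concrete, not the authoritative API contract; consult the TRE API documentation for the real one.

```python
# Illustrative sketch: endpoint path, payload fields and auth flow are assumptions.
import requests

TRE_URL = "https://mytre.example.com"      # hypothetical TRE FQDN
TOKEN = "<bearer token for a TRE Admin>"   # acquired from your identity provider

template = {
    "name": "my-workspace-template",       # hypothetical template name
    "version": "0.1.0",
    "current": True,
    "json_schema": {
        "$schema": "http://json-schema.org/draft-07/schema",
        "type": "object",
        "title": "My Workspace Template",
        "required": [],
        "properties": {},
    },
}

# Register the workspace template with the TRE API (assumed route).
response = requests.post(
    f"{TRE_URL}/api/workspace-templates",
    json=template,
    headers={"Authorization": f"Bearer {TOKEN}"},
    timeout=30,
)
response.raise_for_status()
print(response.json())
```

Once registered, the same template can be used repeatedly to deploy workspaces; registering a newer version is the same call with an updated `version` field.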
AzureTRE/docs/using-tre/terms-definitions.md/0
{ "file_path": "AzureTRE/docs/using-tre/terms-definitions.md", "repo_id": "AzureTRE", "token_count": 1042 }
119
# Maintainers This document is targeted at maintainers of the AzureTRE project. For information on developing and contributing to AzureTRE, see the [TRE Developers docs](https://microsoft.github.io/AzureTRE/tre-developers/) ## PR Comment bot commands **Notes** - these commands are not immediate - you need to wait for the GitHub action that performs the task to start up. - builds triggered via these commands will use the workflow definitions from `main`. To test workflow changes before merging to `main`, the changes need to be pushed to a branch in the main repo and then the `deploy_tre_branch.yml` workflow can be run against that branch. These commands can only be run when commented by a user who is identified as a repo collaborator (see [granting access to run commands](#granting-access-to-run-commands)) ### `/help` This command will cause the pr-comment-bot to respond with a comment listing the available commands. ### `/test [<sha>]` This command runs the build, deploy, and smoke tests for a PR. For PRs from maintainers (i.e. users with write access to microsoft/AzureTRE), `/test` is sufficient. For other PRs, the checks below should be carried out. Once satisfied that the PR is safe to run tests against, you should use `/test <sha>` where `<sha>` is the SHA for the commit that you have verified. You can use the full or short form of the SHA, but it must be at least 7 characters (GitHub UI shows 7 characters). **IMPORTANT** This command works on PRs from forks, and makes the deployment secrets available. Before running tests on a PR, ensure that there are no changes in the PR that could have unintended consequences (e.g. leak secrets or perform undesirable operations in the testing subscription). Check for changes to anything that is run during the build/deploy/test cycle, including: - modifications to workflows (including adding new actions or changing versions of existing actions) - modifications to the Makefile - modifications to scripts - new python packages being installed ### `/test-extended [<sha>]` / `/test-extended-aad [<sha>]`/ `/test-shared-services [<sha>]` This command runs the build, deploy, and smoke & extended / shared services tests for a PR. For PRs from maintainers (i.e. users with write access to microsoft/AzureTRE), `/test-extended` is sufficient. If a change has been made which would affect any of the core shared services, make sure you run `/test-shared-services`. For other PRs, the checks below should be carried out. Once satisfied that the PR is safe to run tests against, you should use `/test-extended <sha>` where `<sha>` is the SHA for the commit that you have verified. You can use the full or short form of the SHA, but it must be at least 7 characters (GitHub UI shows 7 characters). **IMPORTANT** As with `/test`, this command works on PRs from forks, and makes the deployment secrets available. Before running tests on a PR, run the same checks on the PR code as for `/test`. ### `/test-destroy-env` When running `/test` multiple times on a PR, the same TRE ID and environment are used by default. The `/test-destroy-env` command destroys a previously created validation environment, allowing you to re-run `/test` with a clean starting point. The `/test-destroy-env` command also destroys the environment associated with the PR branch (created by running the `deploy_tre_branch` workflow). ### `/test-force-approve` This command skips running tests for a build and marks the checks as completed. 
This is intended to be used in scenarios where running the tests for a PR doesn't add value (for example, changing a workflow file that is always pulled from the default branch). ## Granting access to run commands Currently, the GitHub API call used to determine whether a user is a collaborator doesn't seem to respect permissions that a user is granted via a group (see the sketch below). As a result, users need to be directly granted `write` permission in the repo to be able to run the comment bot commands. ## Periodic tasks ### [quarterly] Upgrade bundles' Terraform providers Each bundle uses Terraform providers to deploy itself. The providers are pinned to specific versions for stability and consistency between builds. This, however, requires us to update them manually by referencing newer versions in the provider blocks and associated lock files (`devops/scripts/upgrade_lock_files.sh` can help).
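For context, the collaborator check referred to above boils down to a single call against the public GitHub REST API; the sketch below shows its general shape and is not the comment bot's actual implementation.

```python
# Minimal sketch of the collaborator check described above, using the public GitHub REST API.
import requests

def is_direct_collaborator(owner: str, repo: str, username: str, token: str) -> bool:
    """Return True if the user is reported as a repo collaborator (HTTP 204), False on 404."""
    response = requests.get(
        f"https://api.github.com/repos/{owner}/{repo}/collaborators/{username}",
        headers={
            "Authorization": f"Bearer {token}",
            "Accept": "application/vnd.github+json",
        },
        timeout=30,
    )
    # Group-based grants may not be reflected here, which is why direct `write`
    # permission is required to run the comment bot commands.
    return response.status_code == 204
```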
AzureTRE/maintainers.md/0
{ "file_path": "AzureTRE/maintainers.md", "repo_id": "AzureTRE", "token_count": 1094 }
120
#!/bin/bash set -e get_latest_release() { curl --silent "https://api.github.com/repos/$1/releases/latest" | grep '"tag_name":' | sed -E 's/.*"v([^"]+)".*/\1/' } VERSION=${1:-"$(get_latest_release hashicorp/terraform)"} INSTALL_DIR=${2:-"$HOME/.local/bin"} CMD=terraform NAME=Terraform echo -e "\e[34m»»» 📦 \e[32mInstalling \e[33m$NAME v$VERSION\e[0m ..." curl -sSL "https://releases.hashicorp.com/terraform/${VERSION}/terraform_${VERSION}_linux_amd64.zip" -o /tmp/tf.zip unzip /tmp/tf.zip -d /tmp > /dev/null mkdir -p "$INSTALL_DIR" mv /tmp/terraform "$INSTALL_DIR" rm -f /tmp/tf.zip echo -e "\n\e[34m»»» 💾 \e[32mInstalled to: \e[33m$(which $CMD)" echo -e "\e[34m»»» 💡 \e[32mVersion details: \e[39m$($CMD --version)"
AzureTRE/resource_processor/scripts/terraform.sh/0
{ "file_path": "AzureTRE/resource_processor/scripts/terraform.sh", "repo_id": "AzureTRE", "token_count": 347 }
121
{ "definition": { "$schema": "https://schema.management.azure.com/providers/Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#", "actions": { "Initialize_message_variable": { "inputs": { "variables": [ { "name": "message", "type": "string" } ] }, "runAfter": { "Parse_JSON": [ "Succeeded" ] }, "type": "InitializeVariable" }, "Initialize_recipients_variable": { "inputs": { "variables": [ { "name": "recipients", "type": "array" } ] }, "runAfter": { "Initialize_message_variable": [ "Succeeded" ] }, "type": "InitializeVariable" }, "Parse_JSON": { "inputs": { "content": "@triggerOutputs()?['body']?['contentData']", "schema": { "properties": { "data": { "properties": { "event_type": { "type": "string" }, "request": { "type": "object", "properties": { "id": { "type": "string" }, "createdWhen": { "type": "number" }, "createdBy": { "type": "object", "properties": { "name": { "type": "string" }, "email": { "type": "string" } } }, "updatedWhen": { "type": "number" }, "updatedBy": { "type": "object", "properties": { "name": { "type": "string" }, "email": { "type": "string" } } }, "requestType": { "type": "string" }, "files": { "type": "array", "items": { "name": { "type": "string" }, "size": { "type": "number" } } }, "status": { "type": "string" }, "business_justification": { "type": "string" } } }, "workspace": { "type": "object", "properties": { "id": { "type": "string" }, "display_name": { "type": "string" }, "description": { "type": "string" } } }, "recipient_emails_by_role": { "type": "object", "properties": { "workspace_researcher": { "type": "array", "items": { "type": "string" } }, "workspace_owner": { "type": "array", "items": { "type": "string" } }, "airlock_manager": { "type": "array", "items": { "type": "string" } } } } }, "type": "object" } }, "type": "object" } }, "runAfter": {}, "type": "ParseJson" }, "Send_Email_with_SMTP": { "inputs": { "parameters": { "body": "<a href=\"@{parameters('tre_url')}/workspaces/@{body('Parse_JSON')?['data']?['workspace']?['id']}/requests/@{body('Parse_JSON')?['data']?['request']?['id']}\">View the request</a>", "from": "@parameters('smtp_from_email')", "importance": "Normal", "subject": "@variables('message')", "to": "@{join(variables('recipients'), ';')}" }, "serviceProviderConfiguration": { "connectionName": "Smtp", "operationId": "sendEmail", "serviceProviderId": "/serviceProviders/Smtp" } }, "runAfter": { "Switch_on_request_status": [ "Succeeded" ] }, "type": "ServiceProvider" }, "Succeeded": { "inputs": { "runStatus": "Succeeded" }, "runAfter": { "Send_Email_with_SMTP": [ "Succeeded" ] }, "type": "Terminate" }, "Switch_on_request_status": { "cases": { "Case_approved": { "actions": { "Set_approved_message": { "inputs": { "name": "message", "value": "Your Airlock request was approved" }, "runAfter": { "Set_recipients_as_researchers_emails": [ "Succeeded" ] }, "type": "SetVariable" }, "Set_recipients_as_researchers_emails": { "inputs": { "name": "recipients", "value": "@body('Parse_JSON')?['data']?['recipient_emails_by_role']?['workspace_researcher']" }, "runAfter": {}, "type": "SetVariable" } }, "case": "approved" }, "Case_in_review": { "actions": { "Set_in_review_message": { "inputs": { "name": "message", "value": "An Airlock request needs your review" }, "runAfter": { "Set_recipients_as_owners_emails": [ "Succeeded" ] }, "type": "SetVariable" }, "Set_recipients_as_owners_emails": { "inputs": { "name": "recipients", "value": "@body('Parse_JSON')?['data']?['recipient_emails_by_role']?['airlock_manager']" }, "runAfter": {}, "type": "SetVariable" 
} }, "case": "in_review" } }, "default": { "actions": { "Cancelled": { "inputs": { "runStatus": "Cancelled" }, "runAfter": {}, "type": "Terminate" } } }, "expression": "@body('Parse_JSON')?['data']?['request']?['status']", "runAfter": { "Initialize_recipients_variable": [ "Succeeded" ] }, "type": "Switch" } }, "contentVersion": "1.0.0.0", "outputs": {}, "triggers": { "When_messages_are_available_in_a_queue": { "inputs": { "parameters": { "isSessionsEnabled": false, "queueName": "notifications" }, "serviceProviderConfiguration": { "connectionName": "serviceBus", "operationId": "receiveQueueMessages", "serviceProviderId": "/serviceProviders/serviceBus" } }, "splitOn": "@triggerOutputs()?['body']", "type": "ServiceProvider" } } }, "kind": "Stateful" }
AzureTRE/templates/shared_services/airlock_notifier/app/AirlockNotifier/workflow.json/0
{ "file_path": "AzureTRE/templates/shared_services/airlock_notifier/app/AirlockNotifier/workflow.json", "repo_id": "AzureTRE", "token_count": 5732 }
122
variable "tre_id" { type = string description = "Unique TRE ID" } variable "tre_resource_id" { type = string description = "Resource ID" } variable "tre_url" { type = string description = "TRE URL" default = "" } variable "smtp_server_address" { type = string } variable "smtp_username" { type = string } variable "smtp_password" { type = string sensitive = true } variable "smtp_from_email" { type = string } variable "smtp_server_port" { type = string } variable "smtp_server_enable_ssl" { type = bool default = false }
AzureTRE/templates/shared_services/airlock_notifier/terraform/variables.tf/0
{ "file_path": "AzureTRE/templates/shared_services/airlock_notifier/terraform/variables.tf", "repo_id": "AzureTRE", "token_count": 235 }
123
# See https://microsoft.github.io/AzureTRE/tre-developers/letsencrypt/ resource "azurerm_storage_account" "staticweb" { name = local.staticweb_storage_name resource_group_name = data.azurerm_resource_group.rg.name location = data.azurerm_resource_group.rg.location account_kind = "StorageV2" account_tier = "Standard" account_replication_type = "LRS" enable_https_traffic_only = true allow_nested_items_to_be_public = false tags = local.tre_shared_service_tags static_website { index_document = "index.html" error_404_document = "404.html" } lifecycle { ignore_changes = [tags] } } resource "azurerm_role_assignment" "stgwriter" { scope = azurerm_storage_account.staticweb.id role_definition_name = "Storage Blob Data Contributor" principal_id = data.azurerm_user_assigned_identity.resource_processor_vmss_id.principal_id }
AzureTRE/templates/shared_services/certs/terraform/staticweb.tf/0
{ "file_path": "AzureTRE/templates/shared_services/certs/terraform/staticweb.tf", "repo_id": "AzureTRE", "token_count": 472 }
124
variable "tre_id" { type = string description = "Unique TRE ID" } variable "tre_resource_id" { type = string description = "Resource ID" } variable "gitea_allowed_fqdns" { type = string description = "comma seperated string of allowed FQDNs for Gitea" default = "github.com, www.github.com, api.github.com, git-lfs.github.com, *githubusercontent.com" } variable "gitea_storage_limit" { type = number description = "Space allocated in GB for the Gitea data in Azure Files Share" default = 1024 } variable "mgmt_resource_group_name" { type = string description = "Resource group name for TRE management" } variable "acr_name" { type = string description = "Name of Azure Container Registry" } variable "arm_environment" { type = string } variable "sql_sku" { type = string }
AzureTRE/templates/shared_services/gitea/terraform/variables.tf/0
{ "file_path": "AzureTRE/templates/shared_services/gitea/terraform/variables.tf", "repo_id": "AzureTRE", "token_count": 320 }
125
{ "name": "docker", "online": true, "storage": { "blobStoreName": "default", "strictContentTypeValidation": true, "write_policy": "ALLOW" }, "proxy": { "remoteUrl": "https://download.docker.com/linux/ubuntu/", "contentMaxAge": 1440, "metadataMaxAge": 1440 }, "negativeCache": { "enabled": true, "timeToLive": 1440 }, "httpClient": { "blocked": false, "autoBlock": false, "connection": { "retries": 0, "userAgentSuffix": "string", "timeout": 60, "enableCircularRedirects": false, "enableCookies": false, "useTrustStore": false } }, "apt": { "distribution": "bionic", "flat": false }, "baseType": "apt", "repoType": "proxy" }
AzureTRE/templates/shared_services/sonatype-nexus-vm/scripts/nexus_repos_config/docker_proxy_conf.json/0
{ "file_path": "AzureTRE/templates/shared_services/sonatype-nexus-vm/scripts/nexus_repos_config/docker_proxy_conf.json", "repo_id": "AzureTRE", "token_count": 372 }
126
locals { short_service_id = substr(var.tre_resource_id, -4, -1) short_workspace_id = substr(var.workspace_id, -4, -1) core_vnet = "vnet-${var.tre_id}" core_resource_group_name = "rg-${var.tre_id}" workspace_resource_name_suffix = "${var.tre_id}-ws-${local.short_workspace_id}" service_resource_name_suffix = "${var.tre_id}-ws-${local.short_workspace_id}-svc-${local.short_service_id}" workspace_name = lower("ml-${substr(local.service_resource_name_suffix, -30, -1)}") acr_name = lower(replace("acr${substr(local.service_resource_name_suffix, -8, -1)}", "-", "")) keyvault_name = lower("kv-${substr(local.workspace_resource_name_suffix, -20, -1)}") storage_name = lower(replace("stg${substr(local.service_resource_name_suffix, -8, -1)}", "-", "")) tre_workspace_service_tags = { tre_id = var.tre_id tre_workspace_id = var.workspace_id tre_workspace_service_id = var.tre_resource_id } }
AzureTRE/templates/workspace_services/azureml/terraform/locals.tf/0
{ "file_path": "AzureTRE/templates/workspace_services/azureml/terraform/locals.tf", "repo_id": "AzureTRE", "token_count": 533 }
127
data "azurerm_resource_group" "ws" { name = "rg-${local.workspace_resource_name_suffix}" } data "azurerm_virtual_network" "ws" { name = "vnet-${local.workspace_resource_name_suffix}" resource_group_name = data.azurerm_resource_group.ws.name } data "azurerm_subnet" "aml" { name = "AMLSubnet${local.short_service_id}" virtual_network_name = data.azurerm_virtual_network.ws.name resource_group_name = data.azurerm_virtual_network.ws.resource_group_name } data "azurerm_machine_learning_workspace" "workspace" { name = local.aml_workspace_name resource_group_name = data.azurerm_resource_group.ws.name }
AzureTRE/templates/workspace_services/azureml/user_resources/aml_compute/terraform/data.tf/0
{ "file_path": "AzureTRE/templates/workspace_services/azureml/user_resources/aml_compute/terraform/data.tf", "repo_id": "AzureTRE", "token_count": 280 }
128
locals { databricks_subnets = cidrsubnets(var.address_space, 1, 1) container_subnet_address_space = local.databricks_subnets[0] # .0 - .127 host_subnet_address_space = local.databricks_subnets[1] # .128 - .254 short_service_id = substr(var.tre_resource_id, -4, -1) short_workspace_id = substr(var.workspace_id, -4, -1) workspace_resource_name_suffix = "${var.tre_id}-ws-${local.short_workspace_id}" service_resource_name_suffix = "${var.tre_id}-ws-${local.short_workspace_id}-svc-${local.short_service_id}" resource_group_name = "rg-${var.tre_id}-ws-${local.short_workspace_id}" virtual_network_name = "vnet-${local.workspace_resource_name_suffix}" core_resource_group_name = "rg-${var.tre_id}" firewall_name = "fw-${var.tre_id}" databricks_workspace_name = "adb-${local.service_resource_name_suffix}" managed_resource_group_name = "rg-${local.service_resource_name_suffix}" host_subnet_name = "adb-host-subnet-${local.service_resource_name_suffix}" container_subnet_name = "adb-container-subnet-${local.service_resource_name_suffix}" network_security_group_name = "nsg-${local.service_resource_name_suffix}" route_table_name = "rt-${local.service_resource_name_suffix}" # databricks-udr.json was built according to this page https://learn.microsoft.com/en-us/azure/databricks/administration-guide/cloud-configurations/azure/udr map_location_url_config = jsondecode(file("${path.module}/databricks-udr.json")) storage_name = lower(replace("stgdbfs${substr(local.service_resource_name_suffix, -8, -1)}", "-", "")) tre_workspace_service_tags = { tre_id = var.tre_id tre_workspace_id = var.workspace_id tre_workspace_service_id = var.tre_resource_id } }
AzureTRE/templates/workspace_services/databricks/terraform/locals.tf/0
{ "file_path": "AzureTRE/templates/workspace_services/databricks/terraform/locals.tf", "repo_id": "AzureTRE", "token_count": 827 }
129
# This file is maintained automatically by "terraform init". # Manual edits may be lost in future updates. provider "registry.terraform.io/hashicorp/azurerm" { version = "3.22.0" constraints = "3.22.0" hashes = [ "h1:bxkMviG7vvNV2aPZQPall0mdIGOojsYeJvKbscPCZeM=", "zh:03441438f73965fef3a60582573dc9137baf3142d157f16a8c187f7995bf968e", "zh:1a45946e3ad479745e01eb28283beba4b7c63a94d29ccd3afa3adb8aac41ffa7", "zh:457352525d3744a9f5d809a68e61ba51ad022fa012d0f092f04e31730700977d", "zh:48c4ac83fbf5c7295ffe9b8f6a2f3e25d40361b53a8c77f1516973c714862805", "zh:48c503892d780977405b4ef23db55d1216bbe96a592de63769f827cf3d5e092a", "zh:5d5935681f91af8a44772262d7f6f1ed0a4b4e113236cc166559ff57b2c936c4", "zh:61377b5edefdfe96b160a10b1b86b6faef02b813ea7d3d9cbcd8bc664c3293ed", "zh:73b0696146afd6ff360138425973b3349cb2a45f13094a861d9c162c23e0d796", "zh:8b2178ca3e1618107a7d5d68f57ca239c68b70a60cdae1c0a3e3ba867282ba25", "zh:a4021c34ee777863f032425774485adab1d4aba10ce38eb415b5c3a3179423a4", "zh:c66daaf59d5750b1e49706ffa052cb4467280b0cb481fdd4f7618bb8b9d1edb1", "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", ] } provider "registry.terraform.io/hashicorp/local" { version = "2.2.3" hashes = [ "h1:aWp5iSUxBGgPv1UnV5yag9Pb0N+U1I0sZb38AXBFO8A=", "zh:04f0978bb3e052707b8e82e46780c371ac1c66b689b4a23bbc2f58865ab7d5c0", "zh:6484f1b3e9e3771eb7cc8e8bab8b35f939a55d550b3f4fb2ab141a24269ee6aa", "zh:78a56d59a013cb0f7eb1c92815d6eb5cf07f8b5f0ae20b96d049e73db915b238", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:8aa9950f4c4db37239bcb62e19910c49e47043f6c8587e5b0396619923657797", "zh:996beea85f9084a725ff0e6473a4594deb5266727c5f56e9c1c7c62ded6addbb", "zh:9a7ef7a21f48fabfd145b2e2a4240ca57517ad155017e86a30860d7c0c109de3", "zh:a63e70ac052aa25120113bcddd50c1f3cfe61f681a93a50cea5595a4b2cc3e1c", "zh:a6e8d46f94108e049ad85dbed60354236dc0b9b5ec8eabe01c4580280a43d3b8", "zh:bb112ce7efbfcfa0e65ed97fa245ef348e0fd5bfa5a7e4ab2091a9bd469f0a9e", "zh:d7bec0da5c094c6955efed100f3fe22fca8866859f87c025be1760feb174d6d9", "zh:fb9f271b72094d07cef8154cd3d50e9aa818a0ea39130bc193132ad7b23076fd", ] } provider "registry.terraform.io/hashicorp/random" { version = "3.4.3" constraints = "3.4.3" hashes = [ "h1:xZGZf18JjMS06pFa4NErzANI98qi59SEcBsOcS2P2yQ=", "zh:41c53ba47085d8261590990f8633c8906696fa0a3c4b384ff6a7ecbf84339752", "zh:59d98081c4475f2ad77d881c4412c5129c56214892f490adf11c7e7a5a47de9b", "zh:686ad1ee40b812b9e016317e7f34c0d63ef837e084dea4a1f578f64a6314ad53", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:84103eae7251384c0d995f5a257c72b0096605048f757b749b7b62107a5dccb3", "zh:8ee974b110adb78c7cd18aae82b2729e5124d8f115d484215fd5199451053de5", "zh:9dd4561e3c847e45de603f17fa0c01ae14cae8c4b7b4e6423c9ef3904b308dda", "zh:bb07bb3c2c0296beba0beec629ebc6474c70732387477a65966483b5efabdbc6", "zh:e891339e96c9e5a888727b45b2e1bb3fcbdfe0fd7c5b4396e4695459b38c8cb1", "zh:ea4739860c24dfeaac6c100b2a2e357106a89d18751f7693f3c31ecf6a996f8d", "zh:f0c76ac303fd0ab59146c39bc121c5d7d86f878e9a69294e29444d4c653786f8", "zh:f143a9a5af42b38fed328a161279906759ff39ac428ebcfe55606e05e1518b93", ] }
AzureTRE/templates/workspace_services/gitea/terraform/.terraform.lock.hcl/0
{ "file_path": "AzureTRE/templates/workspace_services/gitea/terraform/.terraform.lock.hcl", "repo_id": "AzureTRE", "token_count": 2015 }
130
# The OpenID extension requires multiple parameters; by setting 'enable-environment-properties', these parameters are fetched from environment variables. # (see https://guacamole.apache.org/doc/gug/configuring-guacamole.html). The required parameters are defined here (https://guacamole.apache.org/doc/gug/openid-auth.html): # openid-authorization-endpoint, openid-jwks-endpoint, openid-issuer, openid-client-id and openid-redirect-uri. # We assume an Azure deployment, so the first three are built using the given tenantId, and the last one is built using the given web app name (only openid-client-id is passed explicitly). enable-environment-properties:true enable-websocket:true enable-sftp:false enable-drive:false disable-download:true disable-upload:true
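As a rough illustration of how those values can be derived for a Microsoft Entra ID (Azure AD) tenant, the sketch below builds the standard v2.0 endpoints from a tenant ID; the setting names map onto the properties listed above, but the redirect URI shape and the helper itself are assumptions, not taken from the actual deployment scripts.

```python
# Sketch only: derives the OpenID endpoints from a tenant ID using the standard
# Microsoft Entra ID v2.0 endpoint shapes. The redirect URI format is an assumption.
def openid_settings(tenant_id: str, client_id: str, webapp_name: str) -> dict:
    authority = f"https://login.microsoftonline.com/{tenant_id}"
    return {
        "openid-authorization-endpoint": f"{authority}/oauth2/v2.0/authorize",
        "openid-jwks-endpoint": f"{authority}/discovery/v2.0/keys",
        "openid-issuer": f"{authority}/v2.0",
        "openid-client-id": client_id,
        # Assumed shape; the real redirect URI depends on how the web app is exposed.
        "openid-redirect-uri": f"https://{webapp_name}.azurewebsites.net/guacamole/",
    }
```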
AzureTRE/templates/workspace_services/guacamole/guacamole-server/docker/guacamole/guacamole.properties/0
{ "file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/docker/guacamole/guacamole.properties", "repo_id": "AzureTRE", "token_count": 216 }
131
/** * */ package org.apache.guacamole.auth.azuretre.connection;
AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/test/java/org/apache/guacamole/auth/azuretre/connection/package-info.java/0
{ "file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/test/java/org/apache/guacamole/auth/azuretre/connection/package-info.java", "repo_id": "AzureTRE", "token_count": 25 }
132
# Local .terraform directories **/.terraform/* # TF backend files **/*_backend.tf # Porter files it uses for local build ./cnab/** Dockerfile.tmpl .env* terraform/deploy.sh terraform/destroy.sh
AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-export-reviewvm/.dockerignore/0
{ "file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-export-reviewvm/.dockerignore", "repo_id": "AzureTRE", "token_count": 74 }
133
variable "workspace_id" { type = string } variable "tre_id" { type = string } variable "parent_service_id" { type = string } variable "tre_resource_id" { type = string } variable "image" { type = string } variable "vm_size" { type = string } variable "image_gallery_id" { type = string default = "" } variable "airlock_request_sas_url" { type = string }
AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-export-reviewvm/terraform/variables.tf/0
{ "file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-export-reviewvm/terraform/variables.tf", "repo_id": "AzureTRE", "token_count": 139 }
134
#!/bin/bash set -o errexit set -o pipefail set -o nounset # Uncomment this line to see each command for debugging (careful: this will show secrets!) # set -o xtrace # Get Docker Public key from Nexus curl -fsSL "${NEXUS_PROXY_URL}"/repository/docker-public-key/gpg | sudo gpg --dearmor -o /etc/apt/trusted.gpg.d/docker-archive-keyring.gpg # Get Microsoft Public key from Nexus curl -fsSL "${NEXUS_PROXY_URL}"/repository/microsoft-keys/microsoft.asc | sudo gpg --dearmor -o /etc/apt/trusted.gpg.d/microsoft.gpg
AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-linuxvm/terraform/get_apt_keys.sh/0
{ "file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-linuxvm/terraform/get_apt_keys.sh", "repo_id": "AzureTRE", "token_count": 191 }
135
# This file is maintained automatically by "terraform init". # Manual edits may be lost in future updates. provider "registry.terraform.io/hashicorp/azurerm" { version = "3.57.0" constraints = "3.57.0" hashes = [ "h1:SOBKU/ioGnpuQpAx6dgaD0EzfAM2W+uS9e6p59viSxs=", "zh:028202b0ae01f1262dac076b383cb68b5dd624977669b6db833418c215eb8401", "zh:26fcf9e9b73cb3bbf87a048361a89050d2e52bdc91190a305e624a62be26a3f4", "zh:2f381103953e4513068eee62089a0ec8c60a18ecef2235138b6c29a45920d6a2", "zh:376f016f4b449b2cf38f75e27e7a9157fdcfc925f28198124a30e316abb54f3d", "zh:7d491bab94d5aba91cd9c307dbd4b655dcdc0a6212541e7800b9a902be98befe", "zh:85fa7d8339efd15494f947cda02e9ed127eafa32652e568f54261b2e97d2b3ee", "zh:950e079e55a7e321adbd2f6a0639a4b3b0fac47d2e4bb3a12791e0817b694238", "zh:975260e09379c5c97cad3171327db2f0b4914909861d4c24ab784b0ecd79c54a", "zh:a26bb67ab2d2f20e5fee4d41110584af17357f4b4266d80f9debfad61fa0a4fd", "zh:da0e5d1ec301c69b6fae684e55059fc5e1b91699ed3696229f599d558401556b", "zh:ea11e62ce53caec240cb3a1da25d248805387fa246314001ed3e07e9105f6e12", "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", ] } provider "registry.terraform.io/hashicorp/random" { version = "3.5.1" constraints = "3.5.1" hashes = [ "h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=", "zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64", "zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d", "zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831", "zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3", "zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b", "zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2", "zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865", "zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03", "zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602", "zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014", ] }
AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-windowsvm/terraform/.terraform.lock.hcl/0
{ "file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-windowsvm/terraform/.terraform.lock.hcl", "repo_id": "AzureTRE", "token_count": 1347 }
136
# This file is maintained automatically by "terraform init". # Manual edits may be lost in future updates. provider "registry.terraform.io/hashicorp/azurerm" { version = "2.97.0" constraints = "2.97.0" hashes = [ "h1:XxT+XM/leTXa21aTnJjPBfNBQ8cLE4gYDg01WEZsV1U=", "zh:0aac80e6d2b8ddf33d558ac893d52688e8abf8a0b995cfc3c35eb84afbf432a3", "zh:11191068cb732208ebc8662651782f63db329a25f7ea1cd50cd91622a2c247b7", "zh:36c8334194e7d605682053c7c70fbb2a650d9b0a7bcc44d5cdda4f205818438a", "zh:3a5e01276added995e875b42ecc6b36ff73d267f0c096c87195bd2b1fff4f5b2", "zh:557e38371657e6ed8aae9192d01480c4cca7c0f7ade6022f1aec247a6384922b", "zh:67b913c280c5858549477a4b05e77078b1a5234de77c7bddd4ee1e8e237d5665", "zh:7aeca864ce45b295db734cd968f7596ff12cd7c522ee89d53f432dae7c2b5d18", "zh:b6127d7a796eaf9756dd212667eb48f79c0e78729589ec8ccf68e0b36ebb4e54", "zh:bed448238740f897d1b399e5123b3a9eba256b981846f9ee92b71493446ca684", "zh:c351a1bba34c3bd06fff75e4c15e4db0456268479463c2471598068ea1c5c884", "zh:d073c24d0a4756e79b39f41f552d526800f9fb0ad0a74f742ac8de61b6416a3a", ] } provider "registry.terraform.io/hashicorp/external" { version = "2.3.1" hashes = [ "h1:bROCw6g5D/3fFnWeJ01L4IrdnJl1ILU8DGDgXCtYzaY=", "zh:001e2886dc81fc98cf17cf34c0d53cb2dae1e869464792576e11b0f34ee92f54", "zh:2eeac58dd75b1abdf91945ac4284c9ccb2bfb17fa9bdb5f5d408148ff553b3ee", "zh:2fc39079ba61411a737df2908942e6970cb67ed2f4fb19090cd44ce2082903dd", "zh:472a71c624952cff7aa98a7b967f6c7bb53153dbd2b8f356ceb286e6743bb4e2", "zh:4cff06d31272aac8bc35e9b7faec42cf4554cbcbae1092eaab6ab7f643c215d9", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:7ed16ccd2049fa089616b98c0bd57219f407958f318f3c697843e2397ddf70df", "zh:842696362c92bf2645eb85c739410fd51376be6c488733efae44f4ce688da50e", "zh:8985129f2eccfd7f1841ce06f3bf2bbede6352ec9e9f926fbaa6b1a05313b326", "zh:a5f0602d8ec991a5411ef42f872aa90f6347e93886ce67905c53cfea37278e05", "zh:bf4ab82cbe5256dcef16949973bf6aa1a98c2c73a98d6a44ee7bc40809d002b8", "zh:e70770be62aa70198fa899526d671643ff99eecf265bf1a50e798fc3480bd417", ] } provider "registry.terraform.io/hashicorp/local" { version = "2.4.0" constraints = "2.4.0" hashes = [ "h1:R97FTYETo88sT2VHfMgkPU3lzCsZLunPftjSI5vfKe8=", "zh:53604cd29cb92538668fe09565c739358dc53ca56f9f11312b9d7de81e48fab9", "zh:66a46e9c508716a1c98efbf793092f03d50049fa4a83cd6b2251e9a06aca2acf", "zh:70a6f6a852dd83768d0778ce9817d81d4b3f073fab8fa570bff92dcb0824f732", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:82a803f2f484c8b766e2e9c32343e9c89b91997b9f8d2697f9f3837f62926b35", "zh:9708a4e40d6cc4b8afd1352e5186e6e1502f6ae599867c120967aebe9d90ed04", "zh:973f65ce0d67c585f4ec250c1e634c9b22d9c4288b484ee2a871d7fa1e317406", "zh:c8fa0f98f9316e4cfef082aa9b785ba16e36ff754d6aba8b456dab9500e671c6", "zh:cfa5342a5f5188b20db246c73ac823918c189468e1382cb3c48a9c0c08fc5bf7", "zh:e0e2b477c7e899c63b06b38cd8684a893d834d6d0b5e9b033cedc06dd7ffe9e2", "zh:f62d7d05ea1ee566f732505200ab38d94315a4add27947a60afa29860822d3fc", "zh:fa7ce69dde358e172bd719014ad637634bbdabc49363104f4fca759b4b73f2ce", ] } provider "registry.terraform.io/hashicorp/null" { version = "3.2.1" hashes = [ "h1:FbGfc+muBsC17Ohy5g806iuI1hQc4SIexpYCrQHQd8w=", "zh:58ed64389620cc7b82f01332e27723856422820cfd302e304b5f6c3436fb9840", "zh:62a5cc82c3b2ddef7ef3a6f2fedb7b9b3deff4ab7b414938b08e51d6e8be87cb", "zh:63cff4de03af983175a7e37e52d4bd89d990be256b16b5c7f919aff5ad485aa5", "zh:74cb22c6700e48486b7cabefa10b33b801dfcab56f1a6ac9b6624531f3d36ea3", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 
"zh:79e553aff77f1cfa9012a2218b8238dd672ea5e1b2924775ac9ac24d2a75c238", "zh:a1e06ddda0b5ac48f7e7c7d59e1ab5a4073bbcf876c73c0299e4610ed53859dc", "zh:c37a97090f1a82222925d45d84483b2aa702ef7ab66532af6cbcfb567818b970", "zh:e4453fbebf90c53ca3323a92e7ca0f9961427d2f0ce0d2b65523cc04d5d999c2", "zh:e80a746921946d8b6761e77305b752ad188da60688cfd2059322875d363be5f5", "zh:fbdb892d9822ed0e4cb60f2fedbdbb556e4da0d88d3b942ae963ed6ff091e48f", "zh:fca01a623d90d0cad0843102f9b8b9fe0d3ff8244593bd817f126582b52dd694", ] } provider "registry.terraform.io/hashicorp/random" { version = "3.4.2" constraints = "3.4.2" hashes = [ "h1:PIIfeOjmPoQRHfMM7MDr7qY3mQqD4F+38Dmq8pjvUUs=", "zh:1e61d226778aefd01c0e139c0ad709b61e9ae4b33d72301b922bd3d000b76eee", "zh:3c3295c3d2e9c3f9d60d557ee8faf2a30bd15f59f2c38ed13f50a3220dd027d0", "zh:6661b4953b875857c3ac99fb1006daf314acebf2d1748045d208ebc8cbc647cd", "zh:6e1823a349ceea5e4e0c684561473f57c46f73d7c197c39904d031ce6654bfb8", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:8f8e6fd15e5228f1935c63d79bf3074f645ddba1350756acfc968b2a05bf85ee", "zh:939a78da13a7932bd5429f0c77debe907bf9d6c6a26af50fd4d9f32ee16ea5a6", "zh:995a592acbcde12f0d34ff5c3b74ec7054743315684b72b927bdc0d33e0e7c4d", "zh:a9f8b88fe365ed9996d3386b415cabb445cf9d6e4b0e0b73f58af3aa31f1fa3d", "zh:dda7c698cf92170665ca3ac1ccdc2177c0bec4807e69075422ae9d5c5308adbd", "zh:eff42af6313499db0b3177a82851e0f2d2706e81cab11372d7d3673c41b15b9c", "zh:fcd6826d4398147314620401a5908dd35c6f2ebac7e7d3a7d77078dbc7c5a0e6", ] }
AzureTRE/templates/workspace_services/innereye/terraform/.terraform.lock.hcl/0
{ "file_path": "AzureTRE/templates/workspace_services/innereye/terraform/.terraform.lock.hcl", "repo_id": "AzureTRE", "token_count": 3268 }
137
resource "local_file" "mlflow_windows_config" { content = data.template_file.mlflow_windows_config.rendered filename = "${path.module}/../mlflow-vm-config/windows/config.ps1" } resource "local_file" "mlflow_linux_config" { content = data.template_file.mlflow_linux_config.rendered filename = "${path.module}/../mlflow-vm-config/linux/config.sh" } resource "azurerm_storage_share_file" "mlflow_config_windows" { name = "mlflow-windows-config-${local.webapp_name}.ps1" storage_share_id = data.azurerm_storage_share.shared_storage.id source = "${path.module}/../mlflow-vm-config/windows/config.ps1" } resource "azurerm_storage_share_file" "mlflow_config_linux" { name = "mlflow-linux-config-${local.webapp_name}.sh" storage_share_id = data.azurerm_storage_share.shared_storage.id source = "${path.module}/../mlflow-vm-config/linux/config.sh" } resource "azurerm_storage_container" "mlflow_artefacts" { name = local.mlflow_artefacts_container_name storage_account_name = local.storage_name container_access_type = "private" } resource "azurerm_linux_web_app" "mlflow" { name = local.webapp_name location = data.azurerm_resource_group.ws.location resource_group_name = data.azurerm_resource_group.ws.name service_plan_id = data.azurerm_service_plan.workspace.id https_only = true key_vault_reference_identity_id = azurerm_user_assigned_identity.mlflow.id virtual_network_subnet_id = data.azurerm_subnet.web_apps.id tags = local.tre_workspace_service_tags site_config { http2_enabled = true container_registry_use_managed_identity = true container_registry_managed_identity_client_id = azurerm_user_assigned_identity.mlflow.client_id ftps_state = "Disabled" vnet_route_all_enabled = true minimum_tls_version = "1.2" application_stack { docker_image = "${data.azurerm_container_registry.mgmt_acr.login_server}/microsoft/azuretre/${local.image_name}" docker_image_tag = local.image_tag } } app_settings = { MLFLOW_SERVER_WORKERS = "1" MLFLOW_SERVER_PORT = "5000" MLFLOW_SERVER_HOST = "0.0.0.0" MLFLOW_SERVER_FILE_STORE = format("%s%s%s%s%s%s%s%s%s%s", "postgresql://", random_string.username.result, "@", azurerm_postgresql_server.mlflow.name, ":", random_password.password.result, "@", azurerm_postgresql_server.mlflow.name, ".postgres.database.azure.com:5432/", azurerm_postgresql_database.mlflow.name) MLFLOW_SERVER_DEFAULT_ARTIFACT_ROOT = format("%s%s%s%s%s%s", "wasbs://", azurerm_storage_container.mlflow_artefacts.name, "@", data.azurerm_storage_account.mlflow.name, ".blob.core.windows.net/", azurerm_storage_container.mlflow_artefacts.name) AZURE_STORAGE_CONNECTION_STRING = data.azurerm_storage_account.mlflow.primary_connection_string } logs { application_logs { file_system_level = "Information" } http_logs { file_system { retention_in_days = 7 retention_in_mb = 100 } } } identity { type = "UserAssigned" identity_ids = [azurerm_user_assigned_identity.mlflow.id] } lifecycle { ignore_changes = [tags] } depends_on = [ azurerm_role_assignment.mlflow_acr_pull, azurerm_key_vault_access_policy.mlflow, ] } resource "azurerm_monitor_diagnostic_setting" "mlflow" { name = "diag-${var.tre_id}" target_resource_id = azurerm_linux_web_app.mlflow.id log_analytics_workspace_id = data.azurerm_log_analytics_workspace.tre.id dynamic "log" { for_each = data.azurerm_monitor_diagnostic_categories.mlflow.log_category_types content { category = log.value enabled = contains(local.web_app_diagnostic_categories_enabled, log.value) ? 
true : false } } } resource "azurerm_role_assignment" "mlflow_acr_pull" { scope = data.azurerm_container_registry.mgmt_acr.id role_definition_name = "AcrPull" principal_id = azurerm_user_assigned_identity.mlflow.principal_id } resource "azurerm_private_endpoint" "mlflow" { # disabling this makes the webapp available on the public internet count = var.is_exposed_externally == false ? 1 : 0 name = "pe-${local.webapp_name}" location = data.azurerm_resource_group.ws.location resource_group_name = data.azurerm_resource_group.ws.name subnet_id = data.azurerm_subnet.services.id tags = local.tre_workspace_service_tags private_service_connection { private_connection_resource_id = azurerm_linux_web_app.mlflow.id name = "psc-${local.webapp_name}" subresource_names = ["sites"] is_manual_connection = false } private_dns_zone_group { name = module.terraform_azurerm_environment_configuration.private_links["privatelink.azurewebsites.net"] private_dns_zone_ids = [data.azurerm_private_dns_zone.azurewebsites.id] } lifecycle { ignore_changes = [tags] } } resource "azurerm_key_vault_access_policy" "mlflow" { key_vault_id = data.azurerm_key_vault.ws.id tenant_id = azurerm_user_assigned_identity.mlflow.tenant_id object_id = azurerm_user_assigned_identity.mlflow.principal_id secret_permissions = ["Get", "List", ] } resource "azurerm_user_assigned_identity" "mlflow" { resource_group_name = data.azurerm_resource_group.ws.name location = data.azurerm_resource_group.ws.location name = local.identity_name tags = local.tre_workspace_service_tags lifecycle { ignore_changes = [tags] } }
AzureTRE/templates/workspace_services/mlflow/terraform/web_app.tf/0
{ "file_path": "AzureTRE/templates/workspace_services/mlflow/terraform/web_app.tf", "repo_id": "AzureTRE", "token_count": 2677 }
138
# syntax=docker/dockerfile-upstream:1.4.0 FROM --platform=linux/amd64 debian:bullseye-slim # PORTER_INIT RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache # sqlcmd is required for schema initialization in Azure Synapse SHELL ["/bin/bash", "-o", "pipefail", "-c"] # Ignore the lint rule that requires `--no-install-recommends` to allow the Microsoft packages to get everything they need; it is all cleaned up at the end # hadolint ignore=DL3015 RUN apt-get update && apt-get install -y curl gnupg && \ curl https://packages.microsoft.com/keys/microsoft.asc | apt-key add - && \ echo 'deb https://packages.microsoft.com/debian/11/prod bullseye main'> /etc/apt/sources.list.d/prod.list && \ apt-get update && apt-get -y install sqlcmd --no-install-recommends && \ apt-get clean && rm -rf /var/lib/apt/lists/* # Git is required for terraform_azurerm_environment_configuration RUN --mount=type=cache,target=/var/cache/apt --mount=type=cache,target=/var/lib/apt \ apt-get update && apt-get install -y git --no-install-recommends # The PostgreSQL client is required by Atlas RUN --mount=type=cache,target=/var/cache/apt --mount=type=cache,target=/var/lib/apt \ apt-get update && apt-get install -y postgresql-client gettext apache2-utils curl jq --no-install-recommends # PORTER_MIXINS # Use the BUNDLE_DIR build argument to copy files into the bundle COPY --link . ${BUNDLE_DIR}/
AzureTRE/templates/workspace_services/ohdsi/Dockerfile.tmpl/0
{ "file_path": "AzureTRE/templates/workspace_services/ohdsi/Dockerfile.tmpl", "repo_id": "AzureTRE", "token_count": 512 }
139
# This file is maintained automatically by "terraform init". # Manual edits may be lost in future updates. provider "registry.terraform.io/hashicorp/azurerm" { version = "3.58.0" constraints = "3.58.0" hashes = [ "h1:Hvlt3hgTiip6xMeq8/EDGqF8NoVuZjYdTZdO79YNXsw=", "zh:22b19802605ca3e2b811e33650438be3647748cf8f75474c78448c30ac1cad0b", "zh:402ce010f4b68337abaccf8059c37294cabcbdbc3cefd9491dcd312e36ceea3c", "zh:53d2cd15f1631c7ffb47918064d644899cc671d47c72f4dafee4e2a5e69afd14", "zh:5a6b1c55629cff555472d1d43ad6e802693f7fd046c7d37718d4de6f52dbf66b", "zh:6181dccb7bca7cd84b0295a0332f19a7347a9586101f0a5e51b53bda1ec74651", "zh:854181d6a8821b3707775c913e91dd7944fcb55098953ef030168fa3cd0224aa", "zh:b44c758424d1a037fd833e0c69b29e3ac4047ab95653bb3e080835e55bd9badb", "zh:b6ee916a1579bba29b1aacce8897c6733fa97ba0dba2808f1ffa9ab492743fab", "zh:b7ab57044649578410dadfdf4412fc5f8aa085a25ea0b061393e843b49b43b63", "zh:cb68ddb922eb4be74dedf58c953d7f778b4e5f3cdcbe2ea83e02b12296ce4969", "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", "zh:fe9e86173134cd9dc8ed65eae8634abc6d6f6806b5b412f54fccf4047052daa0", ] } provider "registry.terraform.io/hashicorp/local" { version = "2.4.0" constraints = "2.4.0" hashes = [ "h1:R97FTYETo88sT2VHfMgkPU3lzCsZLunPftjSI5vfKe8=", "zh:53604cd29cb92538668fe09565c739358dc53ca56f9f11312b9d7de81e48fab9", "zh:66a46e9c508716a1c98efbf793092f03d50049fa4a83cd6b2251e9a06aca2acf", "zh:70a6f6a852dd83768d0778ce9817d81d4b3f073fab8fa570bff92dcb0824f732", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:82a803f2f484c8b766e2e9c32343e9c89b91997b9f8d2697f9f3837f62926b35", "zh:9708a4e40d6cc4b8afd1352e5186e6e1502f6ae599867c120967aebe9d90ed04", "zh:973f65ce0d67c585f4ec250c1e634c9b22d9c4288b484ee2a871d7fa1e317406", "zh:c8fa0f98f9316e4cfef082aa9b785ba16e36ff754d6aba8b456dab9500e671c6", "zh:cfa5342a5f5188b20db246c73ac823918c189468e1382cb3c48a9c0c08fc5bf7", "zh:e0e2b477c7e899c63b06b38cd8684a893d834d6d0b5e9b033cedc06dd7ffe9e2", "zh:f62d7d05ea1ee566f732505200ab38d94315a4add27947a60afa29860822d3fc", "zh:fa7ce69dde358e172bd719014ad637634bbdabc49363104f4fca759b4b73f2ce", ] } provider "registry.terraform.io/hashicorp/random" { version = "3.5.1" constraints = "3.5.1" hashes = [ "h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=", "zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64", "zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d", "zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831", "zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3", "zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", "zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b", "zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2", "zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865", "zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03", "zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602", "zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014", ] }
AzureTRE/templates/workspace_services/ohdsi/terraform/.terraform.lock.hcl/0
{ "file_path": "AzureTRE/templates/workspace_services/ohdsi/terraform/.terraform.lock.hcl", "repo_id": "AzureTRE", "token_count": 2015 }
140
ARM_CLIENT_ID="__CHANGE_ME__" ARM_CLIENT_SECRET="__CHANGE_ME__" ARM_TENANT_ID="__CHANGE_ME__" ARM_SUBSCRIPTION_ID="__CHANGE_ME__" AUTH_TENANT_ID="__CHANGE_ME__" # These are passed in if Terraform will create the Workspace Microsoft Entra ID Application REGISTER_AAD_APPLICATION=true CREATE_AAD_GROUPS=true AUTH_CLIENT_ID="__CHANGE_ME__" AUTH_CLIENT_SECRET="__CHANGE_ME__" WORKSPACE_OWNER_OBJECT_ID="__CHANGE_ME__" # These are passed in if you register the Workspace Microsoft Entra ID Application beforehand # REGISTER_AAD_APPLICATION=false # CLIENT_ID="__CHANGE_ME__" # CLIENT_SECRET="__CHANGE_ME__" # WORKSPACE_OWNER_OBJECT_ID="" # Used by Porter, aka TRE_RESOURCE_ID ID="MadeUp123" SP_ID="" SCOPE_ID="api://ws_0001" APP_ROLE_ID_WORKSPACE_OWNER="" APP_ROLE_ID_WORKSPACE_RESEARCHER="" APP_ROLE_ID_WORKSPACE_AIRLOCK_MANAGER="" # Complex types are base64 encoded by the resource processor (["10.1.10.0/24"], in this case) ADDRESS_SPACES="WyIxMC4xLjEwLjAvMjQiXQ==" ENABLE_LOCAL_DEBUGGING=true AAD_REDIRECT_URIS="W10=" WORKSPACE_APP_SERVICE_PLAN_SKU=S1
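The base64 encoding of complex values shown above is simply JSON encoding followed by base64; the sketch below reproduces the two encoded values in this sample (the helper name is made up for illustration).

```python
# Shows how the encoded values above are produced: JSON-encode the value, then base64 it.
import base64
import json

def encode_complex(value) -> str:
    return base64.b64encode(json.dumps(value).encode()).decode()

print(encode_complex(["10.1.10.0/24"]))  # WyIxMC4xLjEwLjAvMjQiXQ==  (ADDRESS_SPACES)
print(encode_complex([]))                # W10=                      (AAD_REDIRECT_URIS)
```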
AzureTRE/templates/workspaces/airlock-import-review/.env.sample/0
{ "file_path": "AzureTRE/templates/workspaces/airlock-import-review/.env.sample", "repo_id": "AzureTRE", "token_count": 443 }
141
terraform { # In modules we should only specify the min version required_providers { azurerm = { source = "hashicorp/azurerm" version = ">= 3.8.0" } azuread = { source = "hashicorp/azuread" version = ">= 2.20" } random = { source = "hashicorp/random" version = "~>3.3.0" } } }
AzureTRE/templates/workspaces/base/terraform/aad/providers.tf/0
{ "file_path": "AzureTRE/templates/workspaces/base/terraform/aad/providers.tf", "repo_id": "AzureTRE", "token_count": 170 }
142
resource "azurerm_resource_group" "ws" { location = var.location name = "rg-${local.workspace_resource_name_suffix}" tags = merge( local.tre_workspace_tags, { project = "Azure Trusted Research Environment", source = "https://github.com/microsoft/AzureTRE/" }, ) lifecycle { ignore_changes = [tags] } } // Networking is causing dependencies issues when some parts are deployed from // Azure, especially for storage shares. It became quite difficult to figure out the needed // dependencies for each resource seperatly, so to make it easier we packed all network // resources as a single module that should be depended on. module "network" { source = "./network" location = var.location tre_id = var.tre_id address_spaces = var.address_spaces ws_resource_group_name = azurerm_resource_group.ws.name tre_resource_id = var.tre_resource_id tre_workspace_tags = local.tre_workspace_tags arm_environment = var.arm_environment } module "aad" { source = "./aad" tre_workspace_tags = local.tre_workspace_tags count = var.register_aad_application ? 1 : 0 key_vault_id = azurerm_key_vault.kv.id workspace_resource_name_suffix = local.workspace_resource_name_suffix workspace_owner_object_id = var.workspace_owner_object_id aad_redirect_uris_b64 = var.aad_redirect_uris_b64 create_aad_groups = var.create_aad_groups depends_on = [ azurerm_key_vault_access_policy.deployer, azurerm_key_vault_access_policy.resource_processor, terraform_data.wait_for_dns_vault ] } module "airlock" { count = var.enable_airlock ? 1 : 0 source = "./airlock" location = var.location tre_id = var.tre_id tre_workspace_tags = local.tre_workspace_tags ws_resource_group_name = azurerm_resource_group.ws.name enable_local_debugging = var.enable_local_debugging services_subnet_id = module.network.services_subnet_id short_workspace_id = local.short_workspace_id airlock_processor_subnet_id = module.network.airlock_processor_subnet_id arm_environment = var.arm_environment depends_on = [ module.network, ] } module "azure_monitor" { source = "./azure-monitor" tre_id = var.tre_id location = var.location resource_group_name = azurerm_resource_group.ws.name resource_group_id = azurerm_resource_group.ws.id tre_resource_id = var.tre_resource_id tre_workspace_tags = local.tre_workspace_tags workspace_subnet_id = module.network.services_subnet_id azure_monitor_dns_zone_id = module.network.azure_monitor_dns_zone_id azure_monitor_oms_opinsights_dns_zone_id = module.network.azure_monitor_oms_opinsights_dns_zone_id azure_monitor_ods_opinsights_dns_zone_id = module.network.azure_monitor_ods_opinsights_dns_zone_id azure_monitor_agentsvc_dns_zone_id = module.network.azure_monitor_agentsvc_dns_zone_id blob_core_dns_zone_id = module.network.blobcore_zone_id enable_local_debugging = var.enable_local_debugging depends_on = [ module.network, module.airlock ] }
AzureTRE/templates/workspaces/base/terraform/workspace.tf/0
{ "file_path": "AzureTRE/templates/workspaces/base/terraform/workspace.tf", "repo_id": "AzureTRE", "token_count": 1661 }
143
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <link rel="stylesheet" href="//static2.sharepointonline.com/files/fabric/office-ui-fabric-core/11.0.0/css/fabric.min.css" /> <meta name="viewport" content="width=device-width, initial-scale=1" /> <meta name="theme-color" content="#000000" /> <!-- manifest.json provides metadata used when your web app is installed on a user's mobile device or desktop. See https://developers.google.com/web/fundamentals/web-app-manifest/ --> <link rel="manifest" href="%PUBLIC_URL%/manifest.json" /> <!-- Notice the use of %PUBLIC_URL% in the tags above. It will be replaced with the URL of the `public` folder during the build. Only files inside the `public` folder can be referenced from the HTML. Unlike "/favicon.ico" or "favicon.ico", "%PUBLIC_URL%/favicon.ico" will work correctly both with client-side routing and a non-root public URL. Learn how to configure a non-root public URL by running `npm run build`. --> <link rel="icon" href="%PUBLIC_URL%/favicon.ico"> <title>Azure TRE</title> </head> <body> <noscript>You need to enable JavaScript to run this app.</noscript> <div id="root"></div> <!-- This HTML file is a template. If you open it directly in the browser, you will see an empty page. You can add webfonts, meta tags, or analytics to this file. The build step will place the bundled scripts into the <body> tag. To begin the development, run `npm start` or `yarn start`. To create a production bundle, use `npm run build` or `yarn build`. --> </body> </html>
AzureTRE/ui/app/public/index.html/0
{ "file_path": "AzureTRE/ui/app/public/index.html", "repo_id": "AzureTRE", "token_count": 606 }
144
import { Stack, Shimmer, TooltipHost, Icon } from "@fluentui/react"; import { useContext, useEffect, useState } from "react"; import { CostsContext } from "../../contexts/CostsContext"; import { LoadingState } from "../../models/loadingState"; import { WorkspaceContext } from "../../contexts/WorkspaceContext"; import { CostResource } from "../../models/costs"; import { useAuthApiCall, HttpMethod, ResultType } from '../../hooks/useAuthApiCall'; import { ApiEndpoint } from "../../models/apiEndpoints"; interface CostsTagProps { resourceId: string; } export const CostsTag: React.FunctionComponent<CostsTagProps> = (props: CostsTagProps) => { const costsCtx = useContext(CostsContext); const workspaceCtx = useContext(WorkspaceContext); const [loadingState, setLoadingState] = useState(LoadingState.Loading); const apiCall = useAuthApiCall(); const [formattedCost, setFormattedCost] = useState<string | undefined>(undefined); useEffect(() => { async function fetchCostData() { let costs: CostResource[] = []; if (workspaceCtx.costs.length > 0) { costs = workspaceCtx.costs; } else if (costsCtx.costs.length > 0) { costs = costsCtx.costs; } else if(!workspaceCtx.workspace.id) { let scopeId = (await apiCall(`${ApiEndpoint.Workspaces}/${props.resourceId}/scopeid`, HttpMethod.Get)).workspaceAuth.scopeId; const r = await apiCall(`${ApiEndpoint.Workspaces}/${props.resourceId}/${ApiEndpoint.Costs}`, HttpMethod.Get, scopeId, undefined, ResultType.JSON); costs = [{costs: r.costs, id: r.id, name: r.name }]; } const resourceCosts = costs.find((cost) => { return cost.id === props.resourceId; }); if (resourceCosts && resourceCosts.costs.length > 0) { const formattedCost = new Intl.NumberFormat(undefined, { style: 'currency', currency: resourceCosts?.costs[0].currency, currencyDisplay: 'narrowSymbol', minimumFractionDigits: 2, maximumFractionDigits: 2 }).format(resourceCosts.costs[0].cost); setFormattedCost(formattedCost); } setLoadingState(LoadingState.Ok); } fetchCostData(); }, [apiCall, props.resourceId, workspaceCtx.costs, costsCtx.costs, workspaceCtx.workspace.id]); const costBadge = ( <Stack.Item style={{ maxHeight: 18 }} className="tre-badge"> {loadingState === LoadingState.Loading ? ( <Shimmer /> ) : ( <> {formattedCost ? ( formattedCost ) : ( <TooltipHost content="Cost data not yet available"> <Icon iconName="Clock" /> </TooltipHost> )} </> )} </Stack.Item> ); return (costBadge); };
AzureTRE/ui/app/src/components/shared/CostsTag.tsx/0
{ "file_path": "AzureTRE/ui/app/src/components/shared/CostsTag.tsx", "repo_id": "AzureTRE", "token_count": 1098 }
145
import { DefaultPalette, IStackItemStyles, IStackStyles, Stack } from "@fluentui/react"; import moment from "moment"; import React from "react"; import { Resource } from "../../models/resource"; import { ComplexPropertyModal } from "./ComplexItemDisplay"; interface ResourcePropertyPanelProps { resource: Resource } interface ResourcePropertyPanelItemProps { header: string, val: any } export const ResourcePropertyPanelItem: React.FunctionComponent<ResourcePropertyPanelItemProps> = (props: ResourcePropertyPanelItemProps) => { const stackItemStyles: IStackItemStyles = { root: { padding: 5, width: 150, color: DefaultPalette.neutralSecondary, wordBreak: 'break-all' } } function renderValue(val: any, title: string) { if (typeof (val) === "string") { if (val && val.startsWith('https://')) { return (<a href={val.toString()} target='_blank' rel="noreferrer">{val}</a>) } return val } if (typeof (val) === "object") return <ComplexPropertyModal val={val} title={title} /> return val.toString() } return ( <> <Stack wrap horizontal> <Stack.Item grow styles={stackItemStyles}> {props.header} </Stack.Item> <Stack.Item grow={3} styles={stackItemStyles}> : {renderValue(props.val, props.header)} </Stack.Item> </Stack> </> ); } export const ResourcePropertyPanel: React.FunctionComponent<ResourcePropertyPanelProps> = (props: ResourcePropertyPanelProps) => { const stackStyles: IStackStyles = { root: { padding: 0, minWidth: 300 } }; function userFriendlyKey(key: String) { let friendlyKey = key.replaceAll('_', ' '); return friendlyKey.charAt(0).toUpperCase() + friendlyKey.slice(1).toLowerCase(); } return ( props.resource && props.resource.id ? <> <Stack wrap horizontal> <Stack grow styles={stackStyles}> <ResourcePropertyPanelItem header={'Resource ID'} val={props.resource.id} /> <ResourcePropertyPanelItem header={'Resource type'} val={props.resource.resourceType} /> <ResourcePropertyPanelItem header={'Resource path'} val={props.resource.resourcePath} /> <ResourcePropertyPanelItem header={'Template name'} val={props.resource.templateName} /> <ResourcePropertyPanelItem header={'Template version'} val={props.resource.templateVersion} /> <ResourcePropertyPanelItem header={'Is enabled'} val={props.resource.isEnabled.toString()} /> <ResourcePropertyPanelItem header={'User'} val={props.resource.user.name} /> <ResourcePropertyPanelItem header={'Last updated'} val={moment.unix(props.resource.updatedWhen).toDate().toDateString()} /> </Stack> <Stack grow styles={stackStyles}> { Object.keys(props.resource.properties).map((key) => { let val = (props.resource.properties as any)[key]; return ( <ResourcePropertyPanelItem header={userFriendlyKey(key)} val={val} key={key} /> ) }) } </Stack> </Stack> </> : <></> ); };
AzureTRE/ui/app/src/components/shared/ResourcePropertyPanel.tsx/0
{ "file_path": "AzureTRE/ui/app/src/components/shared/ResourcePropertyPanel.tsx", "repo_id": "AzureTRE", "token_count": 1290 }
146
import { Callout, DirectionalHint, FontWeights, Icon, Link, mergeStyleSets, MessageBar, MessageBarType, Panel, ProgressIndicator, Text } from '@fluentui/react';
import React, { useEffect, useState } from 'react';
import { completedStates, inProgressStates, Operation, successStates } from '../../../models/operation';
import { NotificationItem } from './NotificationItem';
import { IconButton } from '@fluentui/react/lib/Button';
import { HttpMethod, useAuthApiCall } from '../../../hooks/useAuthApiCall';
import { ApiEndpoint } from '../../../models/apiEndpoints';
import { Resource } from '../../../models/resource';
import { useAppDispatch, useAppSelector } from '../../../hooks/customReduxHooks';
import { setInitialOperations, dismissCompleted } from '../../shared/notifications/operationsSlice';

export const NotificationPanel: React.FunctionComponent = () => {
  const [isOpen, setIsOpen] = useState(false);
  const [showCallout, setShowCallout] = useState(false);
  const [calloutDetails, setCalloutDetails] = useState({ title: '', text: '', success: true });
  const apiCall = useAuthApiCall();
  const operations = useAppSelector((state) => state.operations);
  const dispatch = useAppDispatch();

  useEffect(() => {
    const loadAllOps = async () => {
      let opsToAdd = (await apiCall(`${ApiEndpoint.Operations}`, HttpMethod.Get)).operations as Array<Operation>;
      dispatch(setInitialOperations(opsToAdd));
    };

    loadAllOps();
  }, [apiCall, dispatch])

  const callout = (o: Operation, r: Resource) => {
    if (successStates.includes(o.status)) {
      setCalloutDetails({
        title: "Operation Succeeded",
        text: `${o.action} for ${r.properties.display_name} completed successfully`,
        success: true
      });
    } else {
      setCalloutDetails({
        title: "Operation Failed",
        text: `${o.action} for ${r.properties.display_name} completed with status ${o.status}`,
        success: false
      });
    }
    setShowCallout(true);
  }

  return (
    <>
      <IconButton id="tre-notification-btn" className='tre-notifications-button' iconProps={{ iconName: 'Ringer' }} onClick={() => setIsOpen(true)} title="Notifications" ariaLabel="Notifications" />
      {
        operations.items && operations.items.filter((o: Operation) => inProgressStates.includes(o.status)).length > 0 &&
        <span style={{ marginTop: -15, display: 'block' }}>
          <ProgressIndicator barHeight={2} />
        </span>
      }
      {
        showCallout && !isOpen &&
        <Callout
          ariaLabelledBy={'labelId'}
          ariaDescribedBy={'descriptionId'}
          role="dialog"
          className={styles.callout}
          gapSpace={0}
          target={'#tre-notification-btn'}
          isBeakVisible={true}
          beakWidth={20}
          onDismiss={() => { setShowCallout(false) }}
          directionalHint={DirectionalHint.bottomLeftEdge}
          setInitialFocus
        >
          <Text block variant="xLarge" id={'labelId'}>
            {calloutDetails.success ?
              <Icon iconName="CheckMark" style={{ color: '#009900', position: 'relative', top: 4, marginRight: 10 }} /> :
              <Icon iconName="Error" style={{ color: '#990000', position: 'relative', top: 4, marginRight: 10 }} />
            }
            {calloutDetails.title}
          </Text>
          <br />
          <Text block variant="medium" id={'descriptionId'}>
            {calloutDetails.text}
          </Text>
        </Callout>
      }
      <Panel
        isLightDismiss
        isHiddenOnDismiss={true}
        headerText="Notifications"
        isOpen={isOpen}
        onDismiss={() => { setIsOpen(false) }}
        closeButtonAriaLabel="Close Notifications"
      >
        <div className="tre-notifications-dismiss">
          <Link href="#" onClick={(e) => { dispatch(dismissCompleted()); return false; }} disabled={
            operations.items.filter((o: Operation) => o.dismiss !== true && completedStates.includes(o.status)).length === 0
          }>Dismiss Completed</Link>
        </div>
        {
          operations.items.length === 0 &&
          <div style={{ marginTop: '20px' }}>
            <MessageBar
              messageBarType={MessageBarType.success}
              isMultiline={false}
            >
              No notifications to display
            </MessageBar>
          </div>
        }
        <ul className="tre-notifications-list">
          {
            operations.items.map((o: Operation, i: number) => {
              return (
                <NotificationItem operation={o} key={i} showCallout={(o: Operation, r: Resource) => callout(o, r)} />
              )
            })
          }
        </ul>
      </Panel>
    </>
  );
};

const styles = mergeStyleSets({
  buttonArea: {
    verticalAlign: 'top',
    display: 'inline-block',
    textAlign: 'center',
    margin: '0 100px',
    minWidth: 130,
    height: 32,
  },
  configArea: {
    width: 300,
    display: 'inline-block',
  },
  button: {
    width: 130,
  },
  callout: {
    width: 320,
    padding: '20px 24px',
  },
  title: {
    marginBottom: 12,
    fontWeight: FontWeights.semilight,
  },
  link: {
    display: 'block',
    marginTop: 20,
  },
});
AzureTRE/ui/app/src/components/shared/notifications/NotificationPanel.tsx/0
{ "file_path": "AzureTRE/ui/app/src/components/shared/notifications/NotificationPanel.tsx", "repo_id": "AzureTRE", "token_count": 2218 }
147
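A hypothetical sketch of an operations slice compatible with the selectors and actions the panel uses (state.operations.items, o.status, o.action, o.dismiss, setInitialOperations, dismissCompleted); the actual AzureTRE operationsSlice may be implemented differently, and the dismiss behaviour shown is only one plausible reading.

import { createSlice, PayloadAction } from "@reduxjs/toolkit";
import { completedStates, Operation } from "../../../models/operation";

interface OperationsState {
  items: Array<Operation>;
}

const initialState: OperationsState = { items: [] };

const operationsSlice = createSlice({
  name: "operations",
  initialState,
  reducers: {
    // Replaces the list with the operations loaded from the API when the panel mounts.
    setInitialOperations: (state, action: PayloadAction<Array<Operation>>) => {
      state.items = action.payload;
    },
    // Marks every completed operation as dismissed, which is what the panel's
    // "Dismiss Completed" link appears to rely on (assumes Operation has a dismiss field,
    // as the panel reads o.dismiss).
    dismissCompleted: (state) => {
      state.items.forEach((o) => {
        if (completedStates.includes(o.status)) {
          o.dismiss = true;
        }
      });
    },
  },
});

export const { setInitialOperations, dismissCompleted } = operationsSlice.actions;
export default operationsSlice.reducer;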
import React from "react";
import { CreateFormResource } from "../models/resourceType";

export const CreateUpdateResourceContext = React.createContext({
  openCreateForm: (createFormResource: CreateFormResource) => { }
});
AzureTRE/ui/app/src/contexts/CreateUpdateResourceContext.ts/0
{ "file_path": "AzureTRE/ui/app/src/contexts/CreateUpdateResourceContext.ts", "repo_id": "AzureTRE", "token_count": 56 }
148
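A hypothetical consumer sketch (not part of the record above) showing how the context might be read with React's useContext; the import paths and the button component are assumptions, while openCreateForm matches the default value defined in the record.

import React, { useContext } from "react";
import { CreateUpdateResourceContext } from "./CreateUpdateResourceContext";
import { CreateFormResource } from "../models/resourceType";

// Any descendant of the context provider can trigger the create/update form by
// calling openCreateForm with the resource description the form should target.
export const CreateResourceButton: React.FunctionComponent<{ formResource: CreateFormResource }> = ({ formResource }) => {
  const createFormCtx = useContext(CreateUpdateResourceContext);
  return (
    <button onClick={() => createFormCtx.openCreateForm(formResource)}>
      Create resource
    </button>
  );
};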
import { Resource } from "./resource";

export interface SharedService extends Resource { }
AzureTRE/ui/app/src/models/sharedService.ts/0
{ "file_path": "AzureTRE/ui/app/src/models/sharedService.ts", "repo_id": "AzureTRE", "token_count": 20 }
149
{ "chemical2id": { "naloxone": "D009270", "clonidine": "D003000", "nalozone": "-1", "alpha-methyldopa": "D008750", "[3h]-naloxone": "-1", "[3h]-dihydroergocryptine": "-1", "lidocaine": "D008012", "suxamethonium": "D013390", "suxamethonium chloride": "D013390", "sch": "D013390", "galanthamine hydrobromide": "D005702", "scopolamine": "D012601", "hyoscine": "D012601", "physostigmine": "D010830", "lithium": "D008094", "li": "D008094", "creatinine": "D003404", "fusidic acid": "D005672", "cyclosporin": "D016572", "cocaine": "D003042", "sulpiride": "D013469", "antidepressant": "D000928", "desferrioxamine": "D003676", "aluminium": "-1", "magnesium": "D008274", "acetylcholine": "D000109", "chloroacetaldehyde": "C004656", "cyclophosphamide": "D003520", "ifosfamide": "D007069", "caa": "C004656", "mesna": "D015080", "nitroglycerin": "D005996", "clotiazepam": "C084599", "thienodiazepine": "C013295", "benzodiazepines": "D001569", "ketoconazole": "D007654", "cortisol": "D006854", "deoxycorticosterone": "D003900", "11-deoxycortisol": "D003350", "aldosterone": "D000450", "angiotensin": "D000809", "captopril": "D002216", "tranexamic acid": "D014148", "amca": "D014148", "urea": "D014508", "angiotension ii": "D000804", "prostacyclin": "D011464", "bradykinin": "D001920", "labetalol": "D007741", "nitroprusside": "D009599", "po2": "C093415", "carbamazepine": "D002220", "folate": "D005492", "cbz": "D002220", "propylene glycol": "D019946", "hexafluorodiethyl ether": "D005481", "hfde": "D005481", "sodium": "D012964", "norepinephrine": "D009638", "epinephrine": "D004837", "hydralazine": "D006830", "etoposide": "D005047", "methotrexate": "D008727", "actomycin-d": "D003609", "cisplatin": "D002945", "paracetamol": "D000082", "phenacetin": "D010615", "dapsone": "D003622", "glutathione": "D005978", "gsh": "D005978", "pentose phosphate": "D010428", "metoprolol": "D008790", "propafenone": "D011405", "diltiazem": "D004110", "sparteine": "D013034", "debrisoquine": "D003647", "triazolam": "D014229", "triazolo": "D014229", "morphine": "D009020", "haloperidol": "D006220", "pentazocine": "D010423", "amphotericin": "D000666", "amphotericin b": "D000666", "amiodarone": "D000638", "prednisolone": "D011239", "steroid": "D013256", "indomethacin": "D007213", "prostaglandins": "D011453", "flunitrazepam": "D005445", "timolol": "D013999", "vitamin d3": "D002762", "calcium": "D002118", "thiamine": "D013831", "riboflavin": "D012256", "isoniazid": "D007538", "dothiepin hydrochloride": "D004308", "dothiepin": "D004308", "amitriptyline": "D000639", "diazepam": "D003975", "propranolol": "D011433", "aspirin": "D001241", "n-[4-(5-nitro-2-furyl)-2-thiazolyl]-formamide": "D005200", "fanft": "D005200", "estrogen": "D004967", "diethylstilbestrol": "D004054", "des": "D004054", "acriflavine": "D000167", "triamterene": "D014223", "dyazide": "C020743", "hydrochlorothiazide-triamterene": "C020743", "adriamycin": "D004317", "phosphorylcreatine": "D010725", "phophorylcreatine": "D010725", "creatine": "D003401", "adenosine": "D000241", "atp": "D000255", "streptomycin": "D013307", "rifampin": "D012293", "puromycin aminonucleoside": "D011692", "oxygen": "D010100", "pan": "D011692", "thiobarbituric acid": "C029684", "clomipramine": "D002997", "lisinopril": "D017706", "steroids": "D013256", "diphenhydramine": "D004155", "coniine": "C007112", "nicotine": "D009538", "prostaglandin e1": "D000527", "trimethaphan": "D014294", "pge1": "D000527", "tmp": "D014294", "isoflurane": "D007530", "lactate": "D019344", "pilocarpine": "D010862", "fluoxetine": "D005473", 
"dopamine": "D004298", "acetaminophen": "D000082", "ceftriaxone": "D002443", "beta lactam": "D047090", "bilirubin": "D001663", "clozapine": "D003024", "tetrandrine": "C009438", "fangchinoline": "C060802", "tet": "C009438", "fan": "C060802", "bisbenzylisoquinoline": "D044182", "ep": "D004837", "acetylsalicylic acid": "D001241", "asa": "D001241", "gemcitabine": "C056507", "vinorelbine": "C030852", "vnb": "C030852", "gem": "C056507", "warfarin": "D014859", "vitamin d": "D014807", "phosphate": "D010710", "vitamin k": "D014812", "gamma-carboxylated": "D015055", "gamma-carboxyglutamate": "D015055", "antidepressants": "D000928", "serotonin reuptake inhibitors": "D017367", "ssris": "D017367", "caffeine": "D002110", "fentanyl": "D005283", "la": "D019344", "zidovudine": "D015215", "lamivudine": "D019259", "indinavir": "D019469", "oral contraceptives": "D003276", "progestagen": "D011372", "desogestrel": "D017135", "gestodene": "C033273", "levonorgestrel": "D016912", "oral-contraceptive": "D003276", "oral contraceptive": "D003276", "gr 55562": "C103477", "cp 93129": "C065046", "ticlopidine": "D013988", "clopidogrel": "C055162", "no": "D009569", "calcitonin gene-related peptide": "D015740", "cgrp": "D015740", "serotonin": "D012701", "5-hydroxytriptamine": "D012701", "5-ht": "D012701", "paclitaxel": "D017239", "urotensin-ii": "D014579", "u-ii": "D014579", "apomorphine": "D001058", "corticosterone": "D003345", "acitretin": "D017255", "gabapentin": "C040029", "capsaicin": "D002211", "mdma": "D018817", "3,4-methylenedioxymethamphetamine": "D018817", "ecstasy": "D018817", "citrate": "C102006", "bumetanide": "D002034", "pravastatin": "D017035", "paroxetine": "D017374", "calcium gluconate": "D002125", "sodium citrate": "C102006", "loop diuretic": "D049994", "sirolimus": "D020123", "srl": "D020123", "atropine": "D001285", "alpha,beta-methylene adenosine-5'-triphosphate": "C002630", "alpha,beta-meatp": "C002630", "4-diphenylacetoxy-n-methylpiperidine": "C042375", "4-damp": "C042375", "methoctramine": "C054938", "pirenzepine": "D010890", "carbachol": "D002217", "potassium": "D011188", "isoprenaline": "D007545", "bzds": "D001569", "oxazepam": "D010076", "temazepam": "D013693", "zopiclone": "C515050", "disulfiram": "D004221", "alcohol": "D000431", "heparin": "D006493", "clomiphene": "D002996", "clomiphene citrate": "D002996", "cc": "D002996", "verapamil": "D014700", "histamine": "D006632", "vitamin d2": "D004872", "cholesterol": "D002784", "luminal": "D010634", "3-methyladenine": "C025946", "3ma": "C025946", "k": "D011188", "valproic acid": "D014635", "cannabis": "D002188", "argatroban": "C031942", "antituberculosis": "D000995", "capecitabine": "C110904", "fluoropyrimidines": "-1", "5-fluorouracil": "D005472", "5-fu": "D005472", "neurotensin": "D009496", "neurotensin type-1 receptor antagonist": "C079087", "sr48692": "C079087", "methyl dopa": "D008750", "sodium nitroprusside": "D009599", "h2o": "D014867", "l-dopa": "D007980", "mk-486": "D002230", "mao": "D008995", "dl-threo-dihydroxyphenylserine": "D015103", "fla-63": "D005406", "pimozide": "D010868", "5-htp": "D006916", "imipramine": "D007099", "ipratropium bromide": "D009241", "theophylline": "D013806", "ipratropium": "D009241", "prazosin": "D011224", "isosorbide dinitrate": "D007548", "diclofenac sodium": "D004008", "voltarol": "D004008", "phenylacetic acid": "C025136", "diclofenac": "D004008", "glyburide": "D005905", "sulfonylureas": "D013453", "sulfonylurea": "D013453", "snp": "D009599", "sodium pentothal": "D013874", "xenon": "D014978", "nitrous oxide": 
"D009609", "hg": "D008628", "prednisone": "D011241", "sulphasalazine": "D012460", "dacarbazine": "D003606", "dtic": "D003606", "metoclopramide": "D008787", "busulfan": "D002066", "saralasin": "D012504", "halothane": "D006221", "angiotensin ii": "D000804", "carbimazole": "D002231", "benzylthiouracil": "C019269", "n omercazole": "D002231", "basd ne": "C019269", "vitamin b12": "D014805", "folinic acid": "D002955", "zdv": "D015215", "cisplatinum": "D002945", "fluoroacetate": "D005463", "fluorocitrate": "C007744", "dihydrouracil": "C007419", "oxcarbazepine": "C036006", "chlorpromazine": "D002746", "ampicillin": "D000667", "gentamicin": "D005839", "methylprednisolone": "D008775", "penicillins": "D010406", "amoxicillin": "D000658", "beta-lactam": "D047090", "ax": "D000658", "penicillin": "D010406", "benzylpenicilloyl-poly-l-lysine": "-1", "bpo-pll": "-1", "benzylpenicilloate": "-1", "benzylpenicillin": "D010400", "pg": "D010400", "amp": "D000667", "bpo": "-1", "atracurium": "D001279", "vecuronium bromide": "D014673", "atracurium besylate": "D001279", "benzylisoquinolinium": "-1", "heparan sulphate": "D006497", "streptozotocin": "D013311", "cuprolinic blue": "C015445", "glycosaminoglycan": "D006025", "nitrogranulogen": "D008466", "ng": "D008466", "mtx": "D008727", "cy": "D003520", "divalproex sodium": "D014635", "mitoxantrone": "D008942", "leucovorin": "D002955", "mfl": "C085788", "mfl regimen": "C085788", "anthracycline": "D018943", "vasopressin": "D014667", "arginine vasopressin": "D001127", "avp": "D001127", "licl": "D018021", "mepivacaine": "D008619", "adrenaline": "D004837", "midazolam": "D008874", "tacrolimus": "D016559", "fk506": "D016559", "pg-9": "C087567", "3alpha-tropyl 2-(p-bromophenyl)propionate": "C087567", "s-(-)-et-126": "C098725", "ace inhibitors": "D000806", "angiotensin-converting enzyme (ace) inhibitor": "D000806", "oc": "D003276", "serotonergic antidepressants": "D018490", "venlafaxine": "C047426", "sertraline": "D020280", "lithium carbonate": "D016651", "tizanidine": "C023754", "bd1008": "C085527", "oligodeoxynucleotide": "D009838", "bd1018": "-1", "3s-1-[2-(3,4-dichlorophenyl)ethyl]-1,4-diazabicyclo[4.3.0]nonane": "-1", "bd1063": "C093337", "1-[2-(3,4-dichlorophenyl)ethyl]-4-methylpiperazine": "C093337", "lr132": "-1", "gaba": "D005680", "nmda": "D016202", "di-o-tolylguanidine": "C050232", "dtg": "C050232", "bd1031": "-1", "3r-1-[2-(3,4-dichlorophenyl)ethyl]-1,4-diazabicyclo[4.3.0]nonane": "-1", "e4031": "C063968", "cisapride": "D020117", "terfenadine": "D016593", "terodiline": "C010637", "vincristin": "D014750", "vincristine": "D014750", "prochlorperazine": "D011346", "antithymocyte globulin": "D000961", "d-penicillamine": "D010396", "ethanol": "D000431", "neostigmine": "D009388", "ach": "D000109", "kcl": "C522374", "dexamethasone": "D003907", "cyproterone acetate": "D017373", "ethinyl estradiol": "D004997", "cpa": "C048599", "ee": "D004997", "minoxidil": "D008914", "leuprolide acetate": "D016729", "lhrh-a": "D016729", "flutamide": "D005485", "testosterone": "D013739", "amb": "D000666", "posaconazole": "C101425", "quinine": "D011803", "organophosphate": "D010755", "organophosphorus": "D010755", "op": "D010755", "diisopropylfluorophosphate": "D007531", "dfp": "D007531", "sarin": "D012524", "soman": "D012999", "ops": "D010755", "pralidoxime-2-chloride": "D011220", "2pam": "D011220", "n(6)-cyclopentyl adenosine": "C048599", "dizocilpine maleate": "D016291", "atropine sulfate": "D001285", "mk801": "D016291", "imidazoline": "D048288", "rilmenidine": "C032302", 
"17beta-estradiol": "D004958", "tincture of crataegus": "C007145", "isoproterenol": "D007545", "tcr": "C007145", "alcoholic extract of the berries of hawthorn": "C007145", "crataegus oxycantha": "C007145", "adp": "D000244", "raloxifene": "D020849", "tamoxifen": "D013629", "gnc92h2": "-1", "raloxifene hydrochloride": "D020849", "extract of daucus carota seeds": "D010936", "dce": "D010936", "daucus carota extract": "D010936", "triamcinolone": "D014221", "bupivacaine": "D002045", "triamcinolone diacetate": "C030262", "estradiol- and testosterone esters": "C032109", "rapamycin": "D020123", "statin": "D019821", "statins": "D019821", "cyclic nucleotide": "D009712", "smoking": "D012906", "terbutaline": "D013726", "levetiracetam": "C026098", "valproate": "D014635", "lev": "C026098", "vpa": "D014635", "ne": "D009638", "yohimbine": "D015016", "methylphenidate": "D008774", "thalidomide": "D013792", "lenalidomide": "C467567", "green tea": "D010936", "vitamin e": "D014810", "iso": "D007545", "ca": "D002118", "na": "D012964", "mg": "D008274", "pegylated interferon": "C417083", "ribavirin": "D012254", "pegylated interferon (ifn) alpha-2b": "C417083", "pegylated ifn alpha-2b": "C417083", "ifn": "C417083", "glycine": "D005998", "ssr103800": "-1", "glutamate": "D018698", "n-methyl-d-aspartate": "D016202", "amphetamine": "D000661", "mk-801": "D016291", "olanzapine": "C076029", "aripiprazole": "C094645", "phenylephrine": "D010656", "ephedrine": "D004809", "propofol": "D015742", "dipyridamole": "D004176", "hydrocortisone": "D006854", "radiocalcium": "D002132", "calcium chloride": "D002122", "betaxolol": "D015784", "ifosfamide, vincristine, and dactinomycin": "C064227", "iva": "C064227", "mecamylamine": "D008464", "hexamethonium": "D018738", "sch 23390": "C534628", "raclopride": "D020891", "fluphenazine": "D005476", "phno": "-1", "skf 38393": "D015647", "mefloquine": "D015767", "mazindol": "D008454", "pentoxifylline": "D010431", "trental": "D010431", "thallium": "D013793", "methylxanthine": "C008514", "methylxanthines": "C008514", "pentoxyifylline": "D010431", "levodopa": "D007980", "noradrenaline": "D009638", "oxytocin": "D010121", "urethane": "D014520", "delta 9-tetrahydrocannabinol": "D013759", "thc": "D013759", "6-hydroxydopamine": "D016627", "6-ohda": "D016627", "desipramine": "D003891", "alfentanil": "D015760", "metocurine": "C032943", "cis-platin": "D002945", "deferoxamine": "D003676", "iron": "D007501", "flurbiprofen": "D005480", "mipafox": "C005238", "n, n'-diisopropylphosphorodiamidofluoridate": "C005238", "phenylpropanolamine": "D010665", "ppa": "D010665", "azathioprine": "D001379", "isoproterenols": "D007545", "polyethylene glycol 400": "D011092", "peg 400": "D011092", "adr": "D004317", "bcnu": "D002330", "1,3-bis-(2-chloroethyl)-1-nitrosourea": "D002330", "sodium chloride": "D012965", "cimetidine": "D002927", "phenobarbitone": "D010634", "carbon tetrachloride": "D002251", "ccl4": "D002251", "amiloride": "D000584", "alprazolam": "D000525", "dup 753": "D019808", "blood nitrogen urea": "D001806", "losartan": "D019808", "sodium bicarbonate": "D017693", "papaverine": "D010208", "phentolamine": "D010646", "octreotide": "D015282", "sodium valproate": "D014635", "amikacin": "D000583", "aminoglycosides": "D000617", "vancomycin": "D014640", "fluorescein": "D019793", "hydrochlorofluorocarbons": "-1", "ozone": "D010126", "chlorofluorocarbons": "D017402", "hcfcs": "-1", "cfcs": "D017402", "1,1-dichloro-2,2,2-trifluoroethane": "C067411", "hcfc 123": "C067411", "1-chloro-1,2,2,2-tetrafluoroethane": "C072959", 
"hcfc 124": "C072959", "1-bromo-1-chloro-2,2,2-trifluoroethane": "D006221", "trifluoroacetyl": "D014269", "hcfcs 123": "C067411", "hcfcs 124": "C072959", "prilocaine": "D011318", "pethidine": "D008614", "norpethidine": "C002752", "ibuprofen": "D007052", "ursodeoxycholic acid": "D014580", "appetite suppressants": "D001067", "appetite suppressant": "D001067", "appetite-suppressants": "D001067", "fenfluramines": "D005277", "methadone": "D008691", "heroine": "D003932", "tam": "D013629", "alpha-tocopherol": "D024502", "alpha-t": "D024502", "alpha-tocopherol acetate": "D024502", "alpha-tac": "D024502", "hydroxyl": "D017665", "aaph": "C046728", "tocopherols": "D024505", "nitric oxide": "D009569", "n(g)-nitro-l-arginine methyl ester": "D019331", "l-name": "D019331", "bromocriptine": "D001971", "domperidone": "D004294", "ketamine": "D007649", "cyclosporine": "D016572", "furosemide": "D005665", "amine": "D000588", "doxorubicin": "D004317", "ih636 grape seed proanthocyanidin extract": "C511402", "grape seed extract": "C511402", "proanthocyanidins": "D044945", "gspe": "C511402", "aap": "D000082", "ami": "D000638", "dox": "D004317", "citalopram": "D015283", "metamizol": "D004177", "charcoal": "D002606", "ifx": "D007069", "gamma-vinyl gaba": "D020888", "vigabatrin": "D020888", "gvg": "D020888", "da": "D018491", "estradiol": "D004958", "kainic acid": "D007608", "silver": "D012834", "hexamethonium chloride": "D018738", "bicuculline": "D001640", "azithromycin": "D017963", "nitro-l-arginine methyl ester": "D019331", "aminoglycoside": "D000617", "lignocaine": "D008012", "contrast medium": "D003287", "iopamidol": "D007479", "iodixanol": "C044834", "n-acetylcysteine": "D000111", "maltolyl p-coumarate": "C524754", "maltol": "C008316", "p-coumaric acid": "C032171", "amyloid beta peptide(1-42)": "C544092", "h2o2": "D006861", "levofloxacin": "D064704", "fluoroquinolone": "D024841", "hbsag": "D006514", "tyrosine": "D014443", "methionine": "D008715", "aspartate": "D001224", "dextromethorphan": "D003915", "sirolmus": "D020123", "mycophenolate mofetil": "C063008", "ace inhibitor": "D000806", "angiotensin-releasing blocker": "D057911", "arb": "D057911", "acei": "D000806", "n-pyrimidinyl-2-phenoxyacetamides": "D010642", "n-pyrimidinyl-2-phenoxyacetamide": "D010642", "methamphetamine": "D008694", "meth": "D008694", "mptp": "D015632", "everolimus": "C107135", "tac": "D016559", "pergolide": "D010479", "papaverine hydrochloride": "D010208", "bisphosphonate": "D004164", "alendronate": "D019386", "alendronate sodium": "D019386", "bisphosphonates": "D004164", "co-trimoxazole": "D015662", "oxycodone": "D010098", "modafinil": "C048833", "asenapine": "C522667", "lopinavir/ritonavir": "C558899", "pentobarbital": "D010424", "xanthine": "D019820", "dex": "D003907", "allopurinol": "D000493", "urate": "D014527", "risperidone": "D018967", "serotonin 5-ht2": "D044348", "simvastatin-ezetimibe": "C492458", "ezetimibe": "C108606", "simvastatin": "D019821", "simvastatin 10 mg-ezetimibe 40 mg": "C492458", "simvastatinezetimibe": "C492458", "escitalopram": "D015283", "uridine diphosphate": "D014530", "simvastatin hydroxy acid": "C532833", "methamphetamines": "D008694", "thyroxine": "D013974", "lipopolysaccharide": "D008070", "3,4-dihydroxyphenylacetic acid": "D015102", "enalapril": "D004656", "succinylcholine": "D013390", "ketamine hydrochloride": "D007649", "hepatitis b virus surface antigen": "D006514", "hbs ag": "D006514", "midazolan": "D008874", "mz": "D008874", "flumazenil": "D005442", "fl": "D005442", "bepridil": "D015764", 
"selegiline": "D012642", "metamphetamine": "D008694", "propylthiouracil": "D011441", "hydrogen cyanamide": "D003484", "aldehyde": "D000079", "acetaldehyde": "D000079", "dormex": "D003484", "trimipramine": "D014299", "tri": "D014299", "5-hydroxytryptamine": "D012701", "reserpine": "D012110", "5-hydroxytryptophan": "D006916", "d-amphetamine": "D003913", "quinpirole": "D019257", "ranitidine": "D011899", "acetazolamide": "D000086", "lindane": "D001556", "gamma benzene hexachloride": "D001556", "8-aminoquinoline": "C080436", "8-[(4-amino-l-methylbutyl)amino]- 5-(l-hexyloxy)-6-methoxy-4-methylquinoline": "C068820", "wr242511": "C068820", "kanamycin": "D007612", "sumatriptan": "D018170", "salbutamol": "D000420", "docetaxel": "C067311", "quaternary ammonium": "D000644", "um-272": "C002616", "n,n-dimethylpropranolol": "C002616", "ouabain": "D010042", "ciprofloxacin": "D002939", "danazol": "D003613", "combination of amoxicillin and clavulanic acid": "D019980", "amoxicillin-clavulanic acid": "D019980", "clavulanic acid": "D019818", "gamma-aminobutyric acid": "D005680", "procainamide": "D011342", "dextran": "D003911", "hoe-140": "C065679", "5-azacytidine": "D001374", "5-azc": "D001374", "benzo[a]-pyrene": "D001564", "n-methyl-n-nitrosourea": "D008770", "1,2-dimethylhydrazine": "D019813", "1,2-dmh": "D019813", "2-acetylaminofluorene": "D015073", "[3h]-5-azadeoxycytidine": "C014347", "cytosine": "D003596", "trihexyphenidyl": "D014282", "catecholamines": "D002395", "choline": "D002794", "dobutamine": "D004280", "tc99m-sestamibi": "D017256", "sestamibi": "D017256", "atazanavir": "C413408", "blood urea nitrogen": "D001806", "alanine": "D000409", "fluvastatin": "C065180", "rosuvastatin": "C422923", "atorvastatin": "C065179", "lovastatin": "D008148", "aponidine hydrochloride": "C016986", "apraclonidine": "C016986", "carmofur": "C017367", "mexiletine": "D008801", "mexitil-perlongets": "D008801", "calcitonin": "D002116", "molindone": "D008972", "ro 40-5967": "D020748", "nifedipine": "D009543", "hepatitis b virus e antigen": "D006513", "lam": "D019259", "hbeag": "D006513", "nucleotide": "D009711", "adefovir": "C053001", "tenofovir": "C096918", "sevoflurane": "C009250", "enflurane": "D004737", "apamin": "D001030", "superoxide": "D013481", "o2-": "D013481", "ato": "C065179", "99mtc-glucarate": "C067171", "glucaric acid": "D005937", "hip-his-leu": "C010980", "tobramycin": "D014031", "gentamicin sulfate": "D005839", "tobramycin sulfate": "D014031", "mpep": "C121465", "2-methyl-6-(phenylethynyl)pyridine": "C121465", "veratridine": "D014701", "desipramine hcl": "D003891", "cinacalcet hcl": "C476217", "cinacalcet": "C476217", "corticosteroids": "D000305", "paramethasone": "D010248", "metronidazole": "D008795", "dfo": "D003676", "angiotensin converting enzyme (ace) inhibitors": "D000806", "azidothymidine": "D015215", "azt": "D015215", "thymidine": "D013936", "carbon dioxide": "D002245", "phenytoin": "D010672", "halogenated hydroxyquinolines": "D006912", "clioquinol": "D007464", "tamca": "D014148", "sucrose": "D013395", "d-pen": "D010396", "hyaluronate": "D006820", "anthracyclines": "D018943", "daunorubicin": "D003630", "dnr": "D003630", "sulfonamides": "D013449", "nitrofurantoins": "D009582", "erythromycins": "D004917", "cephalosporins": "D002511", "quinolones": "D015363", "chlorambucil": "D002699", "alkylating agents": "D000477", "purine": "D011687", "folic acid": "D005492", "methyldopa": "D008750", "taxol": "D017239", "4'-0-tetrahydropyranyladriamycin": "C027260", "pirarubicin": "C027260", "adriamycinol": "C010013", 
"adriamycinone": "C010012", "tetrahydropyranyladriamycinol": "C027260", "gamma-hexachlorocyclohexane": "D001556", "gamma-hch": "D001556", "ptz": "D010433", "picrotoxin": "D010852", "ptx": "D010852", "3-mercaptopropionic acid": "D015097", "mpa": "D015097", "bcc": "D001640", "methyl 6,7-dimethoxy-4-ethyl-b-carboline-3-carboxylate": "C034818", "dmcm": "C034818", "strychnine": "D013331", "str": "D013331", "pentylenetetrazol": "D010433", "3h-tbob": "C046308", "t-butyl bicyclo-orthobenzoate": "C046308", "carboplatin": "D016190", "glycerin": "D005990", "amsacrine": "D000677", "ci-921": "C042315", "nsc 343499": "C042315", "9-[[2-methoxy-4-[(methylsulphonyl)amino]phenyl]amino] -n,5-dimethyl- 4-acridinecarboxamide": "C042315", "alpha-lipoic acid": "D008063", "ximelagatran": "C426686", "irbesartan": "C081309", "idraparinux": "C479958", "cyclosporin a": "D016572", "cefonicid": "D015790", "cefazedone": "C021341", "cephalosporin": "D002511", "pyridoxine": "D011736", "pyridoxine hydrochloride": "D011736", "niacinamide": "D009536", "pyridoxal": "D011730", "kynurenine": "D007737", "tryptophan": "D014364", "nra0160": "C121249", "5 - [2- ( 4- ( 3 - fluorobenzylidene) piperidin-1-yl) ethyl] - 4 -(4-fluorophenyl) thiazole-2-carboxamide": "C121249", "map": "D008694", "phencyclidine": "D010622", "pcp": "D010622", "troleandomycin": "D014217", "gemfibrozil": "D015248", "niacin": "D009525", "sulfasalazine": "D012460", "carbidopa": "D002230", "chlormethiazole": "D002719", "nitrazepam": "D009567", "mmf": "-1", "thioperamide": "C052075", "(r)-alpha-methylhistamine": "C069357", "ramh": "C069357", "thp": "C052075", "wr-2721": "D004999", "amifostine": "D004999", "mannitol": "D008353", "penicillamine": "D010396", "chloroquine": "D002738", "cephalothin": "D002512", "cephalothin sodium": "D002512", "s-limonene": "C008281", "s-perillyl alcohol": "C032208", "ro4368554": "C507242", "3-benzenesulfonyl-7-(4-methyl-piperazin-1-yl)1h-indole": "C507242", "trp": "D014364", "metrifonate": "D014236", "dfu": "C106876", "piroxicam": "D010894", "5,5-dimethyl-3-(3-fluorophenyl)-4-(4-methylsulphonyl) phenyl-2(5h)-furanon": "C106876", "allopregnanolone": "D011280", "3alpha-hydroxy-5alpha-pregnan-20-one": "D011280", "pregnanolone": "D011280", "3alpha-hydroxy-5beta-pregnan-20-one": "D011280", "ganaxolone": "C105051", "3alpha-hydroxy-3beta-methyl-5alpha-pregnan-20-one": "C105051", "cyclosporine a": "D016572", "csa": "D016572", "fucoidan": "C007789", "aceto-acetate": "C016635", "3-hydroxybutyrate": "D020155", "salvianolic acid a": "C066201", "malondialdehyde": "D008315", "n-(2-propylpentanoyl)urea": "C108761", "amino acid": "D000596", "vpu": "C108761", "telithromycin": "C106791", "spironolactone": "D013148", "cr": "D002857", "thiazide": "D049971", "nimodipine": "D009553", "glucose": "D005947", "carbon monoxide": "D002248", "udca": "D014580", "2-chloroprocaine-ce": "C004616", "sulindac": "D013467", "ritodrine": "D012312", "midazolam hydrochloride": "D008874", "levodopa/carbidopa": "C009265", "carbidopa/levodopa": "C009265", "vitamin b6": "D025101", "l-alpha-glyceryl-phosphorylcholine": "D005997", "l-alpha-glycerylphosphorylcholine": "D005997", "l-alpha-gfc": "D005997", "benzoylecgonine": "C005618", "be": "C005618", "ly274614": "C070935", "3sr,4ars,6sr,8ars-6-[phosphonomethyl]decahydr oisoquinoline-3- carboxylic acid": "C070935", "iprindole": "D007488", "dizocilpine": "D016291", "cacl2": "D002122", "cacl(2)": "D002122", "nacl": "D012965", "catecholamine": "D002395", "pac": "D017239", "glutamine": "D018698", "salt": "D017673", "h": "D006859", 
"ribonucleic acid": "D012313", "cholesteryl ester": "D002788", "ritonavir": "D019438", "e2": "D004958", "progesterone": "D011374", "cholesteryl esters": "D002788", "phenobarbital": "D010634", "tariquidar": "C402343", "l-arginine": "D001120", "hydrocortisone acetate": "C021650", "[3h]-l-arginine": "D001120", "cya": "D016572", "amantadine": "D000547", "amines": "D000588", "amantadine hydrochloride": "D000547", "normetanephrine": "D009647", "methylergonovine": "D008755", "aminophylline": "D000628", "co2": "D002245", "carbamylcholin": "D002217", "rizatriptan": "C093622", "ergotamine": "D004878", "bleomycin": "D001761", "vinca alkaloid": "D014748", "nelarabine": "C104457", "arag": "C104457", "vp": "D005047", "cpm": "D003520", "bupropion": "D016642", "zyban": "D016642", "bupropion hydrochloride": "D016642", "indocyanine green": "D007208", "fluorescein sodium": "D019793", "hoe 140": "C065679", "des arg10 hoe 140": "C078665", "des-arg10hoe 140": "C078665", "melphalan": "D008558", "thiotepa": "D013852", "lamotrigine": "C047781", "ethosuximide": "D005013", "omeprazole": "D009853", "didanosine": "D016049", "ddi": "D016049", "sunitinib": "C473478", "sorafenib": "C471405", "pegylated interferon alpha 2a": "C100416", "topiramate": "C052342", "3 alpha-tropyl 2-(p-bromophenyl)propionate": "C087567", "dicyclomine": "D004025", "hemicholinium-3": "D006426", "gamma-aminobutyric acidb": "D005680", "3-aminopropyl-diethoxy-methyl-phosphinic acid": "C066430", "r-(alpha)-methylhistamine": "C069357", "5-hydroxytryptamine4": "D012701", "2-methoxy-4-amino-5-chlorobenzoic acid 2-(diethylamino)ethyl ester": "C072790", "5-hydroxytryptamin1a": "D012701", "1-(2-methoxyphenyl)-4-[4-(2-phthalimido)butyl]piperazine": "C058895", "prostaglandin f2 alpha": "D015237", "p-choloroaniline": "C004658", "chlorhexidine-digluconate": "C010882", "colistin": "D003091", "povidone-iodine": "D011206", "picloxydine": "C005253", "bromperidol": "C006820", "curcumin": "D003474", "pyrrolidine dithiocarbamate": "C020972", "pdtc": "C020972", "propylthio- uracil": "D011441", "beta-carboline": "C036150", "methyl beta-carboline-3-carboxylate": "C036150", "beta-ccm": "C036150", "benzodiazepine": "D001569", "testosterone heptylate": "C004648", "thiopentone": "D013874", "glycopyrrolate": "D006024", "sodium acetylsalicylate": "-1", "pentylenetetrezol": "D010433", "flestolol": "C047847", "acc-9089": "C047847", "nitrendipine": "D009568", "perhexiline maleate": "C023470", "methscopolamine": "D019832", "alpha-methyltyrosine": "D019805", "p-chlorophenylalamine": "D010134", "fk 506": "D016559", "fr 139317": "C079574", "organophosphorus (op) poisons": "D009943", "op compound": "D009943", "selective serotonin reuptake inhibitor": "D017367", "ssri": "D017367", "hepatitis-b surface antigen": "D006514", "lamivudin": "D019259", "nucleoside": "D009705", "ginsenoside rg1": "C035054", "rg1": "C035054", "ginsenoside": "D036145", "gum arabic": "D006170", "gm": "D005839", "zonisamide": "C022189", "cycloheximide": "D003513", "dopamine agonist": "D018491", "dopac": "D015102", "hva": "D006719" }, "disease2id": { "hypertensive": "D006973", "hypotensive": "D007022", "cardiac asystole": "D006323", "depression": "D003866", "bradyarrhythmias": "D001919", "fasciculations": "D005207", "tetanic": "D013746", "fasciculation": "D005207", "twitch": "D013746", "tetanus": "D013746", "overdosage": "D062787", "chronic renal failure": "D007676", "nephropathy": "D007674", "renal failure": "D051437", "proteinuria": "D011507", "hypertension": "D006973", "glomerulosclerosis": "D005921", "crohn's 
disease": "D003424", "nausea": "D009325", "inflammatory bowel disease": "D015212", "myocardial injury": "D009202", "schizophrenic": "D012559", "myocardial infarction": "D009203", "ischemia": "D007511", "bundle branch block": "D002037", "tardive dystonia": "D004421", "tardive dyskinesia": "D004409", "parkinsonism": "D010302", "dystonia": "D004421", "ocular toxicity": "D005128", "auditory toxicity": "D006311", "visual toxicity": "D014786", "dyschromatopsy": "-1", "a loss of visual acuity": "D014786", "pigmentary retinal deposits": "D012164", "neurosensorial hearing loss": "D006319", "hearing loss": "D034381", "toxicity": "D064420", "myasthenia gravis": "D009157", "neuromuscular disease": "D009468", "quadriplegic": "D011782", "preeclampsia": "D011225", "postsynaptic neuromuscular blockade": "D009468", "paralysis": "D010243", "disorder of neuromuscular transmission": "D020511", "hemorrhagic": "D006470", "cystitis": "D003556", "bladder damage": "D001745", "pain": "D010146", "migraine": "D008881", "hepatitis": "D056486", "extensive hepatocellular necrosis": "D047508", "hepatotoxicity": "D056486", "cushing's syndrome": "D003480", "pulmonary insufficiency": "D011665", "renal insufficiency": "D051437", "intravascular coagulation": "D004211", "trauma": "D014947", "sepsis": "D018805", "renal damage": "D007674", "kidney damage": "D007674", "hypotension": "D007022", "reductions in mean arterial blood pressure": "D007022", "increase in heart rate and cardiac output": "D016534", "seizures": "D012640", "weight gain": "D015430", "tachycardia": "D013610", "death": "D003643", "gestational trophoblastic disease": "D031901", "choriocarcinoma": "D002822", "pulmonary obstruction": "D011655", "tumor": "D009369", "necrosis": "D009336", "embolism": "D004617", "pelvic tumor": "D010386", "sexual dysfunction": "D012735", "arthritis": "D001168", "rheumatoid arthritis": "D001172", "osteoarthritis": "D010003", "spondyloarthropathy": "D025242", "depressed mood": "D003866", "sexual dysfunctions": "D012735", "impotence": "D007172", "urothelial cancer": "D014523", "renal papillary necrosis": "D007681", "cancer of the renal pelvis": "D007680", "cancer of the ureter": "D014516", "cancer of the bladder": "D001749", "ureteric cancer": "D014516", "cancers": "D009369", "hemolytic anemia": "D000743", "leprosy": "D007918", "hemolysis": "D006461", "infection": "D007239", "coronary artery disease": "D003324", "shock": "D012769", "av block": "D054537", "impairment of ventricular function": "D018754", "adverse drug reactions": "D064420", "mania": "D001714", "depressed": "D003866", "organic mental disorder": "D019965", "delirium": "D003693", "manic": "D001714", "muscular rigidity": "D009127", "rigidity": "D009127", "akinetic": "D018476", "hyperkinetic": "D006948", "compression neuropathy": "D009408", "neuropathy of the radial nerve": "D020425", "myopathy": "D009135", "acute renal failure": "D058186", "cirrhosis": "D005355", "sporotrichosis": "D013174", "renal dysfunction": "D007674", "pleural effusion": "D010996", "pericardial effusion": "D010490", "neuropathy": "D009422", "sinuatrial disease": "D002318", "supraventricular tachyarrhythmias": "D013617", "pneumonitis": "D011014", "pleural effusions": "D010996", "pericardial effusions": "D010490", "proximal motor neuropathy": "D009468", "hyperkalemia": "D006947", "ascites": "D001201", "cor pulmonale": "D011660", "oliguria": "D009846", "dizziness": "D004244", "bradycardia": "D001919", "cardiomegaly": "D006332", "infarction": "D007238", "hypercalcemia": "D006934", "hyperphosphatemia": 
"D054559", "milk fever": "D010319", "diseases of peripheral nerves": "D010523", "peripheral nerve disease": "D010523", "sensori-motor neuropathy": "D010523", "guillain-barr syndrome": "D020275", "motor neuropathy": "D010523", "peripheral neuropathy": "D010523", "nutritional deficiency": "D044342", "diabetes mellitus": "D003920", "autonomic neuropathy": "D009422", "cranial neuropathy": "D003389", "malignancies": "D009369", "connective tissue disorders": "D003240", "neuropathies": "D009422", "depressive disorder": "D003866", "depressive illness": "D003866", "blurred vision": "D014786", "dry mouth": "D014987", "panic disorder": "D016584", "agoraphobia": "D000379", "panic disorders": "D016584", "impaired immediate free recall": "D008569", "delayed free recall was also impaired": "D008569", "behavioral impairment": "D001523", "bladder carcinomas": "D001749", "forestomach tumors": "D013274", "carcinogenesis": "D063646", "diabetic autonomic neuropathy": "D003929", "diabetic": "D003920", "adenohypophyseal tumors": "D010911", "pituitary tumors": "D010911", "nephrolithiasis": "D053040", "cardiotoxicity": "D066126", "cardiotoxic": "D066126", "neurotoxic": "D020258", "abnormal movements": "D004409", "deafness": "D003638", "dyskinesias": "D004409", "glomerulonephritis": "D005921", "pulmonary tuberculosis": "D014397", "proteinuric injury": "D011507", "sleep disturbance": "D012893", "angioedema": "D000799", "pulmonary edema": "D011654", "chest pain": "D002637", "myocardial infarctions": "D009203", "arthrogryposis": "D001176", "deformations": "D009140", "excessive flexion or extension of one or more toes": "D009140", "cranial hemorrhage": "D002543", "axonal damage": "D001480", "axonal injury": "D001480", "injury in the cortex": "D001480", "infarcts in substantia nigra pars reticulata": "D002544", "status epilepticus": "D013226", "traumatic": "D014947", "parkinson disability": "D009069", "parkinson's disease": "D010300", "motor disability": "D009069", "idiopathic parkinson's disease": "D010300", "cardiovascular toxicities": "D002318", "anaphylaxis": "D000707", "critically ill": "D016638", "allergic reactions": "D004342", "autoimmune hemolytic anemia": "D000744", "erythroblastocytopenia": "-1", "extrapyramidal symptoms": "D001480", "eps": "D001480", "orthostatic hypotension": "D007024", "seizure": "D012640", "agranulocytosis": "D000380", "thrombosis": "D013927", "platelet aggregation": "D001791", "blood coagulation": "D001778", "platelet aggregations": "D001791", "nonsmall cell lung carcinoma": "D002289", "nsclc": "D002289", "neutropenia": "D009503", "thrombocytopenia": "D013921", "neurotoxicity": "D020258", "myelosuppression": "D001855", "artery calcification": "D061205", "calcification": "D002114", "calcification of the artery": "D061205", "bipolar": "D001714", "bipolar depression": "D001714", "bipolar disorder": "D001714", "hypomanic": "D001714", "dsm-iv bipolar i": "D001714", "bipolar ii": "D001714", "hypomania": "D001714", "bipolar i": "D001714", "cardiac arrhythmia": "D001145", "mitral valve prolapse": "D008945", "ventricular fibrillation": "D014693", "retention of urine": "D016055", "chest wall rigidity": "D009127", "respiratory depression": "D012140", "urinary bladder retention": "D001745", "hydronephrosis": "D006869", "cardiomyopathy": "D009202", "aids": "D000163", "cm": "D009202", "mitochondrial dysfunction": "D028361", "locomotor hyperactivity": "D009069", "hyperlocomotion": "D009069", "cholestatic": "D002779", "jaundice": "D007565", "cholestasis": "D002779", "nephrotic syndrome": "D009404", 
"headache": "D006261", "migraineurs (without aura)": "D020326", "coronary aneurysm": "D003323", "aneurysm": "D000783", "vessel rupture": "-1", "penile erection": "D010409", "psychiatric disorders": "D001523", "dysphonia": "D055154", "tissue injury": "D017695", "secondary hyperalgesia": "D006930", "neurogenic hyperalgesia": "D006930", "neuropathic pain": "D009437", "hyperalgesia": "D006930", "impaired social and emotional judgement processes": "D003072", "memory deficits": "D008569", "hyperlipidemia": "D006949", "tetany": "D013746", "muscle contractions": "C536214", "hypocalcemia": "D006996", "chronic allograft nephropathy": "D051436", "can": "D007674", "neoplasia": "D009369", "kaposi's sarcoma": "D012514", "skin cancers": "D012878", "intestinal tumors": "D007414", "renal cell carsinom": "D002292", "nephrotic": "D009404", "membranoproliferative glomerulopathy": "D015433", "interstitial nephritis": "D009395", "dementia": "D003704", "inability to sleep": "D007319", "tiredness": "D005221", "depressive symptoms": "D003866", "vocal fold palsy": "D014826", "overdose": "D062787", "quadriparesis": "D011782", "sensory loss": "C580162", "paresthesia": "D010292", "ataxia": "D001259", "giddiness": "D004244", "hoarseness": "D006685", "polyneuropathy": "D011115", "palsy": "D010243", "hit": "D013921", "thrombotic": "D013927", "retinal vein occlusion": "D012170", "infertility": "D007247", "thromboembolic": "D013923", "visual disturbance": "D014786", "nystagmus": "D009759", "nin": "D009759", "gastric hemorrhagic": "D006471", "ulcers": "D014456", "atherosclerotic": "D050197", "gastric hemorrhage": "D006471", "ulcer": "D014456", "atherosclerosis": "D050197", "heart failure": "D006333", "confusion": "D003221", "learning deficits": "D007859", "impairments in learning": "D007859", "impairments in memory": "D008569", "deficits in learning": "D007859", "deficits in memory": "D008569", "hyperactivity": "D006948", "intraoperative bleeding": "D016063", "postoperative bleeding": "D019106", "hepatic impairment": "D008107", "coagulopathy": "D001778", "acute liver failure": "D017114", "alf": "D017114", "hepatitis virus infection": "D006525", "tuberculosis": "D014376", "icterus": "D007565", "encephalopathy": "D001927", "cerebral edema": "D001929", "gastrointestinal bleed": "D006471", "hepatitis e": "D016751", "central nervous system complications": "D002493", "acute lymphoblastic leukemia": "D054198", "central nervous system (cns) complications": "D002493", "all": "D054198", "neurological complications": "D002493", "leukemic infiltration": "D017254", "neurocognitive defects": "D002493", "leukoencephalopathy": "D056784", "stroke": "D020521", "temporal lobe epilepsy": "D004833", "inappropriate antidiuretic hormone secretion": "D007177", "tumors": "D009369", "colorectal cancers": "D015179", "breast cancers": "D001943", "head and neck cancers": "D006258", "renal and kidney disease": "D007674", "pancreaticobiliary cancers": "D010190", "gastric cancers": "D013274", "renal cell cancers": "D002292", "diarrhea": "D003967", "vomiting": "D014839", "stomatitis": "D013280", "hand-foot syndrome": "D060831", "hepatic dysfunctions": "D008107", "renal dysfunctions": "D007674", "parkinsonian catalepsy": "D002375", "parkinsonian symptoms": "D010302", "psychiatric": "D001523", "depressions": "D003866", "decrease in arterial blood pressure": "D007022", "decreased cardiac output": "D002303", "decreases in arterial blood pressure": "D007022", "decreases in cardiac output": "D002303", "acute myocardial infarction": "D009203", "atherosclerotic 
obstruction": "D050197", "coronary occlusion": "D054059", "spasm": "D013035", "thrombus": "D013927", "rabbit syndrome": "D001480", "decreased basal ganglia perfusion": "D001480", "movement disorder": "D009069", "chronic obstructive pulmonary disease": "D029424", "cardiovascular systems": "D002318", "gastrointestinal systems": "D005767", "chronic airflow obstruction": "D029424", "rpn": "D007681", "polyuria": "D011141", "hyperthermia": "D005334", "toxicities": "D064420", "leukopenia": "D007970", "kidney injury": "D007674", "cardiac lesions": "D006331", "renal lesions": "D007674", "stress incontinence": "D014550", "incontinence": "D014549", "myocardial ischemia": "D017202", "coronary arterial stenosis": "D023921", "acute coronary insufficiency": "D054058", "akathisia": "D017109", "obsessive compulsive disorder": "D009771", "major depression": "D003865", "anxiety": "D001008", "chronic active hepatitis": "D006521", "abnormalities of liver function": "D056486", "intracranial aneurysms": "D002532", "arteriovenous malformations": "D001165", "cerebral vasculitis": "D020293", "cerebral infarction": "D002544", "intracerebral hemorrhage": "D002543", "subarachnoid hemorrhage": "D013345", "intracranial hemorrhage": "D020300", "type ii diabetes mellitus": "D003924", "acute hepatitis-like syndrome": "D056486", "viral infection": "D014777", "drug-induced hepatitis": "D056486", "acute hepatitis-like illness": "D056486", "hemorrhage": "D006470", "hem": "D006470", "hypovolemia": "D020896", "cerebral ischaemia": "D002545", "vasospasm": "D020301", "gangrene": "D005734", "cerebral aneurysms": "D002532", "cerebral aneurysm": "D002532", "subarachnoid haemorrhage": "D013345", "allergic reaction": "D004342", "angioneurotic edema": "D000799", "carcinoma of the oral cavity": "D009062", "impaired renal function": "D007674", "sinoatrial block": "D012848", "primary cardiomyopathy": "D009202", "wolff-parkinson-white syndrome": "D014927", "supraventricular tachycardia": "D013617", "sinus bradycardia": "D012804", "congenital anomalies": "D000013", "ulcerative colitis": "D003093", "coarctation of the aorta": "D001017", "ventricular septal defect": "D006345", "potter-type iia polycystic kidney": "D007690", "rudimentary left uterine cornu": "-1", "potter's facies": "-1", "hypoplastic lungs": "-1", "absent kidneys and ureters": "-1", "talipes equinovarus": "D003025", "veno-occlusive liver disease": "D006504", "melanoma": "D008545", "veno-occlusive disease of the liver": "D006504", "venous congestion": "D006940", "abnormal involuntary movements": "D004409", "gastrointestinal disorder": "D005767", "intra-hisian block": "D006327", "atrial tachycardia": "D013617", "intraventricular conduction abnormalities": "D006345", "atrial flutter": "D001282", "carcinoma": "D002277", "increase in blood pressure": "D006973", "toxic hepatitis": "D056486", "hepatic adverse effects": "D056486", "hyperthyroidism": "D006980", "bone marrow suppression": "D001855", "human immunodeficiency virus (hiv)-infected": "D015658", "myelotoxicity": "D001855", "gastric adenocarcinoma": "D013274", "disorientation": "D003221", "irritability": "D001523", "coma": "D003128", "organic psychotic": "D019965", "erythema multiforme": "D004892", "hypersensitivity myocarditis": "D009205", "septicemia": "D018805", "congestive heart failure": "D006333", "myocarditis": "D009205", "infections": "D007239", "drug-induced allergic reaction": "D004342", "allergy": "D004342", "allergic": "D004342", "urticaria": "D014581", "mdm": "D007645", "end-stage renal disease": "D007676", 
"esrd": "D007676", "nephrotoxic": "D007674", "diabetic nephropathy": "D003928", "diabetes": "D003920", "albuminuria": "D000419", "renal toxicity": "D007674", "nephrotoxicity": "D007674", "cognitive and functional deficits": "D003072", "tremor": "D014202", "cognitive deficits": "D003072", "loss of creativity": "D003072", "functional impairments": "D003072", "cognitive and functional impairments": "D003072", "cognitive, motivational, or creative deficits": "D003072", "breast cancer": "D001943", "impaired heart function": "D006331", "diabetes insipidus": "D003919", "dehydration": "D003681", "cardiac arrest": "D006323", "hyperkalaemia": "D006947", "meningitis": "D008581", "hypersensitivity": "D004342", "atrial fibrillation": "D001281", "agitation": "D011595", "incomprehensible shouts": "D019954", "loss of consciousness": "D014474", "dupuytren's contracture": "D004387", "iga nephropathy": "D005922", "focal segmental glomerulosclerosis": "D005923", "interstitial fibrosis": "D005355", "amnesia": "D000647", "respiratory arrest": "D012131", "desaturation": "D001049", "neuromuscular blockade": "D020879", "venous thromboembolism": "D054556", "vte": "D054556", "aggressive behavior": "D010554", "aggressiveness": "D010554", "urinary incontinence": "D014549", "spasticity": "D009128", "disorders of the central nervous system": "D002493", "kidney disease": "D007674", "transplant glomerulopathy": "D007674", "tg": "D007674", "glomerulopathies": "D007674", "thrombotic microangiopathy": "D057049", "malignant hypertension": "D006974", "acute tubular necrosis": "D007683", "lupus nephritis": "D008181", "henoch-schonlein nephritis": "D011695", "kidney diseases": "D007674", "endothelial injury": "D014947", "immunologic injury": "D007154", "convulsions": "D012640", "convulsive": "D012640", "torsades de pointes": "D016171", "tdp": "D016171", "ventricular tachycardia": "D017180", "myeloencephalopathy": "D001927", "acute lymphoblastic leucemia": "D054198", "lymphoblastic lymphoma": "D054198", "opistothonus dysfunction": "D020258", "sensory dysfunction": "D020258", "motor dysfunction": "D020258", "degeneration of myelin": "D003711", "degeneration of axons": "D009410", "pseudocystic transformation": "-1", "aplastic anemia": "D000741", "convulsion": "D012640", "convulsants": "D012640", "renal injury": "D007674", "reduction in glomerular number": "D007674", "elevated blood pressures": "D006973", "acne": "D000152", "hirsutism": "D006628", "polycystic ovary syndrome": "D011085", "pcos": "D011085", "pseudoacromegaly": "D004194", "acromegaly": "D000172", "endocrine disorder": "D004700", "hypertrophy": "D006984", "cutis verticis gyrata": "C535610", "anemia": "D000740", "prostate cancer": "D011471", "prostatic adenocarcinoma": "D000230", "dilated cardiomyopathy": "D002311", "coccidioidomycosis": "D003047", "nocturnal leg cramps": "D020922", "neuropathological damages": "D004194", "poisoning": "D011041", "a reduced locomotor activity": "D001523", "osteoporosis": "D010024", "cataracts": "D002386", "gallbladder disease": "D005705", "endometrial hyperplasia": "D004714", "endometrial cancer": "D016889", "biliary pseudolithiasis": "D001660", "pseudolithiasis": "D001660", "gallbladder dysfunction": "D005705", "cocaine overdose": "D062787", "osteopenia": "D001851", "amenorrhea": "D000568", "prolonged qt interval": "D008133", "fungal infection": "D009181", "long qt syndrome": "D008133", "cognitive dysfunctions": "D003072", "cauda equina syndrome": "D011128", "radiculopathy": "D011843", "low back pain": "D017116", "numbness": "D006987", 
"lower extremity weakness": "D020335", "loss of sensation": "D006987", "neurologic deterioration": "D009422", "autoimmunity": "D001327", "cystic renal diseases": "D052177", "renal cancer": "D007680", "acute renal dysfunction": "D058186", "chronic renal damage": "D051436", "hyperckaemia": "-1", "necrotic": "D009336", "cardiovascular disease": "D002318", "arrhythmia": "D001145", "coronary heart disease": "D003327", "hearing impairment": "D034381", "decreases of teoaes amplitudes": "-1", "neuroinflammation": "D020078", "behavioral abnormalities": "D001523", "autism": "D001321", "neurodevelopmental disorder": "D002658", "deficits in communication and social skills": "D003147", "repetitive behaviors": "D001523", "preterm labor": "D007752", "autism spectrum disorders": "D002659", "schizophrenia": "D012559", "psychosis": "D011605", "idiopathic epilepsy": "C562694", "tonic-clonic seizures": "D004830", "impaired word fluency, psychomotor speed and working memory": "D008569", "attention-deficit/hyperactivity disorder": "D001289", "adhd": "D001289", "drowsiness": "D006970", "non-hodgkin lymphomas": "D008228", "cancer": "D009369", "leukemia": "D007938", "multiple myeloma": "D009101", "mantle cell lymphoma": "D020522", "lymphoplasmacytic lymphoma": "D008223", "tumour": "D009369", "lymphomas": "D008223", "fatigue": "D005221", "somnolence": "D006970", "dyspnea": "D004417", "priapism": "D011317", "ocular myasthenia": "D009157", "chronic hepatitis c": "D019698", "diplopia": "D004172", "chc": "D019698", "ptosis on the right upper lid": "D001763", "restricted right eye movement": "D015835", "catalepsy": "D002375", "reduces frontal lobe oxygenation": "D002534", "a decrease in map": "D007022", "a decrease in co": "D002303", "obsessive-compulsive-like behaviors": "D009771", "obsessive-compulsive disorder": "D009771", "ocd": "D009771", "behavioral inflexibility": "-1", "memory impairment": "D008569", "hoarding": "D060845", "corticostriatal dysfunction": "-1", "hemolytic uremic syndrome": "D006463", "hus": "D006463", "anuria": "D001002", "microangiopathic hemolytic anemia": "D000743", "depressive": "D003866", "glaucomatous": "D005901", "malignant mesenchymal tumors": "C535700", "malignant mesenchymal tumor": "C535700", "glucosuria": "D006030", "aminoaciduria": "D000608", "renal abnormalities": "D007674", "fanconi's syndrome": "D005198", "tdfs": "D005198", "mesenchymal tumors": "C535700", "increase in locomotor activity": "D006948", "psychoses": "D011605", "anxiety neurosis": "D001008", "disturbances of sleep-wake rhythm": "D012893", "malaria": "D008288", "duchenne dystrophy": "D020388", "weakness": "D018908", "decreased appetite": "D001068", "gastrointestinal symptoms": "D012817", "hyperemia": "D006940", "peripheral vascular disease": "D016491", "intermittent claudication": "D007383", "cerebral haemorrhage": "D002543", "pneumonia": "D011014", "bronchitis": "D001991", "neoplasms": "D009369", "heart diseases": "D006331", "septicaemia": "D018805", "deaths": "D003643", "parkinsonian": "D010300", "argentine hemorrhagic fever": "D006478", "ahf": "D006478", "viremia": "D014766", "angina": "D000787", "somatic rigidity": "D009127", "head and neck carcinoma": "D006258", "esophageal carcinoma": "D004938", "sudden death": "D003645", "tachyarrhythmia": "D013610", "auditory neurotoxicity": "D006311", "visual neurotoxicity": "D014786", "abnormal audiograms with deficits mostly in the high frequency range of 4,000 to 8,000 hz": "D006316", "permanent disability": "D003638", "auditory abnormality": "D006311", "ototoxicity": 
"D006311", "auditory dysfunction": "D006311", "juvenile rheumatoid arthritis": "D001171", "tender joints": "-1", "swelling": "D004487", "tenderness": "-1", "morning stiffness": "-1", "fecal occult blood": "-1", "gastrointestinal (gi) bleeding": "D006471", "abdominal pain": "D015746", "neuropathic damage": "D009422", "cord damage": "D013118", "neuropsychiatric symptoms": "D001523", "psoriasis": "D011565", "fibrosis": "D005355", "liver damage": "D056486", "ebstein's anomaly": "D004437", "cardiac malformations": "D006331", "infarcts": "D007238", "cardiac morphological alterations": "D009202", "l1210 leukemia": "D007939", "ehrlich ascites tumor": "D002286", "malignant gliomas": "D005910", "astrocytomas": "D001254", "edema": "D004487", "loss of vision": "D014786", "retinal vasculitis": "D031300", "visual loss": "D014786", "liver disease": "D008107", "clear cell adenocarcinoma": "D018262", "adenocarcinoma of the vagina": "D014625", "enlargement of the liver": "D006529", "cirrhosis of the liver": "D008103", "splenomegaly": "D013163", "atrophy": "D001284", "diabetes-insipidus-like syndrome": "D003919", "polydipsia": "D059606", "enuresis": "D004775", "aggression": "D001523", "impaired memory": "D008569", "weight loss": "D015431", "nephrosis": "D009401", "nephrotic syndromes": "D009404", "hypoalbuminemia": "D034141", "hypercholesterolemia": "D006937", "penile pain": "D004414", "erectile dysfunction": "D007172", "gallstone": "D042882", "acromegalic": "D000172", "gallstones": "D042882", "acute cholecystitis": "D041881", "cholecystitis": "D002764", "dyskinesia": "D004409", "pd": "D010300", "neurological disorders": "D009422", "cerebellum damage": "D002526", "hepatic damage": "D056486", "hyperammonemia": "D022124", "endophthalmitis": "D009877", "retinal toxicity": "D012164", "streptococcal endophthalmitis": "D013290", "telangiectasis": "D013684", "ischaemia": "D007511", "atrioventricular reentrant tachycardia": "D013611", "idiopathic dilated cardiomyopathy": "D002311", "wpw syndrome": "D014927", "avrt": "D013611", "halothane hepatitis": "C562477", "liver injury": "D056486", "sensorineural hearing loss": "D006319", "neurological deficit": "D009461", "loss of pinprick sensation": "D012678", "postoperative pain": "D010149", "vanishing bile duct": "D001649", "stevens-johnson syndromes": "D013262", "stevens-johnson syndrome": "D013262", "vanishing bile duct syndrome": "D001649", "cholestatic disease": "D002779", "primary pulmonary hypertension": "D006976", "choreoathetoid movements": "D002819", "choreatiform": "D002819", "hyperkinesias": "D006948", "movement abnormalities": "D020820", "mood disorder": "D019964", "mood disorders": "D019964", "cimd": "D019970", "depressive disorders": "D003866", "hemolytic": "D006461", "cardiac hypertrophy": "D006332", "tma": "D057049", "chronic renal insufficiency": "D051436", "systolic dysfunction": "D006331", "renal artery stenosis": "D012078", "renovascular disease": "D014652", "lung toxicity": "D008171", "tissue damage": "D017695", "palpebral twitching": "D004409", "constipation": "D003248", "constipating": "D003248", "chronic pain": "D059350", "asterixis": "D020820", "myoclonus": "D009207", "plasmacytoma": "D010954", "structural lesions of the brain": "D001927", "metabolic abnormalities": "D008659", "substance abuse": "D019966", "visual field defects": "D005128", "vfd": "D005128", "bleeding": "D006470", "hematoma": "D006406", "ich": "D002543", "hippocampal injury": "D001930", "se": "D013226", "neurologic sequelae": "D009422", "weakness of extremities": "D018908", "legs 
paralysis": "D010243", "dysarthria": "D004401", "vascular dysfunctions": "D002561", "cluster headache": "D003027", "intracranial vascular disturbances": "D002561", "increases in dural and cortical blood flow": "D006940", "increases in dural blood flow": "D006940", "hypolocomotion": "D006948", "tremors": "D014202", "epileptic": "D004827", "high-frequency hearing loss": "D006316", "ototoxic": "D006311", "amphetamine abuse": "D019969", "ischaemic stroke": "D002544", "ischaemic strokes": "D002544", "vasculitis": "D014657", "cerebral haemorrhages": "D002543", "haemorrhage": "D006470", "cataract": "D002386", "nausea, vomiting": "D020250", "postoperative emetic symptoms": "D020250", "chronic kidney disease": "D051436", "cognitive decline": "D003072", "alzheimer's disease": "D000544", "neuronal death": "D009410", "decline of cognitive function": "D003072", "hepatitis b virus (hbv) infected": "D006509", "hiv co-infection": "D015658", "hbv infected": "D006509", "hiv infected": "D015658", "hepatitis b": "D006509", "human immunodeficiency virus (hiv) co-infection": "D015658", "hbv mono-infected": "D006509", "acute pain": "D059787", "neurodegeneration": "D009422", "cns damage": "D009422", "brachial neuritis": "D020968", "headaches": "D006261", "neurological deficits": "D009461", "myelitis": "D009187", "brachial plexitis": "D020968", "valvular heart disease": "D006349", "valvular heart abnormalities": "D006349", "valvular heart abnormality": "D006349", "aortic regurgitation": "D001022", "mitral regurgitation": "D008944", "valvular regurgitation": "D006349", "valve regurgitation": "D006349", "cranial nerve dysfunction": "D003389", "adverse effect on the proximal eighth nerve": "D000160", "cranial nerve deficits": "D003389", "volume retention": "D016055", "lipidemia": "D006949", "uremia": "D014511", "pneumocystis pneumonia": "D011020", "hiv-infected": "D015658", "pcp": "D011020", "opportunistic infection": "D009894", "intrahepatic cholestasis": "D002780", "liver abscess": "D008100", "declines in simple and sustained attention": "D003072", "declines in working memory, and verbal memory": "D008569", "daytime sleepiness": "D012893", "bipolar i disorder": "D001714", "psychotic": "D011618", "cardiac toxicity": "D066126", "human immunodeficiency virus infection": "D015658", "heart block": "D006327", "memory dissociation": "D008569", "angiosarcoma": "D006394", "angiosarcoma of the liver": "D008113", "adenocarcinoma": "D000230", "adenocarcinoma of the liver": "D008113", "intraarterial lesions": "D014652", "ht": "D006973", "increased sbp": "D006973", "decreased thymus (p < 0.001) and bodyweights": "D015431", "psychotic symptoms": "D011605", "drug-induced parkinsonism": "D010302", "hepatic failure": "D017093", "hepatotoxic": "D056486", "fulminant hepatic failure": "D017114", "drug toxicity": "D064420", "liver failure": "D017093", "meth mouth": "-1", "cardiac dysrhythmias": "D001145", "hallucinations": "D006212", "violent behavior": "D001523", "xerostomia": "D014987", "caries": "D003731", "tooth wear": "D057085", "bad breath": "D012120", "carious lesions": "D003731", "carious episodes": "D003731", "thyrotoxicosis": "D013971", "eating disorders": "D001068", "drug abuse": "D019966", "dopaminergic terminal damage": "D009422", "glomerular sclerosis": "D007674", "apnea": "D001049", "fractures": "D050723", "fracture": "D050723", "dislocation": "D004204", "emesis": "D014839", "clumsiness": "D001259", "ataxic movements": "D001259", "dysphoric reaction": "-1", "rheumatologic disorders": "D012216", "rheumatologic disease": 
"D012216", "rheumatologic diseases": "D012216", "abnormal liver function": "D056486", "myocardiopathy": "D009202", "mp": "D009202", "mr": "D008944", "hypoxia": "D000860", "airway obstruction": "D000402", "postural hypotension": "D007024", "systolic orthostatic hypotension": "D007024", "reduced the supine systolic and diastolic blood pressures": "D007024", "pains": "D010146", "renovascular hypertension": "D006978", "sudden deterioration of renal function": "D058186", "chronic active (aggressive) hepatitis": "D006521", "muscle pain": "D063806", "temporomandibular disorders": "D013705", "nociceptive muscle": "D063806", "painful muscle": "D063806", "flushing of the face": "D005483", "flushing": "D005483", "arterial hypotension": "D007022", "hypothermia": "D007035", "neuromuscular disorders": "D009468", "myotonia": "D009222", "renal calculi": "D007669", "renal calculus": "D007669", "calculus": "D002137", "scabies": "D012532", "toxic to the central nervous system": "D002493", "aplastic anaemia": "D000741", "masseter spasm": "D014313", "myotonia congenita": "D009224", "methemoglobinemia": "D008708", "hemoglobinuria": "D006456", "myoglobinuria": "D009212", "liver toxicity": "D056486", "kidney toxicity": "D007674", "atypical sensations": "D010292", "tingling or burning sensations": "D010292", "sunburn": "D013471", "obstructive lung disease": "D008173", "ventricular tachycardias": "D017180", "haemolytic anaemia": "D000743", "urinary tract infection": "D014552", "haemolysis": "D006461", "petechiae": "D011693", "purpura": "D011693", "haemorrhages": "D006470", "bone marrow depression": "D001855", "thrombi": "D013927", "microangiopathies": "D014652", "compartment syndrome": "D003161", "myonecrosis": "D009135", "hypothyroidism": "D007037", "hypothyroid": "D007037", "arteriopathic": "D014652", "bile duct hamartoma": "D001650", "hamartoma": "D006222", "liver mass": "D008107", "granulomatous": "D006099", "granulomas": "D006099", "cholestatic syndrome": "D002779", "eosinophilia": "D004802", "cocaine abuse": "D019970", "aneurysms": "D000783", "ruptured aneurysms": "D017542", "aneurysmal rupture": "D017542", "absence seizures": "D004832", "absence epilepsy": "D004832", "absence seizure": "D004832", "epilepsy": "D004827", "premature ventricular contractions": "D018879", "q-t prolongation": "D008133", "syncope": "D013575", "prolonged q-t syndrome": "D008133", "initiation induced by carcinogens": "D011230", "initiation of carcinogenic process": "D011230", "withdrawal syndrome": "D013375", "emergent rabbit syndrome": "D001480", "rs": "D001480", "withdrawal rs": "D013375", "emergent rs": "D001480", "meningeal leukemia": "D008577", "meningeal disease": "D002493", "mucositis": "D052016", "transient hemiparesis": "D010291", "leukemoid reaction": "D007955", "erythroderma": "D003873", "hyponatremia": "D007010", "locomotor hypoactivity": "D009069", "rhabdomyolysis": "D012206", "human immunodeficiency virus": "D015658", "squamous cell esophageal carcinoma": "C562729", "granulocytopenia": "D000380", "peripheral neurotoxicity": "D010523", "esophageal squamous cell carcinoma": "C562729", "ocular hypotensive": "D015814", "decreases in systolic blood pressure": "D007022", "conjunctival blanching": "D003229", "mydriasis": "D015878", "entropion": "D004774", "corneal abrasion": "D003316", "organic mental disorders": "D019965", "organic personality syndrome": "D010554", "frontal lobe syndrome": "D001927", "structural damage to the frontal lobe": "D001927", "gastrointestinal problems": "D012817", "intracranial bleeding": "D013345", 
"wheezing": "D012135", "respiratory distress": "D012128", "rash": "D005076", "cardiovascular alterations": "D018376", "cardiovascular malformations": "D018376", "infarct": "D007238", "cardiac infarction": "D009203", "disruptive behaviors": "D019958", "psychomotor agitation": "D011595", "extrapyramidal signs": "D001480", "renal interstitial damage": "D007674", "focal glomerulosclerosis": "D005923", "lid": "D004409", "conjunctivitis": "D003231", "pancreatitis": "D010195", "impaired consciousness": "D003244", "haemolytic-uraemic syndrome": "D006463", "liver dysfunction": "D008107", "hepatic dysfunction": "D008107", "extrahepatic cholestasis": "D001651", "beta-thalassemic": "D017086", "beta-thalassemia": "D017086", "snhl": "D006319", "thalassemic": "D013789", "inferior colliculus lesion": "D001927", "inferior colliculus lesions": "D001927", "callosal lesion": "D001927", "apnoea": "D001049", "obstructive (p less than 0.05) apnoea": "D020181", "central apnoea": "D020182", "tachyarrhythmias": "D013610", "ventricular ectopic beats": "D018879", "overdosages": "D062787", "cerebellar atrophy": "D002526", "cerebellar disorders": "D002526", "myocardial damage": "D009202", "myocardial cell injury": "D009202", "cardiac disorders": "D006331", "ischemic injury": "D017202", "cips": "-1", "bone marrow": "D001855", "oedema": "D004487", "reflex sympathetic dystrophy": "D012019", "morton's neuralgia": "D009437", "gout": "D006073", "avascular necrosis": "D010020", "foot deformities": "D005530", "stress fractures": "D015775", "hyperparathyroidism": "D006961", "neurological disturbance": "D009422", "optic atrophy": "D009896", "acrodermatitis enteropathica": "C538178", "myelopathy": "D013118", "myelo neuropathy": "D013118", "optic neuropathy": "D009901", "epileptic seizures": "D004827", "generalized seizures": "D012640", "sugar dependency": "D019966", "behavioral cross-sensitization": "D006948", "drug dependency": "D019966", "hyperactive": "D006948", "angiopathy": "D001018", "cardiac dysfunction": "D006331", "acute leukemia": "D015470", "birth defects": "D000014", "birth defect": "D000014", "anencephaly": "D000757", "hypoplastic left heart syndrome": "D018636", "choanal atresia": "D002754", "transverse limb deficiency": "D017880", "diaphragmatic hernia": "D006548", "anophthalmia": "D000853", "microphthalmos": "D008850", "atrial septal defects": "D006344", "cleft lip": "D002971", "cleft palate": "D002972", "synovitis": "D013585", "acute nonlymphocytic leukemia": "D015470", "non-hodgkin's lymphoma": "D008228", "carcinoma of the bladder": "D001749", "malignancy": "D009369", "hepatic injury": "D056486", "hepatomegaly": "D006529", "anorexia": "D000855", "fatty change": "D005234", "massive hepatic necrosis": "D047508", "acute hepatitis": "D017114", "fulminant hepatitis": "D017114", "head and neck carcinomas": "D006258", "head and neck cancer": "D006258", "ovarian cancer": "D010051", "alopecia": "D000505", "paresthesias": "D010292", "arthralgias": "D018771", "myalgias": "D063806", "myalgia": "D063806", "phlebitis": "D010689", "mesothelioma": "D008654", "leiomyosarcoma": "D007890", "basal cell carcinoma": "D002280", "orbital toxicity": "D009916", "glioblastomas": "D005909", "glioblastoma": "D005909", "malignant tumor": "D009369", "pain in the ipsilateral eye": "D058447", "visual disturbance in the ipsilateral eye": "D014786", "glaucoma": "D005901", "ocular pain": "D058447", "papilledema": "D010211", "retinal detachment": "D012163", "chorioretinal atrophy": "C566236", "non-small cell lung cancer": "D002289", "squamous 
carcinoma": "D002294", "bronchio-alveolar carcinoma": "D002282", "undifferentiated carcinoma": "D002277", "squamous cell carcinoma": "D002294", "mitochondrial damage": "D028361", "toxic neurodegenerative cascade": "D009410", "mitochondrial impairment": "D028361", "mitochondrial toxicity": "D028361", "peripheral nerve toxicity": "D010523", "strokes": "D020521", "embolic events": "D004617", "cardiac remodelling": "D020257", "acute t-lymphocytic leukemia": "D054218", "hyperbilirubinemia": "D006932", "hematotoxicity": "D006402", "hematologic disturbances": "D006402", "blood dyscrasias": "D006402", "cytopenias": "D006402", "hematologic syndrome": "D006402", "cytopenia": "D006402", "behavioral disorder": "D002653", "behavioral deterioration": "D002653", "hyperkinesis": "D006948", "sleeping difficulties": "D012893", "hypereosinophilia": "D004802", "pruritus": "D011537", "hypercholesterolaemia": "D006937", "lupus erythematosus": "D008180", "cardiac tamponade": "D002305", "lupus": "D008180", "serositis": "D012700", "lupus syndrome": "D008180", "gastrointestinal disorders": "D005767", "sleep disturbances": "D012893", "parasomnias": "D020447", "cerebral hypoxia": "D002534", "comatose": "D003128", "stuporous": "D053608", "withdrawal symptoms": "D013375", "neurological sequelae": "D009422", "hypoxaemia": "D000860", "postoperative nausea and vomiting": "D020250", "renal fanconi syndrome": "D005198", "mitochondrial cytopathy": "C540770", "wilson's disease": "D006527", "hepatitis b infection": "D006509", "metabolic acidosis": "D000138", "hypophosphatemia": "D017674", "glycosuria": "D006029", "acidosis": "D000138", "muscle weakness": "D018908", "fanconi syndrome": "D005198", "mitochondrial disorders": "D028361", "tubular dysfunction": "D005198", "platypnea-orthodeoxia-like syndrome": "-1", "platypnea-orthodeoxia": "-1", "cyanosis": "D003490", "patent foramen ovale": "D054092", "biventricular dysfunction": "D018754", "breast carcinoma": "D001943", "neurologic toxicity": "D020258", "femoral nerve palsy": "D020428", "muscle tear": "D009135", "contracture": "D003286", "nerve entrapment": "D009408", "partial loss of quadriceps functions": "D009135", "motor and sensory impairment": "D015417", "oliguric": "D009846", "memory impaired": "D008569", "deficit of associative memory": "D008569", "memory deficiency": "D008569", "memory deficit": "D008569", "heart disease": "D006331", "thyroid disorders": "D013959", "acute alcohol intoxication": "D000435", "ventricular septal (vsd) defects": "D006345", "midline (md) defects": "D009436", "intrauterine growth retardation": "D005317", "increase of external and skeletal variations": "D009139", "ventricular septal defects": "D006345", "midline defects": "D009436", "drug dependence": "D019966", "brain damage": "D001925", "ischemic stroke": "D002544", "inflammation": "D007249", "impaired blood clotting": "D020141", "hemodilution": "D020141", "hematomas": "D006406", "white matter edema": "D001929", "neuronal loss": "D009410", "acute hepatic failure": "D017114", "hepatocellular injury": "D056486", "haemodilution": "D020141", "impairment of hepatic function": "D008107", "dyskinetic movements": "D004409", "spontaneous recurrent seizures": "-1", "srs": "-1", "respiratory dysfunction": "D012131", "upper respiratory tract infection": "D012141", "adverse drug reaction": "D064420", "crf": "D007676", "hepatorenal syndrome": "D006530", "acute stroke": "D020521", "reduction in blood pressure": "D007022", "bp reduction": "D007022", "reduction in systolic bp": "D007022", "dbp reduction": 
"D007022", "transient neurologic symptoms": "D009422", "tnss": "D009422", "sinus arrest": "D054138", "cardiac disease": "D006331", "arrhythmias": "D001145", "gall bladder stones": "D042882", "gall stones": "D042882", "gall stone": "D042882", "gall stone disease": "D042882", "cardiovascular complications": "D002318", "subpial necrosis": "D013118", "bladder carcinoma": "D001749", "bladder tumors": "D001749", "ventricular tachyarrhythmias": "D014693", "prolactinomas": "D015175", "prolactinoma": "D015175", "adenoma": "D000236", "liver enlargement": "D006529", "muscle wastage": "D009133", "cardiorespiratory arrest": "D006323", "cardiovascular depression": "D002318", "hallucinosis": "D001523", "impairment of attention and memory": "D008569", "clonic fits": "D012640", "fits": "D012640", "thoracic aortic aneurysm": "D017545", "taa": "D017545", "arterial injury": "D014652", "right heart failure": "D006333", "hyperprolactinemic": "D006966", "hyperprolactinemia": "D006966", "premature atherosclerosis": "D050197", "atherosclerotic lesion": "D050197", "injury to the brain": "D001927", "mitochondrial abnormalities": "D028361", "antiphospholipid syndrome": "D016736", "systemic lupus erythematosus": "D008180", "left ventricular systolic and diastolic dysfunction": "D018487", "myocardial dysfunction": "D009202", "myocardial stunning": "D017682", "myocardial necrosis": "D009202", "urinary bladder cancer": "D001749", "wegener's granulomatosis": "D014890", "bladder cancer": "D001749", "increases in blood pressure": "D006973", "neurologic complications": "D009422", "white matter abnormalities": "D056784", "putaminal hemorrhage": "D020146", "cortical laminar necrosis": "D001927", "hypoxemia": "D000860", "pulmonary hypertension": "D006976", "fistula": "D005402", "necrotizing enterocolitis": "D020345", "exencephaly": "D009436", "exencephalic": "D009436", "hemorrhaging": "D006470", "circulatory failure": "D012769", "maha": "D000743", "suppression of motility": "D011596", "behavioral depression": "D011596", "hepatocellular carcinomas": "D006528", "hepatocellular carcinoma": "D006528", "variant angina": "D000788", "angina pectoris": "D000787", "coronary spasm": "D003329", "cardiac damage": "D006331", "respiratory failure": "D012131", "cardiac arrhythmias": "D001145", "cardiac disturbances": "D006331", "hypoventilation": "D007040", "ventricular arrhythmias": "D001145", "asystole": "D006323", "hyperhidrosis": "D006945", "hypersalivation": "D012798", "bronchorrhoea": "-1", "miosis": "D015877", "atrio-ventricular dissociation": "D006327", "dyspnoea": "D004417", "bronchospasm": "D001986", "respiratory insufficiency": "D012131", "proteus mirabilis infection": "D011512", "acute cardiovascular failure": "D002318", "phonophobia": "D012001", "photophobia": "D020795", "thrombotic thrombocytopenic purpura": "D011697", "hemolytic-uremic syndrome": "D006463", "t-cell lymphoblastic leukaemia": "D015458", "t-cell lymphoblastic lymphoma": "D016399", "t-cell leukaemia": "D015458", "t-cell lymphoma": "D016399", "musculoskeletal pain": "D059352", "haematological toxicity": "D006402", "neurological toxicity": "D020258", "dermatitis": "D003872", "fever": "D005334", "lymphadenopathy": "D008206", "lymphadenitis": "D008199", "multi-organ failure": "D009102", "massive hepatocellular necrosis": "D047508", "nephritis": "D009393", "bone marrow necrosis": "D001855", "cardiovascular toxicity": "D002318", "exanthema": "D005076", "urtication": "D014581", "itchiness": "D011537", "diabetic neuropathy": "D003929", "diabetic (streptozotocin-induced) 
neuropathy": "D003929", "toxic (vincristine-induced) neuropathy": "D010523", "diabetic hyperalgesia": "D006930", "toxic neuropathy": "D010523", "chf": "D006333", "myoclonic jerks": "D009207", "peptic ulcer disease": "D010437", "reflux esophagitis": "D005764", "zollinger-ellison syndrome": "D015043", "lethargy": "D053609", "shortness of breath": "D004417", "hiv antibody-positive": "D015658", "opportunistic infections": "D009894", "diarrhoea": "D003967", "glucose tolerance curves": "D018149", "chronically infected with hepatitis c virus": "D019698", "hepatitis c": "D019698", "memory impairments": "D008569", "learning impairments": "D007859", "cocaine addiction": "D019970", "amnesic": "D000647", "hematuria": "D006417", "extrapyramidal concomitant symptoms": "D001480", "psychotic syndromes belonging predominantly to the schizophrenia group": "D019967", "cognitive dysfunction": "D003072", "cognitive impairment": "D003072", "impairment of learning and memory": "D003072", "deterioration of cognitive functions": "D003072", "neuronal damage": "D009410", "hiccups": "D006606", "lids": "D004409", "pericarditis": "D010493", "graves' disease": "D006111", "febrile illness": "D005334", "low sexual desire": "D020018", "gynecomastia": "D006177", "hypogonadism": "D007006", "hypothalamic dysfunction": "D007027", "supraventricular tachyarrhythmia": "D013617", "unstable angina": "D000789", "nephrosclerosis": "D009400", "tinnitus": "D014012", "idiopathic subjective tinnitus": "D014012", "ist": "D014012", "vertigo": "D014717", "demyelinating disorder": "D003711", "sd": "D012735", "hepatic complication": "D008107", "hematological malignancies": "D019337", "hbv infection": "D006509", "impairment of learning": "D007859", "learning impairment": "D007859", "bruising": "D003288", "orofacial dyskinesia": "D004409", "td": "D004409", "orofacial diskinesia": "D004409", "acute psychosis": "D011605", "trigeminal neuralgia": "D014277", "tubular necrosis": "D007683", "visual hallucinations": "D006212", "glomerular injury": "D007674" } }
BioGPT/data/BC5CDR/raw/train.entities.json/0
{ "file_path": "BioGPT/data/BC5CDR/raw/train.entities.json", "repo_id": "BioGPT", "token_count": 48619 }
150
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

import sys
import re

out_file = sys.argv[1]

prefix = [
    '(learned[0-9]+ )+',
    'we can conclude that',
    'we have that',
    'in conclusion,',
]


def strip_prefix(line):
    for p in prefix:
        res = re.search(p, line)
        if res is not None:
            line = re.split(p, line)[-1].strip()
            break
    return line


def convert_relis_sentence(sentence):
    ans = None
    segs = re.search(r"the answer to the question given the context is(.*)", sentence)
    if segs is not None:
        segs = segs.groups()
        ans = segs[0].strip()
    return ans


all_lines = []
with open(out_file, "r", encoding="utf8") as fr:
    for line in fr:
        e = line.strip()
        if len(e) > 0 and e[-1] == ".":
            all_lines.append(e[:-1])
        else:
            all_lines.append(e)

hypothesis = []
cnt = 0
fail_cnt = 0

for i, line in enumerate(all_lines):
    cnt += 1
    strip_line = strip_prefix(line)
    ans = convert_relis_sentence(strip_line)
    if ans is not None:
        hypothesis.append(ans)
    else:
        hypothesis.append("failed")
        fail_cnt += 1
        print("Failed:id:{}, line:{}".format(i+1, line))

with open(f"{out_file}.extracted.txt", "w", encoding="utf8") as fw:
    for eg in hypothesis:
        print(eg, file=fw)

print(f"failed = {fail_cnt}, total = {cnt}")
BioGPT/examples/QA-PubMedQA/postprocess.py/0
{ "file_path": "BioGPT/examples/QA-PubMedQA/postprocess.py", "repo_id": "BioGPT", "token_count": 645 }
151
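The postprocessing script above strips a leading cue phrase (including runs of learned-prompt tokens) and then extracts whatever follows the fixed answer template. Below is a minimal, self-contained sketch of that extraction step on a hypothetical generated line; the sample text is illustrative only and not taken from any dataset.

import re

# Hypothetical model output for a PubMedQA-style prompt.
generated = "we can conclude that the answer to the question given the context is yes."

line = generated.rstrip(".")
# Drop the leading cue phrase, mirroring strip_prefix above.
line = re.split(r"we can conclude that", line)[-1].strip()
# Pull out the answer after the fixed template, mirroring convert_relis_sentence.
match = re.search(r"the answer to the question given the context is(.*)", line)
answer = match.group(1).strip() if match else "failed"
print(answer)  # -> "yes"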
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

SAVE_DIR=../../checkpoints/RE-DDI-BioGPT
mkdir -p ${SAVE_DIR}

fairseq-train \
    ../../data/DDI/relis-bin --save-dir ${SAVE_DIR} \
    --user-dir ../../src \
    --finetune-from-model ../../checkpoints/Pre-trained-BioGPT/checkpoint.pt \
    --task language_modeling_prompt \
    --arch transformer_lm_prompt_biogpt \
    --share-decoder-input-output-embed --decoder-learned-pos \
    --optimizer adam --adam-betas '(0.9, 0.98)' \
    --weight-decay 0.01 --clip-norm 0.0 \
    --lr 1e-4 --lr-scheduler inverse_sqrt --warmup-updates 500 --warmup-init-lr 1e-07 \
    --tokens-per-sample 1024 --max-source-positions 640 --max-target-positions 1024 \
    --max-tokens 1024 --update-freq 32 \
    --skip-invalid-size-inputs-valid-test \
    --max-epoch 100 --keep-last-epochs 5 \
    --learned-prompt 9
BioGPT/examples/RE-DDI/train.sh/0
{ "file_path": "BioGPT/examples/RE-DDI/train.sh", "repo_id": "BioGPT", "token_count": 362 }
152
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import logging import os from dataclasses import dataclass, field from typing import Optional import torch from fairseq import search, utils from fairseq.data import ( Dictionary, data_utils, indexed_dataset, ) from fairseq.tasks import register_task from fairseq.tasks.language_modeling import LanguageModelingConfig, LanguageModelingTask from .language_model_prompt_dataset import LanguageModelPromptDataset from omegaconf import II logger = logging.getLogger(__name__) @dataclass class LanguageModelingPromptConfig(LanguageModelingConfig): source_lang: Optional[str] = field( default=None, metadata={"help": "source language", "argparse_alias": "-s",} ) target_lang: Optional[str] = field( default=None, metadata={"help": "target language","argparse_alias": "-t",} ) max_source_positions: Optional[int] = field( default=384, metadata={"help": "max number of tokens in the source sequence, exclude eos."} ) manual_prompt: Optional[str] = field( default=None, metadata={"help": "manual prompt to use",} ) learned_prompt: Optional[int] = field( default=None, metadata={"help": "number of virtual tokens to use",} ) learned_prompt_pattern: Optional[str] = field( default='learned', metadata={"help": "pattern of virtual tokens, default is learned",} ) prefix: Optional[bool] = field( default=False, metadata={"help": "whether put prompt as prefix."} ) sep_token: Optional[str] = field( default="<seqsep>", metadata={"help": "token to seperate prompt source and target."} ) @register_task("language_modeling_prompt", dataclass=LanguageModelingPromptConfig) class LanguageModelingPromptTask(LanguageModelingTask): """ Train a language model. Args: dictionary (~fairseq.data.Dictionary): the dictionary for the input of the language model output_dictionary (~fairseq.data.Dictionary): the dictionary for the output of the language model. In most cases it will be the same as *dictionary*, but could possibly be a more limited version of the dictionary (if ``--output-dictionary-size`` is used). targets (List[str]): list of the target types that the language model should predict. Can be one of "self", "future", and "past". Defaults to "future". .. note:: The language modeling task is compatible with :mod:`fairseq-train`, :mod:`fairseq-generate`, :mod:`fairseq-interactive` and :mod:`fairseq-eval-lm`. The language modeling task provides the following additional command-line arguments: .. 
argparse:: :ref: fairseq.tasks.language_modeling_parser :prog: """ def __init__(self, args, dictionary, output_dictionary=None, prompt=None, targets=None): super().__init__(args, dictionary, output_dictionary, targets) self.prompt = prompt self.prompt_length = self.prompt.size(0) if self.prompt is not None else 0 self.prefix = args.prefix @classmethod def setup_prompt(cls, args, dictionary): if args.prefix: dictionary.sep_index = dictionary.add_symbol(args.sep_token) else: dictionary.sep_index = None assert not (args.manual_prompt and args.learned_prompt), "manual prompt and learned prompt can not be set " if args.manual_prompt and len(args.manual_prompt) != 0: prompt = dictionary.encode_line(args.manual_prompt, append_eos=False).long() elif args.learned_prompt: prompt = '' for idx in range(args.learned_prompt): prompt += args.learned_prompt_pattern + str(idx+1) + ' ' prompt = dictionary.encode_line(prompt, append_eos=False).long() else: prompt = None return prompt @classmethod def setup_dictionary(cls, args, **kwargs): dictionary = None output_dictionary = None if args.data: paths = utils.split_paths(args.data) assert len(paths) > 0 dictionary = Dictionary.load(os.path.join(paths[0], "dict.{}.txt".format(args.source_lang))) logger.info("dictionary: {} types".format(len(dictionary))) #output_dictionary = Dictionary.load(os.path.join(paths[0], "dict.{}.txt".format(args.target_lang))) output_dictionary = dictionary return (dictionary, output_dictionary) @classmethod def setup_task(cls, args, **kwargs): """Setup the task (e.g., load dictionaries). Args: args (argparse.Namespace): parsed command-line arguments """ paths = utils.split_paths(args.data) assert len(paths) > 0 # find language pair automatically if args.source_lang is None or args.target_lang is None: args.source_lang, args.target_lang = data_utils.infer_language_pair(paths[0]) if args.source_lang is None or args.target_lang is None: raise Exception( "Could not infer language pair, please provide it explicitly" ) dictionary, output_dictionary = cls.setup_dictionary(args, **kwargs) prompt = cls.setup_prompt(args, dictionary) # upgrade old checkpoints if getattr(args, "exclude_self_target", False): args.self_target = False targets = [] if getattr(args, "self_target", False): targets.append("self") if getattr(args, "future_target", False): targets.append("future") if getattr(args, "past_target", False): targets.append("past") if len(targets) == 0: # standard language modeling targets = ["future"] return cls(args, dictionary, output_dictionary, prompt, targets=targets) def load_dataset( self, split: str, epoch=1, combine=False, **kwargs ) -> LanguageModelPromptDataset: """Load a given dataset split. 
Args: split (str): name of the split (e.g., train, valid, test) """ def split_exists(split, src, tgt, lang, data_path): filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang)) return indexed_dataset.dataset_exists(filename, impl=self.args.dataset_impl) paths = utils.split_paths(self.args.data) assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] # source if split_exists(split, self.args.source_lang, self.args.target_lang, self.args.source_lang, data_path): prefix = os.path.join(data_path, "{}.{}-{}.".format(split, self.args.source_lang, self.args.target_lang)) else: raise FileNotFoundError( "Dataset not found: {} ({})".format(split, data_path) ) src_dataset = data_utils.load_indexed_dataset( prefix + self.args.source_lang, self.dictionary, self.args.dataset_impl ) tgt_dataset = data_utils.load_indexed_dataset( prefix + self.args.target_lang, self.output_dictionary, self.args.dataset_impl ) src_sizes = src_dataset.sizes tgt_sizes = tgt_dataset.sizes dataset = LanguageModelPromptDataset( src_dataset, src_sizes, self.dictionary, tgt_dataset, tgt_sizes, prefix = self.prefix, prompt=self.prompt, max_source_length=self.args.max_source_positions, max_length=self.args.max_target_positions, prompt_length=self.prompt_length ) self.datasets[split] = dataset def build_dataset_for_inference(self, src_tokens, src_lengths, tgt_tokens=None, tgt_lengths=None): """ Generate batches for inference. We prepend an eos token to src_tokens (or bos if `--add-bos-token` is set) and we append a <pad> to target. This is convenient both for generation with a prefix and LM scoring. """ bs = len(src_tokens) if tgt_tokens is None: tgt_tokens = [torch.LongTensor([self.dictionary.eos()]) for _ in range(bs)] tgt_lengths = torch.LongTensor([t.numel() for t in tgt_tokens]) dataset = LanguageModelPromptDataset( src_tokens, src_lengths, self.dictionary, tgt_tokens, tgt_lengths, prefix = self.prefix, prompt=self.prompt, max_source_length=self.args.max_source_positions, max_length=self.args.max_target_positions, prompt_length=self.prompt_length ) return dataset def inference_step(self, generator, models, sample, prefix_tokens=None, constraints=None, allowed_text=None): with torch.no_grad(): # Generation will always be conditioned on bos_token if getattr(self.args, "add_bos_token", False): bos_token = self.source_dictionary.bos() else: bos_token = self.source_dictionary.eos() if constraints is not None: raise NotImplementedError( "Constrained decoding with the language_modeling task is not supported" ) if allowed_text is not None: allowed_text = self.target_dictionary.encode_line(allowed_text, add_if_not_exist=False).to(sample['net_input']['src_tokens']) # SequenceGenerator doesn't use src_tokens directly, we need to # pass the `prefix_tokens` argument instead if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement(): prefix_tokens = sample["net_input"]["src_tokens"] if prefix_tokens[:, 0].eq(bos_token).all(): prefix_tokens = prefix_tokens[:, 1:] return generator.generate( models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token, allowed_text=allowed_text ) def build_generator( self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None, prefix_allowed_tokens_fn=None ): from .constrained_generator import ConstrainedGenerator # Choose search strategy. Defaults to Beam Search. 
sampling = getattr(args, "sampling", False) sampling_topk = getattr(args, "sampling_topk", -1) sampling_topp = getattr(args, "sampling_topp", -1.0) diverse_beam_groups = getattr(args, "diverse_beam_groups", -1) diverse_beam_strength = getattr(args, "diverse_beam_strength", 0.5) match_source_len = getattr(args, "match_source_len", False) diversity_rate = getattr(args, "diversity_rate", -1) constrained = getattr(args, "constraints", False) if prefix_allowed_tokens_fn is None: prefix_allowed_tokens_fn = getattr(args, "prefix_allowed_tokens_fn", None) if ( sum( int(cond) for cond in [ sampling, diverse_beam_groups > 0, match_source_len, diversity_rate > 0, ] ) > 1 ): raise ValueError("Provided Search parameters are mutually exclusive.") assert sampling_topk < 0 or sampling, "--sampling-topk requires --sampling" assert sampling_topp < 0 or sampling, "--sampling-topp requires --sampling" if sampling: search_strategy = search.Sampling( self.target_dictionary, sampling_topk, sampling_topp ) elif diverse_beam_groups > 0: search_strategy = search.DiverseBeamSearch( self.target_dictionary, diverse_beam_groups, diverse_beam_strength ) elif match_source_len: # this is useful for tagging applications where the output # length should match the input length, so we hardcode the # length constraints for simplicity search_strategy = search.LengthConstrainedBeamSearch( self.target_dictionary, min_len_a=1, min_len_b=0, max_len_a=1, max_len_b=0, ) elif diversity_rate > -1: search_strategy = search.DiverseSiblingsSearch( self.target_dictionary, diversity_rate ) elif constrained: search_strategy = search.LexicallyConstrainedBeamSearch( self.target_dictionary, args.constraints ) elif prefix_allowed_tokens_fn: search_strategy = search.PrefixConstrainedBeamSearch( self.target_dictionary, prefix_allowed_tokens_fn ) else: search_strategy = search.BeamSearch(self.target_dictionary) extra_gen_cls_kwargs = extra_gen_cls_kwargs or {} seq_gen_cls = ConstrainedGenerator return seq_gen_cls( models, self.target_dictionary, beam_size=getattr(args, "beam", 5), max_len_a=getattr(args, "max_len_a", 0), max_len_b=getattr(args, "max_len_b", 200), min_len=getattr(args, "min_len", 1), normalize_scores=(not getattr(args, "unnormalized", False)), len_penalty=getattr(args, "lenpen", 1), unk_penalty=getattr(args, "unkpen", 0), temperature=getattr(args, "temperature", 1.0), match_source_len=getattr(args, "match_source_len", False), no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0), search_strategy=search_strategy, **extra_gen_cls_kwargs, )
BioGPT/src/language_modeling_prompt.py/0
{ "file_path": "BioGPT/src/language_modeling_prompt.py", "repo_id": "BioGPT", "token_count": 6301 }
153
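In setup_prompt above, --learned-prompt N is expanded into a string of N virtual tokens before being encoded with the dictionary, which is also why the postprocessing script earlier strips the '(learned[0-9]+ )+' prefix. The following is a standalone sketch of just that string-building step, using the --learned-prompt 9 setting from the RE-DDI training script; no fairseq dictionary is involved.

# Mirror of the learned-prompt construction in setup_prompt, with the
# defaults used above: pattern "learned", 9 virtual tokens.
learned_prompt = 9
learned_prompt_pattern = "learned"

prompt = ""
for idx in range(learned_prompt):
    prompt += learned_prompt_pattern + str(idx + 1) + " "

print(prompt)  # -> "learned1 learned2 ... learned9 " (with a trailing space)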
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import bitblas from bitblas.base.roller.policy import TensorCorePolicy, DefaultPolicy from bitblas.base.roller.arch import CUDA from bitblas.gpu.matmul_analysis import get_tensorized_func_and_tags from bitblas.gpu import Matmul from bitblas.utils import auto_detect_nvidia_target from bitblas.base.utils import apply_and_build from bitblas.ops.impl.matmul_dequantize_impl import ( matmul_nt_dequantize_b, matmul_nt_dequantize_b_propagate_a_propagate_b, ) import tvm import time import argparse bitblas.set_log_level("DEBUG") # append a parser for the benchmark set parser = argparse.ArgumentParser(description="Benchmark BitBLAS int8xint1 on a specific target.") parser.add_argument( "--target", type=str, default=auto_detect_nvidia_target(), ) parser.add_argument( "--batch_seq", type=int, default=1, help="The batch size of the sequence", ) parser.add_argument( "--group_size", type=int, default=-1, help="The group size of the sequence", ) parser.add_argument( "--benchmark_sets", nargs="+", default=["llm_int8xint1"], help="List of benchmark sets, e.g., llm_int8xint1_bs4096", ) args = parser.parse_args() batch_seq = args.batch_seq group_size = args.group_size # fmt:off llm_int8xint1 = [ # square test (matmul_nt_dequantize_b, (1, 16384, 16384, "int8", "int8", "int32", 1, "int8", "int", False, False, group_size, True, False), Matmul), # BLOOM-176B (matmul_nt_dequantize_b, (1, 43008, 14336, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), (matmul_nt_dequantize_b, (1, 14336, 14336, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), (matmul_nt_dequantize_b, (1, 57344, 14336, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), (matmul_nt_dequantize_b, (1, 14336, 57344, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), # # OPT-65B (matmul_nt_dequantize_b, (1, 9216, 9216, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), (matmul_nt_dequantize_b, (1, 36864, 9216, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), (matmul_nt_dequantize_b, (1, 9216, 36864, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), (matmul_nt_dequantize_b, (1, 22016, 8192, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), # LLAMA-70B/65B (matmul_nt_dequantize_b, (1, 8192, 22016, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), (matmul_nt_dequantize_b, (1, 8192, 8192, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), (matmul_nt_dequantize_b, (1, 28672, 8192, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), (matmul_nt_dequantize_b, (1, 8192, 28672, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), # square test (matmul_nt_dequantize_b_propagate_a_propagate_b, (16384, 16384, 16384, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), # BLOOM-176B (matmul_nt_dequantize_b_propagate_a_propagate_b, (8192, 43008, 14336, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), (matmul_nt_dequantize_b_propagate_a_propagate_b, (8192, 14336, 14336, "int8", "int8", "int32", 1, "int8", "uint", False, 
False, group_size, True, False), Matmul), (matmul_nt_dequantize_b_propagate_a_propagate_b, (8192, 57344, 14336, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), (matmul_nt_dequantize_b_propagate_a_propagate_b, (8192, 14336, 57344, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), # OPT-65B (matmul_nt_dequantize_b_propagate_a_propagate_b, (8192, 9216, 9216, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), (matmul_nt_dequantize_b_propagate_a_propagate_b, (8192, 36864, 9216, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), (matmul_nt_dequantize_b_propagate_a_propagate_b, (8192, 9216, 36864, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), (matmul_nt_dequantize_b_propagate_a_propagate_b, (8192, 22016, 8192, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), # LLAMA-70B/65B (matmul_nt_dequantize_b_propagate_a_propagate_b, (8192, 8192, 22016, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), (matmul_nt_dequantize_b_propagate_a_propagate_b, (8192, 8192, 8192, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), (matmul_nt_dequantize_b_propagate_a_propagate_b, (8192, 28672, 8192, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), (matmul_nt_dequantize_b_propagate_a_propagate_b, (8192, 8192, 28672, "int8", "int8", "int32", 1, "int8", "uint", False, False, group_size, True, False), Matmul), ] # fmt:on target = tvm.target.Target(args.target) benchmark_sets = [] for benchmark_set in args.benchmark_sets: benchmark_sets.extend(eval(benchmark_set)) benchmark_results = {} for get_prim_func, input_args, d_schedule in benchmark_sets: ir_module = get_prim_func(*input_args) func = ir_module["main"] arch = CUDA(target) policy = DefaultPolicy(func=func, arch=arch) try: tensorized_func, tags = get_tensorized_func_and_tags(func, arch.target) except Exception: tags = None if tags: policy = TensorCorePolicy(func=tensorized_func, arch=arch, tags=tags) configs = policy.emit_config(20) tune_start = time.time() cpresults, best = apply_and_build(func, configs, arch, parallel_build=True) fast_tune_time = time.time() - tune_start # print(best.sch.mod) print(best.code) print("[BitBLAS] The best latency of top 1 is {:.3f} ms".format(cpresults[0].latency)) print("[BitBLAS] The best latency of top 20 is {:.3f} ms".format(best.latency)) # evaluate the performance of the default schedule rule = d_schedule() default_tune_start = time.time() with arch.target: mod = bitblas.ApplyDefaultSchedule( # pylint: disable=not-callable bitblas.gpu.Matmul(), bitblas.gpu.GEMV(), bitblas.gpu.Reduction(), bitblas.gpu.GeneralReduction(), bitblas.gpu.Fallback(), )( ir_module) try: with tvm.transform.PassContext(config={"tir.use_async_copy": True}): mod_default = tvm.build(mod, target="cuda") except Exception: mod_default = None default_tune_time = time.time() - default_tune_start args = func.buffer_map.values() profile_tensors = best.profile_tensors if mod_default is not None: timer_cuda_mod = mod_default.time_evaluator(mod_default.entry_name, arch.device, number=5) t = timer_cuda_mod(*profile_tensors).mean else: t = 1e4 - 1 print("Time cost of BitBLAS default schedule: {:.3f} ms".format(t * 1e3)) profile_config = { f"{get_prim_func.__name__}-{'-'.join([str(i) for i 
in input_args])}": { "fast_bitblas_top20_tune_time": fast_tune_time, "fast_bitblas_top1_latency": cpresults[0].latency, "fast_bitblas_top20_latency": best.latency, "default_bitblas_tune_time": default_tune_time, "default_bitblas_latency": t * 1e3 if t is not None else "Failed", } } benchmark_results.update(profile_config) headers = [ "PrimFunc", "Input Arguments", "BitBLAS Top20 Tune Time", "BitBLAS Top1 Latency", "BitBLAS Top20 Latency", "DefaultDLight Tune Time", "DefaultDLight Latency", ] col_width = (max(len(word) for row in [headers] + list(profile_config.values()) for word in row) + 2 ) # padding print("".join(word.ljust(col_width) for word in headers)) print("-" * col_width * len(headers)) for config, values in benchmark_results.items(): args = config.split("-") func_name = args[0] input_args = "-".join(args[1:]) row = [ func_name, input_args, f" {str(values['fast_bitblas_top20_tune_time'])} s", f"{values['fast_bitblas_top1_latency']:.3f} ms", f"{values['fast_bitblas_top20_latency']:.3f} ms", str(values["default_bitblas_tune_time"]), f"{values['default_bitblas_latency']:.3e} ms", ] print("".join(word.ljust(col_width) for word in row))
BitBLAS/benchmark/dsl/matmul_dequantize_int1.py/0
{ "file_path": "BitBLAS/benchmark/dsl/matmul_dequantize_int1.py", "repo_id": "BitBLAS", "token_count": 4475 }
154
For now, please check out https://github.com/LeiWang1999/AutoGPTQ for details. The related pull request to the official AutoGPTQ repository is still under construction.
BitBLAS/integration/AutoGPTQ/README.md/0
{ "file_path": "BitBLAS/integration/AutoGPTQ/README.md", "repo_id": "BitBLAS", "token_count": 39 }
155
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. from string import Template import os import tvm from tvm import IRModule from tvm.target import Target from bitblas.utils import match_global_kernel, auto_detect_nvidia_target from bitblas.base.analysis import get_reduction_blocks from bitblas.ops import Operator from bitblas.ops.matmul_dequantize import ( MatmulWeightOnlyDequantize, MatmulWeightOnlyDequantizeConfig, ) from bitblas.gpu.intrin.lop3 import ( decode_i2_to_f16, decode_i2_to_f16_scale, decode_i2_to_f16_scale_zeros, decode_i4_to_f16, decode_i4_to_f16_scale, decode_i4_to_f16_scale_zeros, ) bit = 2 mask = (1 << bit) - 1 group_size = 128 ft_shapes = [ [1, 15360, 5120], [128, 15360, 5120], ] target = tvm.target.Target(auto_detect_nvidia_target()) def get_template_path(): cur_dir = os.path.dirname(os.path.abspath(__file__)) return os.path.join(cur_dir, f"template/kernel_template.int{bit}.bitblas.cu.template") template_path = get_template_path() def get_codegen_result(ops: Operator, target: Target): code = ops.get_source(target=target) return code def get_thread_block_information(mod: IRModule): sch = tvm.tir.Schedule(mod) root_block = sch.get_block("root") child_blocks = sch.get_child_blocks(root_block) reduction_blocks = get_reduction_blocks(sch, child_blocks) assert len(reduction_blocks) == 1 (main_block,) = reduction_blocks loops = sch.get_loops(main_block) block_info = [1, 1, 1] grid_info = [1, 1, 1] for loop in loops: stmt = sch.get(loop) thread_binding = stmt.thread_binding extent = int(stmt.extent) if thread_binding is None: continue if thread_binding.thread_tag == "threadIdx.x": block_info[0] = extent elif thread_binding.thread_tag == "threadIdx.y": block_info[1] = extent elif thread_binding.thread_tag == "threadIdx.z": block_info[2] = extent elif thread_binding.thread_tag == "blockIdx.x": grid_info[0] = extent elif thread_binding.thread_tag == "blockIdx.y": grid_info[1] = extent elif thread_binding.thread_tag == "blockIdx.z": grid_info[2] = extent return block_info, grid_info kernel_body = "" kernel_call = "" for M, N, K in ft_shapes: matmul_config = MatmulWeightOnlyDequantizeConfig( M=M, N=N, K=K, in_dtype="float16", out_dtype="float16", accum_dtype="float16", bit=bit, storage_dtype="int8", source_format="uint", with_scaling=True, with_zeros=True, group_size=group_size, fast_decoding=True, with_bias=False, propagate_a=False, propagate_b=False, layout="nt", ) matmul = MatmulWeightOnlyDequantize( config=matmul_config, target=target, ) matmul.hardware_aware_finetune(topk=20) code = get_codegen_result(matmul, target) index = match_global_kernel(code) headers = code[:index] headers.replace('extern "C" ', "") declarations = code[index:].split(";")[0] index = code.index("{", index) function_body = declarations + code[index:] # get block information from mod block_size, grid_size = get_thread_block_information(matmul.optimized_func) if M != 1 and block_size[0] == 1: block_size[0] = 32 new_kernel_name = f"bitblas_kernel_fp16_int{bit}_fp16_m{M}n{N}k{K}_nt" Qweight_bytes = N * K // 8 * bit Scale_bytes = N * K // group_size * 2 function_body = function_body.replace("main_kernel", new_kernel_name) call = f""" // const dim3 GridDim({grid_size[0]}, {grid_size[1]}, {grid_size[2]}); // const dim3 BlockDim({block_size[0]}, {block_size[1]}, {block_size[2]}); // {new_kernel_name}<<<GridDim, BlockDim>>>(input_0, input_1, output); """ function_body = function_body.replace( "(half* __restrict__ A, signed char* __restrict__ B, half* __restrict__ D, half* __restrict__ Scale, 
half* __restrict__ Zeros){", f"(half* __restrict__ A, half* __restrict__ QB, half* __restrict__ D) {{\n\ signed char* B = ((int8_t *)QB);\n\t half* Scale = (half *)((int8_t *)QB + {Qweight_bytes}); \n\t half* Zeros = (half *)((int8_t *)QB + {Qweight_bytes + Scale_bytes}); \ {call}", ) kernel_body += function_body kernel_body += "\n\n" real_call = call.replace("//", "") real_call = f""" if (M == {M} && N == {N} && K == {K}){{ {real_call} return 0; }} """ kernel_call += real_call # make output cur_dir = os.path.dirname(os.path.abspath(__file__)) ladder_path = os.path.join(cur_dir, "kenrel_output") if not os.path.exists(ladder_path): os.makedirs(ladder_path) ladder_kernel_path = os.path.join(ladder_path, "ladder_kernel.cu") ladder_header_path = os.path.join(ladder_path, "ladder_kernel.h") with open( template_path, mode="r", encoding="utf-8") as r_f, open( ladder_kernel_path, mode="w", encoding="utf8") as w_f: template_content = r_f.read() template = Template(template_content) data = template.substitute(kernel_body=kernel_body, kernel_call=kernel_call) w_f.write(data) pack_half2 = """ // Pack two half values. static inline __device__ __host__ unsigned __pack_half2(const half x, const half y) { unsigned v0 = *((unsigned short *)&x); unsigned v1 = *((unsigned short *)&y); return (v1 << 16) | v0; } """ with open(ladder_header_path, mode="w", encoding="utf8") as w_f: headers = f"""// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. #ifndef __LADDER_KERNEL_H__ #define __LADDER_KERNEL_H__ #include <cuda_fp16.h> #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ == 800) #define TVM_ENABLE_L2_PREFETCH 1 #else #define TVM_ENABLE_L2_PREFETCH 0 #endif #ifdef _WIN32 using uint = unsigned int; using uchar = unsigned char; using ushort = unsigned short; using int64_t = long long; using uint64_t = unsigned long long; #else #define uint unsigned int #define uchar unsigned char #define ushort unsigned short #define int64_t long long #define uint64_t unsigned long long #endif #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ == 800) #define TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST 1 #else #define TVM_ENBALE_EFFICIENT_SMEM_PTR_CAST 0 #endif {decode_i4_to_f16} {decode_i4_to_f16_scale} {decode_i4_to_f16_scale_zeros} {decode_i2_to_f16} {decode_i2_to_f16_scale} {decode_i2_to_f16_scale_zeros} {pack_half2} int ladder_gemm_fp16xint{bit}_fp16(half *input_0, half *input_1, half *output, const int M, const int N, const int K, const int trans_a, const int trans_b, half *workspace_ptr); #endif """ w_f.write(headers)
BitBLAS/integration/bitdistiller/kernel_generator_dynzeros.py/0
{ "file_path": "BitBLAS/integration/bitdistiller/kernel_generator_dynzeros.py", "repo_id": "BitBLAS", "token_count": 2997 }
156
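The generator above loops over the FasterTransformer shapes, tunes a MatmulWeightOnlyDequantize operator for each, and splices the resulting CUDA kernel into the template. The sketch below isolates the per-shape portion, reusing only the imports and config fields shown above; group_size 128 and the first shape come from the script, and running it assumes a local NVIDIA GPU with a working TVM/BitBLAS build.

import tvm
from bitblas.utils import auto_detect_nvidia_target
from bitblas.ops.matmul_dequantize import (
    MatmulWeightOnlyDequantize,
    MatmulWeightOnlyDequantizeConfig,
)

target = tvm.target.Target(auto_detect_nvidia_target())

# One W2A16 dequantize-GEMM, matching the first FasterTransformer shape above.
config = MatmulWeightOnlyDequantizeConfig(
    M=1, N=15360, K=5120,
    in_dtype="float16", out_dtype="float16", accum_dtype="float16",
    bit=2, storage_dtype="int8", source_format="uint",
    with_scaling=True, with_zeros=True, group_size=128,
    fast_decoding=True, with_bias=False,
    propagate_a=False, propagate_b=False, layout="nt",
)
op = MatmulWeightOnlyDequantize(config=config, target=target)
op.hardware_aware_finetune(topk=20)
print(op.get_source(target=target))  # tuned CUDA kernel source for this shape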
#!/bin/bash

# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

echo "Add MIT liscense boilerplate..."

PWD="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# TO source code root
pushd "${PWD}/../../" > /dev/null

EXITCODE=0

for SRC_FILE in $(find . -path './3rdparty' -prune -false -o -path './build' -prune -false -o -type f -not -name \
    '*apply_mit_liscense.sh' -not -name '*check_mit_liscense.sh' -and \( -name '*.cpp' -or -name '*.h*' -or -name '*.cu' -or -name '*.in' \) ); do
    sed -i '/\/\/\s*Microsoft\s*(c)/Id' ${SRC_FILE}
    if !(grep -q "Copyright (c) Microsoft Corporation." "${SRC_FILE}"); then
        cat maint/scripts/mit_liscense1.txt ${SRC_FILE} > ${SRC_FILE}.new
        mv ${SRC_FILE}.new ${SRC_FILE}
    fi
done

for SRC_FILE in $(find . -path './3rdparty' -prune -false -o -path './build' -prune -false -o -type f -not -name \
    '*apply_mit_liscense.sh' -not -name '*check_mit_liscense.sh' -and \( -name 'CMakeLists.txt' -or -name '*.cmake' \
    -or -name '*.py' -or -name '*.dockerfile' -or -name '*.yaml' \) ); do
    sed -i '/\#\s*Microsoft\s*(c)/Id' ${SRC_FILE}
    if !(grep -q "Copyright (c) Microsoft Corporation" "${SRC_FILE}"); then
        cat maint/scripts/mit_liscense2.txt ${SRC_FILE} > ${SRC_FILE}.new
        mv ${SRC_FILE}.new ${SRC_FILE}
    fi
done

for SRC_FILE in $(find . -path './3rdparty' -prune -false -o -path './build' -prune -false -o -type f -not -name \
    '*apply_mit_liscense.sh' -not -name '*check_mit_liscense.sh' -name '*.sh' ); do
    sed -i '/\#\s*Microsoft\s*(c)/Id' ${SRC_FILE}
    if !(grep -q "Copyright (c) Microsoft Corporation" "${SRC_FILE}"); then
        line=$(head -n 1 ${SRC_FILE})
        if [[ $line == "#!/bin/bash"* ]]; then
            (echo ${line}; echo ''; cat maint/scripts/mit_liscense2.txt; echo "$(tail -n +2 "${SRC_FILE}")" ) > ${SRC_FILE}.new
        else
            cat maint/scripts/mit_liscense2.txt ${SRC_FILE} > ${SRC_FILE}.new
        fi
        mv ${SRC_FILE}.new ${SRC_FILE}
    fi
done

echo "Done."

popd > /dev/null

exit $EXITCODE
BitBLAS/maint/scripts/apply_mit_license.sh/0
{ "file_path": "BitBLAS/maint/scripts/apply_mit_license.sh", "repo_id": "BitBLAS", "token_count": 957 }
157
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import tvm
from tvm.target import Target
from .arch_base import TileDevice
from typing import List, Dict


def check_sm_version(arch: str) -> int:
    sm_version = arch.replace("sm_", "")
    return int(sm_version) if sm_version.isdigit() else -1


class TensorInstruction(object):

    def __init__(
        self,
        name: str,
        intrin_group: Dict,
        shape: List[int],
    ):
        self.name: str = name
        self.intrin_group: Dict = intrin_group
        # only maintain the shape of M and N
        self.shape: List[int] = shape


class CUDA(TileDevice):

    def __init__(self, target: Target):
        self.target = target
        self.sm_version = check_sm_version(self.target.arch)
        device = tvm.runtime.cuda(0)
        if not device.exist:
            raise RuntimeError("Cannot find cuda device 0.")
        self.device: tvm.runtime.Device = device
        self.platform: str = "CUDA"
        self.smem_cap = device.max_shared_memory_per_block
        self.compute_max_core = device.multi_processor_count
        self.warp_size = device.warp_size
        self.compute_capability = device.compute_version.replace(".", "")
        self.reg_cap: int = 65536
        self.max_smem_usage: int = 2 * self.smem_cap
        self.sm_partition: int = 4
        self.l2_cache_size_bytes: int = target.l2_cache_size_bytes
        # the number of transaction size in bytes
        self.transaction_size: List[int] = [32, 128]  # in bytes
        # bandwidth in MB/s, will be used for recommend basic tile size
        # TODO(lei): find some way to get the real bandwidth
        # However, the ratio of bandwidth between different devices can
        # be similar. The bandwidth can work for another devices as well.
        self.bandwidth: List[int] = [750, 12080]
        # get the available tensor instructions during runtime to avoid
        # the dependency of the tensor intrinsics registration
        self.available_tensor_instructions: List[TensorInstruction] = None

    def get_avaliable_tensorintrin_shapes(self):
        from tvm.tir.tensor_intrin.cuda import get_wmma_intrin_group, get_mma_intrin_group

        self.available_tensor_instructions = (
            TensorInstruction("mma", get_mma_intrin_group, [16, 16]),
            TensorInstruction("wmma", get_wmma_intrin_group, [16, 16]),
        )
        return [t.shape for t in self.available_tensor_instructions]
BitBLAS/python/bitblas/base/roller/arch/cuda.py/0
{ "file_path": "BitBLAS/python/bitblas/base/roller/arch/cuda.py", "repo_id": "BitBLAS", "token_count": 1010 }
158
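The CUDA wrapper above probes the runtime device, so instantiating it requires a visible NVIDIA GPU. A small usage sketch follows; the import path is taken from the benchmark script earlier in this dump, and the printed values depend on the actual card.

import tvm
from bitblas.utils import auto_detect_nvidia_target
from bitblas.base.roller.arch import CUDA

# CUDA.__init__ calls tvm.runtime.cuda(0), so a GPU must be present.
target = tvm.target.Target(auto_detect_nvidia_target())
arch = CUDA(target)

print(arch.compute_capability)        # e.g. "80" on an A100
print(arch.smem_cap, arch.warp_size)  # shared memory per block, warp size
print(arch.get_avaliable_tensorintrin_shapes())  # -> [[16, 16], [16, 16]]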
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import bitblas from bitblas.ops.operator import OperatorConfig, Operator from dataclasses import asdict import os import json import tempfile from hashlib import sha256 import shutil import tvm from tvm.contrib.tar import tar import logging logger = logging.getLogger(__name__) BITBLAS_DATABASE_PATH = os.path.expanduser("~/.cache/bitblas") class OperatorCache: """ Manages a cache for operator instances (e.g., Matmul, Convolution) based on their configurations. """ def __init__(self): self.cache = {} def add(self, config: OperatorConfig, op_inst: Operator): self.cache[config] = op_inst def get(self, config: OperatorConfig): return self.cache.get(config) def exists(self, config): return config in self.cache def clear(self): self.cache.clear() def size(self): return len(self.cache) def save_into_database(self, database_path=None, target=None): database_path = self._ensure_database_path(database_path) for config, op_inst in self.cache.items(): arch_str = self._determine_arch_str(op_inst, target) arch_path = os.path.join(database_path, arch_str) self._ensure_directory(arch_path) hash_str = sha256(repr(config).encode()).hexdigest() config_path = os.path.join(arch_path, hash_str) # if the config already exists, skip saving if os.path.exists(config_path): continue self._ensure_directory(config_path) self._save_operator_config_and_artifact(config, op_inst, config_path) def load_from_database(self, database_path, target=None): if not os.path.exists(database_path): logger.info( f"Database path {database_path} does not exist, skipping loading operators from the database" ) return arch_str = self._determine_target_arch_str(target) arch_path = os.path.join(database_path, arch_str) if not os.path.exists(arch_path): logger.info( f"Target {arch_str} does not exist in the database, skipping loading operators from the database" ) return self._load_operators_from_arch_path(arch_path, target) def _ensure_database_path(self, database_path): if database_path is None: return tempfile.mkdtemp() os.makedirs(database_path, exist_ok=True) return database_path def _determine_arch_str(self, op_inst, target): return (target if target else "-".join(list(op_inst.target.keys) + [op_inst.target.arch])) def _ensure_directory(self, path): os.makedirs(path, exist_ok=True) def _save_operator_config_and_artifact(self, config, op_inst, config_path): config_type, operator_type = type(config).__name__, type(op_inst).__name__ with open(os.path.join(config_path, f"{config_type}.json"), "w") as json_file: json.dump(asdict(config), json_file) artifact_path = os.path.join(config_path, "tvm_rt_mod." 
+ tar.output_format) try: op_inst.rt_mod.export_library(artifact_path, fcompile=tar) except Exception as e: # library does not support export_library export_error = e # noqa: F841 pass json_data = {"config_type": config_type, "operator_type": operator_type} json_file_path = os.path.join(config_path, "mapping.json") with open(json_file_path, "w") as json_file: json.dump(json_data, json_file) # For writing source.cu file source_file_path = os.path.join(config_path, "source.cu") with open(source_file_path, "w") as source_file: source_file.write(op_inst.get_source()) # For writing optimized.py file optimized_file_path = os.path.join(config_path, "optimized.py") with open(optimized_file_path, "w") as optimized_file: if op_inst.optimized_func is not None: optimized_file.write(op_inst.optimized_func.script(show_meta=False)) if op_inst.wrapper.lib_name is not None: # copy lib name to the same directory as the artifact src_name = op_inst.wrapper.src_name shutil.copy( src_name, os.path.join(config_path, os.path.basename("wrapper_source.cu")), ) lib_name = op_inst.wrapper.lib_name shutil.copy( lib_name, os.path.join(config_path, os.path.basename("wrapper_compiled.so")), ) def _determine_target_arch_str(self, target): return (target if isinstance(target, str) else "-".join(list(target.keys) + [target.arch])) def _load_operators_from_arch_path(self, arch_path, target): for root, dirs, _ in os.walk(arch_path): for directory in dirs: config_path = os.path.join(root, directory) self._load_operator(config_path, target) def _load_operator(self, config_path, target): mapping, config, rt_mod, src_name, lib_name = None, None, None, None, None for file in os.listdir(config_path): full_path = os.path.join(config_path, file) if file == "mapping.json": with open(full_path) as f: mapping = json.load(f) elif file.endswith(".json"): with open(full_path) as f: config = json.load(f) elif file.endswith(".tar"): rt_mod = tvm.runtime.load_module(full_path) elif file == "wrapper_compiled.so": lib_name = full_path elif file == "wrapper_source.cu": src_name = full_path if mapping and config and rt_mod: self._instantiate_and_add_operator(mapping, config, rt_mod, src_name, lib_name, target) def _instantiate_and_add_operator(self, mapping, config, rt_mod, src_name, lib_name, target): config_cls = getattr(bitblas, mapping["config_type"]) operator_cls = getattr(bitblas, mapping["operator_type"]) op_inst = operator_cls( config=config_cls(**config), target=target, enable_tuning=False, from_database=True) op_inst.update_runtime_module(rt_mod, src_name=src_name, lib_name=lib_name) self.add(config_cls(**config), op_inst) global_operator_cache = OperatorCache() def load_global_ops_cache(database_path=BITBLAS_DATABASE_PATH, target=None): if target is None: target = bitblas.auto_detect_nvidia_target() logger.info(f"Loading operators from database {database_path} for target {target}") global_operator_cache.load_from_database(database_path, target) return global_operator_cache def get_database_path(): return BITBLAS_DATABASE_PATH def set_database_path(path): global BITBLAS_DATABASE_PATH BITBLAS_DATABASE_PATH = path return BITBLAS_DATABASE_PATH
BitBLAS/python/bitblas/cache/operator.py/0
{ "file_path": "BitBLAS/python/bitblas/cache/operator.py", "repo_id": "BitBLAS", "token_count": 3124 }
159
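The cache module above keys compiled operators by their config dataclass and can persist them under ~/.cache/bitblas. Below is a sketch of the load path, importing directly from the module shown in case the package-level re-exports differ; the config object in the lookup comment is hypothetical.

from bitblas.cache.operator import (
    load_global_ops_cache,
    get_database_path,
    global_operator_cache,
)

# Load every previously tuned operator for the current GPU from the default
# database directory (~/.cache/bitblas); the target is auto-detected when None.
cache = load_global_ops_cache(database_path=get_database_path(), target=None)
print(cache.size())

# Later, a config instance can be used to look up its compiled operator.
# op = global_operator_cache.get(some_matmul_config)  # some_matmul_config is hypothetical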
# Copyright 2018 The apache/tvm Authors. All Rights Reserved. # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # Modifications Copyright (c) Microsoft. # The code below is mostly copied from apache/tvm reduction.py in dlight. """A rule for reduction. """ from typing import List, Optional, Tuple, Union from tvm import arith, ir, tir from tvm.target import Target from ..base import ( BlockInfo, normalize_prim_func, try_inline_contiguous_spatial, detect_dominant_read, is_broadcast_epilogue, ) from . import utils from .base import GPUScheduleRule def _get_reduction_expr(block: tir.Block) -> Optional[tir.PrimExpr]: # Detect and return `Y` in `X[...] = X[...] + Y` buffer_store = block.body if not isinstance(buffer_store, tir.BufferStore): return None if not isinstance(buffer_store.value, tir.Add): return None if not ir.structural_equal( buffer_store.value.a, tir.BufferLoad(buffer_store.buffer, block.body.indices), map_free_vars=True, ): return None return buffer_store.value.b class Reduction(GPUScheduleRule): """A rule for Reduction.""" def apply( # pylint: disable=too-many-locals,too-many-branches,too-many-return-statements self, func: tir.PrimFunc, target: Target, _: bool, ) -> Union[None, tir.Schedule, List[tir.Schedule]]: if not isinstance(func, tir.PrimFunc) or not self.is_target_available(target): return None sch = tir.Schedule(func) block_infos = normalize_prim_func(sch) if block_infos is None: return None block_infos = try_inline_contiguous_spatial(sch, block_infos) if len(block_infos) == 1: epilogue = None elif len(block_infos) == 2: epilogue = block_infos[1] if not epilogue.is_injective(): return None else: return None block_info = block_infos[0] block = block_info.block_rv block_stmt = sch.get(block) # Step 1. Check reduction block if ( (not block_info.is_reduction()) or len(block_stmt.writes) != 1 or _get_reduction_expr(block_stmt) is None ): return None # Step 2. Normalize the block, merge spatial and reduction iters is_inner_reduction, c_factor, loop_order, s_split_index = self._normalize( sch, block_info, arith.normalize_to_iter_sum( detect_dominant_read(block_stmt), input_iters={i.var: i.dom for i in block_stmt.iter_vars}, ), ) if is_inner_reduction is None and c_factor is None: return None # Step 3. 
Do the scheduling if is_inner_reduction: self._sch_inner_reduction( sch, target, block, c_factor, epilogue, loop_order, s_split_index ) else: self._sch_inner_spatial( sch, target, block, block_info, c_factor, epilogue, loop_order, s_split_index ) return sch def _normalize( # pylint: disable=too-many-branches self, sch: tir.Schedule, block_info: BlockInfo, access: arith.IterSumExpr, ) -> Tuple[Optional[bool], Optional[int]]: if access.base != 0: return None, None, None, None iter_to_info = {i.var: i for i in block_info.iters} s_loops, r_loops, c_loops, c_factor = [], [], [], None s_split_loop, s_split_index = None, None for split_expr in access.args: var = split_expr.source.source info = iter_to_info.pop(var) loop = info.loop_rv is_inner_reduction = info.kind == "R" if split_expr.lower_factor > 1: if c_loops: return None, None, None, None s_split_loop = loop s_split_index = len(s_loops) loop, c_loop = sch.split(loop, factors=[None, split_expr.lower_factor]) c_loops.append(c_loop) if not is_inner_reduction: c_factor = split_expr.lower_factor if is_inner_reduction: r_loops.append(loop) else: s_loops.append(loop) if iter_to_info: for var, info in iter_to_info.items(): if info.kind == "S" and info.dom.extent == 1: s_loops.append(info.loop_rv) else: return None, None, None, None loop_order = {} s_block_var_loops = [] for i in block_info.iters: if i.loop_rv in s_loops or i.loop_rv == s_split_loop: s_block_var_loops.append(i.loop_rv) for i in range(len(s_block_var_loops)): for j in range(len(s_loops)): if s_block_var_loops[i] == s_loops[j]: loop_order[i] = j break if s_block_var_loops[i] == s_split_loop: loop_order[i] = s_split_index break assert s_loops assert r_loops if len(s_loops) != len([i for i in block_info.iters if i.kind == "S"]): return None, None if not c_loops: c_loops = [sch.add_unit_loop(block_info.block_rv)] sch.reorder(*s_loops, *r_loops, *c_loops) sch.fuse(*s_loops) sch.fuse(*r_loops) return is_inner_reduction, c_factor, loop_order, s_split_index def _sch_inner_reduction( # pylint: disable=too-many-arguments self, sch: tir.Schedule, target: Target, block: tir.schedule.BlockRV, unroll_spatial_factor: Optional[int], epilogue_info: Optional[BlockInfo], loop_order, s_split_index, ): # pylint: disable=invalid-name _, r, _ = sch.get_loops(block) (len_tx,) = utils.suggest_threads_per_block( # pylint: disable=unbalanced-tuple-unpacking target, [sch.get(r)] ) _, tx = sch.split(r, factors=[None, len_tx]) # Schedule the RF block rf = sch.rfactor(tx, 0) bx, r, tx, _ = sch.get_loops(rf) sch.reorder(bx, tx, r) sch.bind(bx, "blockIdx.x") sch.bind(tx, "threadIdx.x") sch.annotate(tx, ann_key="pragma_auto_unroll_max_step", ann_val=256) sch.annotate(tx, ann_key="pragma_unroll_explicit", ann_val=1) sch.set_scope(rf, 0, "local") sch.decompose_reduction(rf, r) # Schedule the write back block sch.reverse_compute_at(block, bx, preserve_unit_loops=True) _, tx, *s = sch.get_loops(block) if unroll_spatial_factor: assert len(s) == len(loop_order) new_order_s = [s[loop_order[i]] for i in range(len(s))] sch.reorder(*new_order_s) new_order_s[s_split_index], c = sch.split( new_order_s[s_split_index], factors=[None, unroll_spatial_factor] ) sch.reorder(*new_order_s, c) s = sch.fuse(*new_order_s) sch.reorder(s, tx, c) else: s = sch.fuse(*s) sch.reorder(s, tx) sch.bind(tx, "threadIdx.x") # Schedule epilogue if epilogue_info is not None: epilogue = epilogue_info.block_rv sch.reverse_compute_at(epilogue, bx) if is_broadcast_epilogue(sch, block, epilogue): sch.set_scope(block, 0, "shared") _, *s = 
sch.get_loops(epilogue) # pylint: disable=invalid-name _, tx = sch.split(sch.fuse(*s), factors=[None, len_tx]) sch.bind(tx, "threadIdx.x") else: sch.set_scope(block, 0, "local") # pylint: enable=invalid-name def _sch_inner_spatial( self, sch: tir.Schedule, _: Target, block: tir.schedule.BlockRV, block_info: BlockInfo, unroll_spatial_factor: Optional[int], epilogue_info: Optional[BlockInfo], loop_order, s_split_index, ): # pylint: disable=invalid-name s, r, _ = sch.get_loops(block) len_tx, len_ty = 16, 16 s_factor = [i.dom.extent for i in block_info.iters if i.kind == "S"][-1] # get perfect spatial factor, spatial factor should be divide the innermost spatial loop so # that the block after r_factor and be reversed compute at the original scope while len_tx > 1: if s_factor % len_tx == 0: break len_tx -= 1 _, _ = sch.split(s, factors=[None, len_tx]) _, ty = sch.split(r, factors=[None, len_ty]) # Schedule the RF block rf = sch.rfactor(ty, 0) bx, tx, r, ty, _ = sch.get_loops(rf) sch.reorder(bx, tx, ty, r) sch.bind(tx, "threadIdx.x") sch.bind(ty, "threadIdx.y") sch.bind(bx, "blockIdx.x") sch.set_scope(rf, 0, "local") sch.decompose_reduction(rf, r) # Schedule the write back block sch.reverse_compute_at(block, bx, preserve_unit_loops=True) _, r, *s = sch.get_loops(block) if unroll_spatial_factor: assert len(s) == len(loop_order) new_order_s = [s[loop_order[i]] for i in range(len(s))] sch.reorder(*new_order_s) new_order_s[s_split_index], c = sch.split( new_order_s[s_split_index], factors=[None, unroll_spatial_factor] ) sch.reorder(*new_order_s, c) s = sch.fuse(*new_order_s) sch.reorder(s, c, r) else: s = sch.fuse(*s) sch.reorder(s, r) sch.bind(s, "threadIdx.x") sch.bind(r, "threadIdx.y") # Schedule epilogue if epilogue_info is not None: epilogue = epilogue_info.block_rv sch.reverse_compute_at(epilogue, bx) if is_broadcast_epilogue(sch, block, epilogue): sch.set_scope(block, 0, "shared") _, *s = sch.get_loops(epilogue) # pylint: disable=invalid-name _, tx, ty = sch.split(sch.fuse(*s), factors=[None, len_tx, len_ty]) sch.bind(tx, "threadIdx.x") sch.bind(ty, "threadIdx.y") else: # The epilogue is element-wise without broadcasting. # Thus the remaining spatial part should be bind to tx. sch.set_scope(block, 0, "local") _, *s = sch.get_loops(epilogue) # pylint: disable=invalid-name tx, _ = sch.split(sch.fuse(*s), factors=[len_tx, None]) sch.bind(tx, "threadIdx.x") # pylint: enable=invalid-name
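An illustrative sketch of driving the `Reduction` rule above by hand. The toy `row_sum` kernel, the import path, and the target string are assumptions, and TVMScript buffer-annotation syntax can differ slightly between TVM versions; `apply()` returns a `tir.Schedule` or `None` if the rule does not match.

```python
import tvm
from tvm.script import tir as T
from bitblas.gpu.reduction import Reduction


@T.prim_func
def row_sum(A: T.Buffer((128, 256), "float16"), B: T.Buffer((128,), "float16")):
    # A simple inner-reduction workload: B[i] = sum_k A[i, k]
    for i, k in T.grid(128, 256):
        with T.block("B"):
            vi, vk = T.axis.remap("SR", [i, k])
            with T.init():
                B[vi] = T.float16(0)
            B[vi] = B[vi] + A[vi, vk]


target = tvm.target.Target("cuda")
sch = Reduction().apply(row_sum, target, False)
if sch is not None:
    rt_mod = tvm.build(sch.mod, target=target)
```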
BitBLAS/python/bitblas/gpu/reduction.py/0
{ "file_path": "BitBLAS/python/bitblas/gpu/reduction.py", "repo_id": "BitBLAS", "token_count": 5814 }
160
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import tvm import numpy as np from tvm.target import Target from bitblas.utils.tensor_adapter import tvm_tensor_to_torch from typing import List, Union, Optional, Any, Tuple from .operator import Operator, TransformKind from .impl.matmul_impl import select_implementation from bitblas.utils import tensor_replace_dp4a, tensor_remove_make_int4 from dataclasses import dataclass from .ladder_permutate import LadderPermutate, LadderPermutateConfig import logging logger = logging.getLogger(__name__) class TransformExecutorCPU: def __init__(self, operators: Optional[List[Operator]] = None): if operators is None: operators = [] self.operators = operators def append(self, op): self.operators.append(op) def is_none(self): return len(self.operators) == 0 def forward(self, weight): inputs = [weight] for op in self.operators: inputs.append(tvm_tensor_to_torch(op.get_profile_tensors()[-1]).cpu()) inputs = [op.forward(*inputs)] return inputs[-1] def __call__(self, *args: Any, **kwds: Any) -> Any: return self.forward(*args, **kwds) @property def size(self): return len(self.operators) @dataclass(frozen=True) class MatmulConfig: M: Union[int, Tuple[int]] N: int K: int in_dtype: str = "float16" out_dtype: str = "float16" accum_dtype: str = "float16" with_bias: bool = False # layout of matrix A and B # "nn": C[i, j] = A[i, k] * B[k, j] # "nt": C[i, j] = A[i, k] * B[j, k] layout: str = "nt" # weight transformation kind of matrix A propagate_a: TransformKind = TransformKind.NonTransform # weight transformation kind of matrix B propagate_b: TransformKind = TransformKind.NonTransform def __post_init__(self): # set M to tuple if it is list # otherwise, M is not hashable object.__setattr__(self, "M", tuple(self.M) if isinstance(self.M, list) else self.M) if isinstance(self.propagate_a, bool): object.__setattr__( self, "propagate_a", (TransformKind.IntraWarpTransform if self.propagate_a else TransformKind.NonTransform), ) elif isinstance(self.propagate_a, int): object.__setattr__(self, "propagate_a", TransformKind(self.propagate_a)) if isinstance(self.propagate_b, bool): object.__setattr__( self, "propagate_b", (TransformKind.IntraWarpTransform if self.propagate_b else TransformKind.NonTransform), ) elif isinstance(self.propagate_b, int): object.__setattr__(self, "propagate_b", TransformKind(self.propagate_b)) class Matmul(Operator): def __init__( self, config: MatmulConfig, name: str = "matmul", target: Union[str, Target] = "cuda", enable_tuning: bool = False, from_database: bool = False, ): super().__init__(name, config, target) target = self.target if target.kind.name != "cuda": raise ValueError("Currently only support cuda target") if isinstance(self.M, Tuple): self.dynamic_range = {"m": self.M} self.update_func(self.prim_func.with_attrs({"opt_shapes": self.dynamic_range})) else: self.dynamic_range = None if not from_database: self._build_default_module(target) if self.propagate_a: assert (self.propagate_a is TransformKind.NonTransform), "Currently only support NonTransform for input" ladder_permutate_config = LadderPermutateConfig( M=self.M, N=self.K, datatype=self.in_dtype, storage_dtype=self.in_dtype, propagate_kind="A", transpose_matrix=False, transform_kind=self.propagate_a, ) self.ladder_permutate_a = LadderPermutate( config=ladder_permutate_config, target=tvm.target.Target("llvm"), ) else: self.ladder_permutate_a = None if self.propagate_b: ladder_permutate_config = LadderPermutateConfig( M=self.N, N=self.K, datatype=self.in_dtype, 
storage_dtype=self.in_dtype, propagate_kind="B", transpose_matrix=(self.layout == "nt"), transform_kind=self.propagate_b, ) self.ladder_permutate_b = LadderPermutate( config=ladder_permutate_config, target=tvm.target.Target("llvm"), ) else: self.ladder_permutate_b = None input_executors = TransformExecutorCPU() if self.ladder_permutate_a is not None: input_executors.append(self.ladder_permutate_b) self.input_executors = input_executors weight_executors = TransformExecutorCPU() if self.ladder_permutate_b is not None: weight_executors.append(self.ladder_permutate_b) self.weight_executors = weight_executors if enable_tuning: self.hardware_aware_finetune() def _build_default_module(self, target: Target): try: self.optimized_func = self.apply_default_schedule(self.prim_func_mod, target) except Exception: self.optimized_func = None logger.warning( "[BitBLAS][Warning] Apply default schedule failed, should do hardware-aware optimization manually." ) self._build_runtime_module(target) def _select_implementation(self): return select_implementation( M=self.M, N=self.N, K=self.K, in_dtype=self.in_dtype, out_dtype=self.out_dtype, accum_dtype=self.accum_dtype, with_bias=self.with_bias, layout=self.layout, propagate_a=self.propagate_a, propagate_b=self.propagate_b, ) def post_process(self, code: str) -> str: code = tensor_replace_dp4a(code) code = tensor_remove_make_int4(code) return code def _profile_latency_with_dynamic_range(self) -> List: func = self.prim_func_mod["main"] device = self.arch.device def var_warpper(v, m): if isinstance(v, tvm.tir.Var): assert "opt_shapes" in func.attrs assert v.name in func.attrs["opt_shapes"] return m elif isinstance(v, tvm.tir.IntImm): return v.value else: raise RuntimeError("Not supported type: ", type(v)) benchmark_latencies = [] for m in self.dynamic_range["m"]: profile_tensors = [] for param in func.params: if param not in func.buffer_map: # in case of dynamic symbolic may in params continue arg = func.buffer_map[param] profile_tensors.append( tvm.nd.array( np.random.uniform(0, 1, [var_warpper(i, m) for i in arg.shape]).astype(arg.dtype), device=device, )) self.profile_tensors = profile_tensors latency = self.time_evaluator(*profile_tensors).mean * 1e3 benchmark_latencies.append({"m": m, "latency": latency}) # ms return benchmark_latencies def forward(self, *args) -> Any: if self.lib is None: self._forward_from_torch_func(*args) dynamic_symbolic = [] if self.dynamic_range is not None: # assume we only have one dynamic range m = args[0].shape[0] dynamic_symbolic.append(m) self._forward_from_prebuild_lib(*args, *dynamic_symbolic) @property def M(self): return self.config.M @property def N(self): return self.config.N @property def K(self): return self.config.K @property def in_dtype(self): return self.config.in_dtype @property def out_dtype(self): return self.config.out_dtype @property def accum_dtype(self): return self.config.accum_dtype @property def layout(self): return self.config.layout @property def with_bias(self): return self.config.with_bias @property def propagate_a(self): return self.config.propagate_a @property def propagate_b(self): return self.config.propagate_b @property def input_transform(self): return self.input_executors if self.input_executors.size else None @property def weight_transform(self): return self.weight_executors if self.weight_executors.size else None __all__ = ["Matmul", "MatmulConfig"]
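A hedged sketch of driving the `Matmul` operator defined above. It assumes a CUDA device, that the default schedule builds successfully, and follows the output-last calling convention used by the accompanying operator tests; shapes are arbitrary examples.

```python
import torch
from bitblas.ops.matmul import Matmul, MatmulConfig

cfg = MatmulConfig(
    M=16, N=1024, K=1024,
    in_dtype="float16", out_dtype="float16", accum_dtype="float16",
    layout="nt",
)
op = Matmul(config=cfg, target="cuda", enable_tuning=False)

a = torch.rand(16, 1024, dtype=torch.float16).cuda()
b = torch.rand(1024, 1024, dtype=torch.float16).cuda()  # "nt" layout: B stored as (N, K)
c = torch.empty(16, 1024, dtype=torch.float16).cuda()   # output buffer, passed last
op.forward(a, b, c)
```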
BitBLAS/python/bitblas/ops/matmul.py/0
{ "file_path": "BitBLAS/python/bitblas/ops/matmul.py", "repo_id": "BitBLAS", "token_count": 4463 }
161
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from .general import CUDASourceWrapper, CUDASourceWrapperWithDynamic  # noqa: F401
BitBLAS/python/bitblas/wrapper/__init__.py/0
{ "file_path": "BitBLAS/python/bitblas/wrapper/__init__.py", "repo_id": "BitBLAS", "token_count": 45 }
162
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import pytest import bitblas from bitblas import MatmulConfig, Matmul import logging from bitblas import set_log_level set_log_level(logging.DEBUG) def get_codegen_result(ops): code = ops.get_source() return code # fmt: off @pytest.mark.parametrize( "M,N,K,A_dtype,W_dtype,accum_dtype,out_dtype,layout,with_bias,group_size,with_scaling,with_zeros,zeros_mode", [ (1, 768, 768, "float16", "float16", "float16", "float16", "nt", False, -1, False, False, None), (768, 768, 768, "float16", "float16", "float16", "float16", "nt", False, -1, False, False, None), (1, 768, 768, "int8", "int8", "int32", "int8", "nt", False, -1, False, False, None), (768, 768, 768, "int8", "int8", "int32", "int8", "nt", False, -1, False, False, None), (1, 768, 768, "float16", "uint4", "float16", "float16", "nt", False, -1, False, False, None), (1, 768, 768, "float16", "uint4", "float16", "float16", "nt", True, -1, False, False, None), (1, 768, 768, "float16", "uint4", "float16", "float16", "nt", False, -1, True, False, None), (1, 768, 768, "float16", "uint4", "float16", "float16", "nt", False, -1, True, True, "original"), (768, 768, 768, "float16", "uint4", "float16", "float16", "nt", False, -1, False, False, None), (768, 768, 768, "float16", "uint4", "float16", "float16", "nt", True, -1, False, False, None), (768, 768, 768, "float16", "uint4", "float16", "float16", "nt", False, -1, True, False, None), (768, 768, 768, "float16", "uint4", "float16", "float16", "nt", False, -1, True, True, "original"), ], ) def test_matmul_codegen_default(M, N, K, A_dtype, W_dtype, accum_dtype, out_dtype, layout, with_bias, group_size, with_scaling, with_zeros, zeros_mode): matmul_config = MatmulConfig( M=M, N=N, K=K, A_dtype=A_dtype, W_dtype=W_dtype, accum_dtype=accum_dtype, out_dtype=out_dtype, layout=layout, with_bias=with_bias, group_size=group_size, with_scaling=with_scaling, with_zeros=with_zeros, zeros_mode=zeros_mode, ) matmul = Matmul(config=matmul_config, enable_tuning=False) assert get_codegen_result(matmul) @pytest.mark.parametrize( "M,N,K,A_dtype,W_dtype,accum_dtype,out_dtype,layout,with_bias,group_size,with_scaling,with_zeros,zeros_mode", [ (1, 768, 768, "float16", "float16", "float16", "float16", "nt", False, -1, False, False, None), (768, 768, 768, "float16", "float16", "float16", "float16", "nt", False, -1, False, False, None), (1, 768, 768, "int8", "int8", "int32", "int8", "nt", False, -1, False, False, None), (768, 768, 768, "int8", "int8", "int32", "int8", "nt", False, -1, False, False, None), (1, 768, 768, "float16", "uint4", "float16", "float16", "nt", False, -1, False, False, None), (1, 768, 768, "float16", "uint4", "float16", "float16", "nt", True, -1, False, False, None), (1, 768, 768, "float16", "uint4", "float16", "float16", "nt", False, -1, True, False, None), (1, 768, 768, "float16", "uint4", "float16", "float16", "nt", False, -1, True, True, "original"), (768, 768, 768, "float16", "uint4", "float16", "float16", "nt", False, -1, False, False, None), (768, 768, 768, "float16", "uint4", "float16", "float16", "nt", True, -1, False, False, None), (768, 768, 768, "float16", "uint4", "float16", "float16", "nt", False, -1, True, False, None), (768, 768, 768, "float16", "uint4", "float16", "float16", "nt", False, -1, True, True, "original"), ], ) def test_matmul_finetune(M, N, K, A_dtype, W_dtype, accum_dtype, out_dtype, layout, with_bias, group_size, with_scaling, with_zeros, zeros_mode): matmul_config = MatmulConfig( M=M, N=N, K=K, 
A_dtype=A_dtype, W_dtype=W_dtype, accum_dtype=accum_dtype, out_dtype=out_dtype, layout=layout, with_bias=with_bias, group_size=group_size, with_scaling=with_scaling, with_zeros=with_zeros, zeros_mode=zeros_mode, ) matmul = Matmul(config=matmul_config, enable_tuning=False) matmul.hardware_aware_finetune(topk=10) assert get_codegen_result(matmul) @pytest.mark.parametrize( "M,N,K,A_dtype,W_dtype,accum_dtype,out_dtype,layout,with_bias,group_size,with_scaling,with_zeros,zeros_mode", [ (1, 1024, 1024, "float16", "int4", "float16", "float16", "nt", None, None, None, None, None), (1, 768, 768, "float16", "uint4", "float16", "float16", "nt", False, -1, False, False, None), (1, 768, 768, "float16", "uint4", "float16", "float16", "nt", True, -1, False, False, None), (1, 768, 768, "float16", "uint4", "float16", "float16", "nt", False, -1, True, False, None), (1, 768, 768, "float16", "uint4", "float16", "float16", "nt", False, -1, True, True, "original"), (768, 768, 768, "float16", "uint4", "float16", "float16", "nt", False, -1, False, False, None), (768, 768, 768, "float16", "uint4", "float16", "float16", "nt", True, -1, False, False, None), (768, 768, 768, "float16", "uint4", "float16", "float16", "nt", False, -1, True, False, None), (768, 768, 768, "float16", "uint4", "float16", "float16", "nt", False, -1, True, True, "original"), ], ) def test_matmul_torch_forward(M, N, K, A_dtype, W_dtype, accum_dtype, out_dtype, layout, with_bias, group_size, with_scaling, with_zeros, zeros_mode): import torch torch.random.manual_seed(0) import numpy as np from bitblas.quantization import general_compress matmul_config = MatmulConfig( M=M, N=N, K=K, A_dtype=A_dtype, W_dtype=W_dtype, accum_dtype=accum_dtype, out_dtype=out_dtype, layout=layout, with_bias=with_bias, group_size=group_size, with_scaling=with_scaling, with_zeros=with_zeros, zeros_mode=zeros_mode, ) matmul = Matmul(config=matmul_config, enable_tuning=False) input_shape = (M, K) weight_shape = (N, K) if layout == "nt" else (K, N) output_shape = (M, N) inputs = [] inputs.append(torch.rand(input_shape, dtype=torch.float16).cuda() - 0.5) source_format, bit = matmul.BITBLAS_TRICK_DTYPE_MAP[W_dtype] maxq = 2**(bit - 1) zeros = maxq if source_format == "uint": inputs.append(torch.randint(0, maxq, weight_shape, dtype=torch.int8).cuda()) elif source_format == "int": inputs.append(torch.randint(-maxq, maxq, weight_shape, dtype=torch.int8).cuda()) else: raise NotImplementedError inputs.append(torch.rand(output_shape, dtype=torch.float16).cuda()) intweight = inputs[1] intweight = intweight.cpu().numpy().astype(np.int8) if source_format == "int": intweight = intweight + maxq if with_zeros: inputs[1] = inputs[1] - zeros bias = torch.rand((output_shape[-1],), dtype=torch.float16).cuda() ref_result = torch.matmul(inputs[0], (inputs[1].t() if layout == "nt" else inputs[1]).to(torch.float16)) if with_bias: ref_result = ref_result + bias qw_np = general_compress(intweight, source_bits=bit, storage_dtype=np.int8) qw_torch = torch.from_numpy(qw_np).cuda() permuted_inputs = [] permuted_inputs.append(inputs[0]) if matmul.weight_transform is not None: permuted_inputs.append(matmul.weight_transform(qw_torch.cpu()).cuda()) else: permuted_inputs.append(qw_torch) if with_scaling: if group_size == -1: group_size = K permuted_inputs.append(torch.ones([N, K // group_size], dtype=torch.float16).cuda()) if with_zeros: if zeros_mode == "original": permuted_inputs.append( torch.ones([N, K // group_size], dtype=torch.float16).cuda() * zeros) elif zeros_mode == "rescale": original_zeros = 
torch.ones([N, K // group_size], dtype=torch.float16).cuda() * zeros scaled_zeros = original_zeros * permuted_inputs[-1] permuted_inputs.append(scaled_zeros) elif zeros_mode == "quantized": original_zeros = torch.ones([K // group_size, N], dtype=torch.int8).cuda() * zeros qzeros = general_compress( original_zeros.cpu().numpy(), source_bits=bit, storage_dtype=np.int8) permuted_inputs.append(torch.from_numpy(qzeros).cuda()) else: raise NotImplementedError if with_bias: permuted_inputs.append(bias) permuted_inputs.append(inputs[2]) matmul(*permuted_inputs) print(permuted_inputs[-1]) print(ref_result) if zeros_mode == "rescale": torch.testing.assert_close(permuted_inputs[-1], ref_result, rtol=1e2, atol=1e-0) else: torch.testing.assert_close(permuted_inputs[-1], ref_result, rtol=1e2, atol=1e-1) @pytest.mark.parametrize( "M,N,K,A_dtype,W_dtype,accum_dtype,out_dtype,with_bias", [ (1, 768, 768, "float16", "uint4", "float16", "float16", False), (1, 768, 768, "float16", "int4", "float16", "float16", False), (768, 768, 768, "float16", "uint4", "float16", "float16", False), (768, 768, 768, "float16", "int4", "float16", "float16", False), ], ) def test_matmul_transform_weight( M, N, K, A_dtype, W_dtype, accum_dtype, out_dtype, with_bias, ): import torch torch.random.manual_seed(0) matmul_config = MatmulConfig( M=M, N=N, K=K, A_dtype=A_dtype, W_dtype=W_dtype, accum_dtype=accum_dtype, out_dtype=out_dtype, with_bias=with_bias, ) matmul = Matmul(config=matmul_config, enable_tuning=False) input_shape = (M, K) weight_shape = (N, K) output_shape = (M, N) _, bit = matmul.BITBLAS_TRICK_DTYPE_MAP[W_dtype] maxq = 2**(bit - 1) input_tensor = torch.rand(input_shape, dtype=torch.float16).cuda() intweight_tensor = torch.randint(0, maxq, weight_shape, dtype=torch.int8).cuda() output_tensor = torch.rand(output_shape, dtype=torch.float16).cuda() bias = torch.rand((output_shape[-1],), dtype=torch.float16).cuda() ref_result = torch.matmul(input_tensor, intweight_tensor.t().to(torch.float16)) if with_bias: ref_result = ref_result + bias bitblas_inputs = [input_tensor] intweight_tensor = matmul.transform_weight(intweight_tensor) bitblas_inputs.append(intweight_tensor) if with_bias: bitblas_inputs.append(bias) output_tensor = matmul(*bitblas_inputs) torch.testing.assert_close(output_tensor, ref_result, rtol=1e-2, atol=1e-0) # fmt: on if __name__ == "__main__": bitblas.testing.main()
BitBLAS/testing/python/operators/test_general_matmul_ops.py/0
{ "file_path": "BitBLAS/testing/python/operators/test_general_matmul_ops.py", "repo_id": "BitBLAS", "token_count": 5477 }
163
from ..datasets import SBUCaptionDataset
from .datamodule_base import BaseDataModule


class SBUCaptionDataModule(BaseDataModule):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @property
    def dataset_cls(self):
        return SBUCaptionDataset

    @property
    def dataset_name(self):
        return "sbu"
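A minimal sketch of the subclassing pattern above: a new datamodule only has to name its dataset class and its key. `MyCaptionDataset` and `"my_captions"` are hypothetical placeholders, not names from the repo.

```python
from .datamodule_base import BaseDataModule
from ..datasets import MyCaptionDataset  # hypothetical dataset class


class MyCaptionDataModule(BaseDataModule):
    @property
    def dataset_cls(self):
        return MyCaptionDataset

    @property
    def dataset_name(self):
        return "my_captions"
```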
BridgeTower/src/datamodules/sbu_datamodule.py/0
{ "file_path": "BridgeTower/src/datamodules/sbu_datamodule.py", "repo_id": "BridgeTower", "token_count": 143 }
164
from .meter_module import METERTransformerSS
from .bt_module import BTTransformer
BridgeTower/src/modules/__init__.py/0
{ "file_path": "BridgeTower/src/modules/__init__.py", "repo_id": "BridgeTower", "token_count": 22 }
165
from torchvision import transforms
from PIL import Image


class MinMaxResize:
    def __init__(self, shorter=800, longer=1333):
        self.min = shorter
        self.max = longer

    def __call__(self, x):
        w, h = x.size
        scale = self.min / min(w, h)
        if h < w:
            newh, neww = self.min, scale * w
        else:
            newh, neww = scale * h, self.min

        if max(newh, neww) > self.max:
            scale = self.max / max(newh, neww)
            newh = newh * scale
            neww = neww * scale

        newh, neww = int(newh + 0.5), int(neww + 0.5)
        newh, neww = newh // 32 * 32, neww // 32 * 32

        return x.resize((neww, newh), resample=Image.BICUBIC)


class UnNormalize(object):
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
        Returns:
            Tensor: Normalized image.
        """
        for t, m, s in zip(tensor, self.mean, self.std):
            t.mul_(s).add_(m)
            # The normalize code -> t.sub_(m).div_(s)
        return tensor


# This is simple maximum entropy normalization performed in Inception paper
inception_normalize = transforms.Compose(
    [transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
)

# ViT uses simple non-biased inception normalization
# https://github.com/google-research/vision_transformer/blob/master/vit_jax/input_pipeline.py#L132
inception_unnormalize = transforms.Compose(
    [UnNormalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
)

# ImageNet normalize
imagenet_normalize = transforms.Compose(
    [transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]
)
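A usage sketch for the transforms above (assumes Pillow and torchvision are installed; the image path is a placeholder): resize a PIL image with `MinMaxResize`, convert it to a tensor, then apply the inception-style normalization defined in this module.

```python
from PIL import Image
from torchvision import transforms

pipeline = transforms.Compose([
    MinMaxResize(shorter=800, longer=1333),
    transforms.ToTensor(),
    inception_normalize,          # Compose objects are callable, so nesting works
])

img = Image.open("example.jpg").convert("RGB")
tensor = pipeline(img)            # C x H x W, values roughly in [-1, 1]
```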
BridgeTower/src/transforms/utils.py/0
{ "file_path": "BridgeTower/src/transforms/utils.py", "repo_id": "BridgeTower", "token_count": 843 }
166
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

from data.pix2pix_dataset import Pix2pixDataset
from data.image_folder import make_dataset


class CustomDataset(Pix2pixDataset):
    """Dataset that loads images from directories.

    Use options --label_dir, --image_dir, --instance_dir to specify the directories.
    The images in the directories are sorted in alphabetical order and paired in order.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train):
        parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
        parser.set_defaults(preprocess_mode="resize_and_crop")
        load_size = 286 if is_train else 256
        parser.set_defaults(load_size=load_size)
        parser.set_defaults(crop_size=256)
        parser.set_defaults(display_winsize=256)
        parser.set_defaults(label_nc=13)
        parser.set_defaults(contain_dontcare_label=False)

        parser.add_argument(
            "--label_dir", type=str, required=True, help="path to the directory that contains label images"
        )
        parser.add_argument(
            "--image_dir", type=str, required=True, help="path to the directory that contains photo images"
        )
        parser.add_argument(
            "--instance_dir",
            type=str,
            default="",
            help="path to the directory that contains instance maps. Leave blank if it does not exist",
        )
        return parser

    def get_paths(self, opt):
        label_dir = opt.label_dir
        label_paths = make_dataset(label_dir, recursive=False, read_cache=True)

        image_dir = opt.image_dir
        image_paths = make_dataset(image_dir, recursive=False, read_cache=True)

        if len(opt.instance_dir) > 0:
            instance_dir = opt.instance_dir
            instance_paths = make_dataset(instance_dir, recursive=False, read_cache=True)
        else:
            instance_paths = []

        assert len(label_paths) == len(image_paths), (
            "The #images in %s and %s do not match. Is there something wrong?" % (label_dir, image_dir)
        )

        return label_paths, image_paths, instance_paths
Bringing-Old-Photos-Back-to-Life/Face_Enhancement/data/custom_dataset.py/0
{ "file_path": "Bringing-Old-Photos-Back-to-Life/Face_Enhancement/data/custom_dataset.py", "repo_id": "Bringing-Old-Photos-Back-to-Life", "token_count": 894 }
167
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

import os
from collections import OrderedDict

import data
from options.test_options import TestOptions
from models.pix2pix_model import Pix2PixModel
from util.visualizer import Visualizer
import torchvision.utils as vutils
import warnings

warnings.filterwarnings("ignore", category=UserWarning)

opt = TestOptions().parse()

dataloader = data.create_dataloader(opt)

model = Pix2PixModel(opt)
model.eval()

visualizer = Visualizer(opt)

single_save_url = os.path.join(opt.checkpoints_dir, opt.name, opt.results_dir, "each_img")

if not os.path.exists(single_save_url):
    os.makedirs(single_save_url)

for i, data_i in enumerate(dataloader):
    if i * opt.batchSize >= opt.how_many:
        break

    generated = model(data_i, mode="inference")

    img_path = data_i["path"]

    for b in range(generated.shape[0]):
        img_name = os.path.split(img_path[b])[-1]
        save_img_url = os.path.join(single_save_url, img_name)

        vutils.save_image((generated[b] + 1) / 2, save_img_url)
Bringing-Old-Photos-Back-to-Life/Face_Enhancement/test_face.py/0
{ "file_path": "Bringing-Old-Photos-Back-to-Life/Face_Enhancement/test_face.py", "repo_id": "Bringing-Old-Photos-Back-to-Life", "token_count": 399 }
168
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import os from collections import OrderedDict from torch.autograd import Variable from options.test_options import TestOptions from models.models import create_model from models.mapping_model import Pix2PixHDModel_Mapping import util.util as util from PIL import Image import torch import torchvision.utils as vutils import torchvision.transforms as transforms import numpy as np import cv2 def data_transforms(img, method=Image.BILINEAR, scale=False): ow, oh = img.size pw, ph = ow, oh if scale == True: if ow < oh: ow = 256 oh = ph / pw * 256 else: oh = 256 ow = pw / ph * 256 h = int(round(oh / 4) * 4) w = int(round(ow / 4) * 4) if (h == ph) and (w == pw): return img return img.resize((w, h), method) def data_transforms_rgb_old(img): w, h = img.size A = img if w < 256 or h < 256: A = transforms.Scale(256, Image.BILINEAR)(img) return transforms.CenterCrop(256)(A) def irregular_hole_synthesize(img, mask): img_np = np.array(img).astype("uint8") mask_np = np.array(mask).astype("uint8") mask_np = mask_np / 255 img_new = img_np * (1 - mask_np) + mask_np * 255 hole_img = Image.fromarray(img_new.astype("uint8")).convert("RGB") return hole_img def parameter_set(opt): ## Default parameters opt.serial_batches = True # no shuffle opt.no_flip = True # no flip opt.label_nc = 0 opt.n_downsample_global = 3 opt.mc = 64 opt.k_size = 4 opt.start_r = 1 opt.mapping_n_block = 6 opt.map_mc = 512 opt.no_instance = True opt.checkpoints_dir = "./checkpoints/restoration" ## if opt.Quality_restore: opt.name = "mapping_quality" opt.load_pretrainA = os.path.join(opt.checkpoints_dir, "VAE_A_quality") opt.load_pretrainB = os.path.join(opt.checkpoints_dir, "VAE_B_quality") if opt.Scratch_and_Quality_restore: opt.NL_res = True opt.use_SN = True opt.correlation_renormalize = True opt.NL_use_mask = True opt.NL_fusion_method = "combine" opt.non_local = "Setting_42" opt.name = "mapping_scratch" opt.load_pretrainA = os.path.join(opt.checkpoints_dir, "VAE_A_quality") opt.load_pretrainB = os.path.join(opt.checkpoints_dir, "VAE_B_scratch") if opt.HR: opt.mapping_exp = 1 opt.inference_optimize = True opt.mask_dilation = 3 opt.name = "mapping_Patch_Attention" if __name__ == "__main__": opt = TestOptions().parse(save=False) parameter_set(opt) model = Pix2PixHDModel_Mapping() model.initialize(opt) model.eval() if not os.path.exists(opt.outputs_dir + "/" + "input_image"): os.makedirs(opt.outputs_dir + "/" + "input_image") if not os.path.exists(opt.outputs_dir + "/" + "restored_image"): os.makedirs(opt.outputs_dir + "/" + "restored_image") if not os.path.exists(opt.outputs_dir + "/" + "origin"): os.makedirs(opt.outputs_dir + "/" + "origin") dataset_size = 0 input_loader = os.listdir(opt.test_input) dataset_size = len(input_loader) input_loader.sort() if opt.test_mask != "": mask_loader = os.listdir(opt.test_mask) dataset_size = len(os.listdir(opt.test_mask)) mask_loader.sort() img_transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] ) mask_transform = transforms.ToTensor() for i in range(dataset_size): input_name = input_loader[i] input_file = os.path.join(opt.test_input, input_name) if not os.path.isfile(input_file): print("Skipping non-file %s" % input_name) continue input = Image.open(input_file).convert("RGB") print("Now you are processing %s" % (input_name)) if opt.NL_use_mask: mask_name = mask_loader[i] mask = Image.open(os.path.join(opt.test_mask, mask_name)).convert("RGB") if opt.mask_dilation != 0: 
kernel = np.ones((3,3),np.uint8) mask = np.array(mask) mask = cv2.dilate(mask,kernel,iterations = opt.mask_dilation) mask = Image.fromarray(mask.astype('uint8')) origin = input input = irregular_hole_synthesize(input, mask) mask = mask_transform(mask) mask = mask[:1, :, :] ## Convert to single channel mask = mask.unsqueeze(0) input = img_transform(input) input = input.unsqueeze(0) else: if opt.test_mode == "Scale": input = data_transforms(input, scale=True) if opt.test_mode == "Full": input = data_transforms(input, scale=False) if opt.test_mode == "Crop": input = data_transforms_rgb_old(input) origin = input input = img_transform(input) input = input.unsqueeze(0) mask = torch.zeros_like(input) ### Necessary input try: with torch.no_grad(): generated = model.inference(input, mask) except Exception as ex: print("Skip %s due to an error:\n%s" % (input_name, str(ex))) continue if input_name.endswith(".jpg"): input_name = input_name[:-4] + ".png" image_grid = vutils.save_image( (input + 1.0) / 2.0, opt.outputs_dir + "/input_image/" + input_name, nrow=1, padding=0, normalize=True, ) image_grid = vutils.save_image( (generated.data.cpu() + 1.0) / 2.0, opt.outputs_dir + "/restored_image/" + input_name, nrow=1, padding=0, normalize=True, ) origin.save(opt.outputs_dir + "/origin/" + input_name)
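A hedged sketch of the `irregular_hole_synthesize` helper defined in the script above: paint the masked (scratched) region of an old photo white before restoration. File names are placeholders, and the mask is assumed to be an RGB image of the same size as the photo, white where scratches are.

```python
from PIL import Image

photo = Image.open("old_photo.png").convert("RGB")
scratch_mask = Image.open("scratch_mask.png").convert("RGB")

holed = irregular_hole_synthesize(photo, scratch_mask)  # masked pixels set to 255
holed.save("old_photo_with_holes.png")
```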
Bringing-Old-Photos-Back-to-Life/Global/test.py/0
{ "file_path": "Bringing-Old-Photos-Back-to-Life/Global/test.py", "repo_id": "Bringing-Old-Photos-Back-to-Life", "token_count": 2865 }
169
from .CLAPWrapper import CLAPWrapper as CLAP
CLAP/msclap/__init__.py/0
{ "file_path": "CLAP/msclap/__init__.py", "repo_id": "CLAP", "token_count": 14 }
170
.wy-table-responsive table td kbd {
    white-space: nowrap;
}

.wy-table-responsive table td {
    white-space: normal !important;
}

.wy-table-responsive {
    overflow: visible !important;
}
COCO-LM/fairseq/docs/_static/theme_overrides.css/0
{ "file_path": "COCO-LM/fairseq/docs/_static/theme_overrides.css", "repo_id": "COCO-LM", "token_count": 69 }
171
Overview ======== Fairseq can be extended through user-supplied `plug-ins <https://en.wikipedia.org/wiki/Plug-in_(computing)>`_. We support five kinds of plug-ins: - :ref:`Models` define the neural network architecture and encapsulate all of the learnable parameters. - :ref:`Criterions` compute the loss function given the model outputs and targets. - :ref:`Tasks` store dictionaries and provide helpers for loading/iterating over Datasets, initializing the Model/Criterion and calculating the loss. - :ref:`Optimizers` update the Model parameters based on the gradients. - :ref:`Learning Rate Schedulers` update the learning rate over the course of training. **Training Flow** Given a ``model``, ``criterion``, ``task``, ``optimizer`` and ``lr_scheduler``, fairseq implements the following high-level training flow:: for epoch in range(num_epochs): itr = task.get_batch_iterator(task.dataset('train')) for num_updates, batch in enumerate(itr): task.train_step(batch, model, criterion, optimizer) average_and_clip_gradients() optimizer.step() lr_scheduler.step_update(num_updates) lr_scheduler.step(epoch) where the default implementation for ``task.train_step`` is roughly:: def train_step(self, batch, model, criterion, optimizer, **unused): loss = criterion(model, batch) optimizer.backward(loss) return loss **Registering new plug-ins** New plug-ins are *registered* through a set of ``@register`` function decorators, for example:: @register_model('my_lstm') class MyLSTM(FairseqEncoderDecoderModel): (...) Once registered, new plug-ins can be used with the existing :ref:`Command-line Tools`. See the Tutorial sections for more detailed walkthroughs of how to add new plug-ins. **Loading plug-ins from another directory** New plug-ins can be defined in a custom module stored in the user system. In order to import the module, and make the plugin available to *fairseq*, the command line supports the ``--user-dir`` flag that can be used to specify a custom location for additional modules to load into *fairseq*. For example, assuming this directory tree:: /home/user/my-module/ └── __init__.py with ``__init__.py``:: from fairseq.models import register_model_architecture from fairseq.models.transformer import transformer_vaswani_wmt_en_de_big @register_model_architecture('transformer', 'my_transformer') def transformer_mmt_big(args): transformer_vaswani_wmt_en_de_big(args) it is possible to invoke the :ref:`fairseq-train` script with the new architecture with:: fairseq-train ... --user-dir /home/user/my-module -a my_transformer --task translation
COCO-LM/fairseq/docs/overview.rst/0
{ "file_path": "COCO-LM/fairseq/docs/overview.rst", "repo_id": "COCO-LM", "token_count": 859 }
172
#!/usr/bin/env python # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import fileinput from tqdm import tqdm def main(): parser = argparse.ArgumentParser( description=( "Extract back-translations from the stdout of fairseq-generate. " "If there are multiply hypotheses for a source, we only keep the first one. " ) ) parser.add_argument("--output", required=True, help="output prefix") parser.add_argument( "--srclang", required=True, help="source language (extracted from H-* lines)" ) parser.add_argument( "--tgtlang", required=True, help="target language (extracted from S-* lines)" ) parser.add_argument("--minlen", type=int, help="min length filter") parser.add_argument("--maxlen", type=int, help="max length filter") parser.add_argument("--ratio", type=float, help="ratio filter") parser.add_argument("files", nargs="*", help="input files") args = parser.parse_args() def validate(src, tgt): srclen = len(src.split(" ")) if src != "" else 0 tgtlen = len(tgt.split(" ")) if tgt != "" else 0 if ( (args.minlen is not None and (srclen < args.minlen or tgtlen < args.minlen)) or ( args.maxlen is not None and (srclen > args.maxlen or tgtlen > args.maxlen) ) or ( args.ratio is not None and (max(srclen, tgtlen) / float(min(srclen, tgtlen)) > args.ratio) ) ): return False return True def safe_index(toks, index, default): try: return toks[index] except IndexError: return default with open(args.output + "." + args.srclang, "w") as src_h, open( args.output + "." + args.tgtlang, "w" ) as tgt_h: for line in tqdm(fileinput.input(args.files)): if line.startswith("S-"): tgt = safe_index(line.rstrip().split("\t"), 1, "") elif line.startswith("H-"): if tgt is not None: src = safe_index(line.rstrip().split("\t"), 2, "") if validate(src, tgt): print(src, file=src_h) print(tgt, file=tgt_h) tgt = None if __name__ == "__main__": main()
COCO-LM/fairseq/examples/backtranslation/extract_bt_data.py/0
{ "file_path": "COCO-LM/fairseq/examples/backtranslation/extract_bt_data.py", "repo_id": "COCO-LM", "token_count": 1172 }
173
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import sys

import sacremoses


def main(args):
    """Tokenizes, preserving tabs"""
    mt = sacremoses.MosesTokenizer(lang=args.lang)

    def tok(s):
        return mt.tokenize(s, return_str=True)

    for line in sys.stdin:
        parts = list(map(tok, line.split("\t")))
        print(*parts, sep="\t", flush=True)


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--lang", "-l", default="en")
    parser.add_argument("--penn", "-p", action="store_true")
    parser.add_argument("--fields", "-f", help="fields to tokenize")
    args = parser.parse_args()

    main(args)
COCO-LM/fairseq/examples/constrained_decoding/tok.py/0
{ "file_path": "COCO-LM/fairseq/examples/constrained_decoding/tok.py", "repo_id": "COCO-LM", "token_count": 313 }
174
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.tasks.translation import TranslationTask from fairseq.tasks.language_modeling import LanguageModelingTask from fairseq import checkpoint_utils import argparse from fairseq.tasks import register_task import torch @register_task("noisy_channel_translation") class NoisyChannelTranslation(TranslationTask): """ Rescore the top k candidates from each beam using noisy channel modeling """ @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" TranslationTask.add_args(parser) # fmt: off parser.add_argument('--channel-model', metavar='FILE', help='path to P(S|T) model. P(S|T) and P(T|S) must share source and target dictionaries.') parser.add_argument('--combine-method', default='lm_only', choices=['lm_only', 'noisy_channel'], help="""method for combining direct and channel model scores. lm_only: decode with P(T|S)P(T) noisy_channel: decode with 1/t P(T|S) + 1/s(P(S|T)P(T))""") parser.add_argument('--normalize-lm-scores-by-tgt-len', action='store_true', default=False, help='normalize lm score by target length instead of source length') parser.add_argument('--channel-scoring-type', default='log_norm', choices=['unnormalized', 'log_norm', 'k2_separate', 'src_vocab', 'src_vocab_batched'], help="Normalize bw scores with log softmax or return bw scores without log softmax") parser.add_argument('--top-k-vocab', default=0, type=int, help='top k vocab IDs to use with `src_vocab` in channel model scoring') parser.add_argument('--k2', default=50, type=int, help='the top k2 candidates to rescore with the noisy channel model for each beam') parser.add_argument('--ch-wt', default=1, type=float, help='weight for the channel model') parser.add_argument('--lm-model', metavar='FILE', help='path to lm model file, to model P(T). P(T) must share the same vocab as the direct model on the target side') parser.add_argument('--lm-data', metavar='FILE', help='path to lm model training data for target language, used to properly load LM with correct dictionary') parser.add_argument('--lm-wt', default=1, type=float, help='the weight of the lm in joint decoding') # fmt: on def build_generator( self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None ): if getattr(args, "score_reference", False): raise NotImplementedError() else: from .noisy_channel_sequence_generator import NoisyChannelSequenceGenerator use_cuda = torch.cuda.is_available() and not self.args.cpu assert self.args.lm_model is not None, '--lm-model required for noisy channel generation!' 
assert self.args.lm_data is not None, '--lm-data required for noisy channel generation to map between LM and bitext vocabs' if self.args.channel_model is not None: import copy ch_args_task = copy.deepcopy(self.args) tmp = ch_args_task.source_lang ch_args_task.source_lang = ch_args_task.target_lang ch_args_task.target_lang = tmp ch_args_task._name = 'translation' channel_task = TranslationTask.setup_task(ch_args_task) arg_dict = {} arg_dict['task'] = 'language_modeling' arg_dict['sample_break_mode'] = 'eos' arg_dict['data'] = self.args.lm_data arg_dict['output_dictionary_size'] = -1 lm_args = argparse.Namespace(**arg_dict) lm_task = LanguageModelingTask.setup_task(lm_args) lm_dict = lm_task.output_dictionary if self.args.channel_model is not None: channel_models, _ = checkpoint_utils.load_model_ensemble(self.args.channel_model.split(':'), task=channel_task) for model in channel_models: model.make_generation_fast_( beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, need_attn=args.print_alignment, ) if self.args.fp16: model.half() if use_cuda: model.cuda() else: channel_models = None lm_models, _ = checkpoint_utils.load_model_ensemble(self.args.lm_model.split(':'), task=lm_task) for model in lm_models: model.make_generation_fast_( beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, need_attn=args.print_alignment, ) if self.args.fp16: model.half() if use_cuda: model.cuda() return NoisyChannelSequenceGenerator( combine_method=self.args.combine_method, tgt_dict=self.target_dictionary, src_dict=self.source_dictionary, beam_size=getattr(args, 'beam', 5), max_len_a=getattr(args, 'max_len_a', 0), max_len_b=getattr(args, 'max_len_b', 200), min_len=getattr(args, 'min_len', 1), len_penalty=getattr(args, 'lenpen', 1), unk_penalty=getattr(args, 'unkpen', 0), temperature=getattr(args, 'temperature', 1.), match_source_len=getattr(args, 'match_source_len', False), no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0), normalize_scores=(not getattr(args, 'unnormalized', False)), channel_models=channel_models, k2=getattr(self.args, 'k2', 50), ch_weight=getattr(self.args, 'ch_wt', 1), channel_scoring_type=self.args.channel_scoring_type, top_k_vocab=self.args.top_k_vocab, lm_models=lm_models, lm_dict=lm_dict, lm_weight=getattr(self.args, 'lm_wt', 1), normalize_lm_scores_by_tgt_len=getattr(self.args, 'normalize_lm_scores_by_tgt_len', False), )
COCO-LM/fairseq/examples/fast_noisy_channel/noisy_channel_translation.py/0
{ "file_path": "COCO-LM/fairseq/examples/fast_noisy_channel/noisy_channel_translation.py", "repo_id": "COCO-LM", "token_count": 3284 }
175
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import multilingual_translation_latent_depth  # noqa
from .loss import latent_depth  # noqa
from .models import latent_multilingual_transformer  # noqa
from .modules import latent_layers  # noqa
COCO-LM/fairseq/examples/latent_depth/latent_depth_src/__init__.py/0
{ "file_path": "COCO-LM/fairseq/examples/latent_depth/latent_depth_src/__init__.py", "repo_id": "COCO-LM", "token_count": 103 }
176
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq import utils from fairseq.modules import TransformerEncoderLayer from .multihead_linear_attention import MultiheadLinearAttention class LinformerTransformerEncoderLayer(TransformerEncoderLayer): """ Implements a Linformer Encoder Layer used in BERT/XLM style pre-trained models. """ def __init__(self, args, shared_compress_layer): # wrap in a list so it's not automatically registered by PyTorch self.shared_compress_layer = [shared_compress_layer] super().__init__(args) self.register_buffer("version", torch.tensor(2)) def build_self_attention(self, embed_dim, args): return MultiheadLinearAttention( embed_dim, args.encoder_attention_heads, dropout=args.dropout, self_attention=True, q_noise=args.quant_noise_pq, qn_block_size=args.quant_noise_pq_block_size, compressed=args.compressed, max_seq_len=args.max_positions, shared_kv_compressed=args.shared_kv_compressed, shared_compress_layer=self.shared_compress_layer[0], freeze_compress=args.freeze_compress, ) def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) prefix = name + "." if name != "" else "" # some old checkpoints had weight sharing implemented incorrectly # (note: this was correct in the original paper code) if utils.item(state_dict.get(f"{prefix}version", torch.tensor(1))) < 2: state_dict[f"{prefix}version"] = torch.tensor(1) # check compression layer sharing if f"{prefix}shared_compress_layer.weight" in state_dict: # reinitialize block without sharing compression layer to match # old behavior self.shared_compress_layer = [ torch.nn.Linear( self.shared_compress_layer[0].weight.size(1), self.shared_compress_layer[0].weight.size(0), ) ] self.self_attn = self.build_self_attention(self.embed_dim, self.args) # delete shared_compress_layer, since it's already copied to # self_attn.compress_k.weight del state_dict[f"{prefix}shared_compress_layer.weight"] if f"{prefix}shared_compress_layer.bias" in state_dict: del state_dict[f"{prefix}shared_compress_layer.bias"]
COCO-LM/fairseq/examples/linformer/linformer_src/modules/linformer_sentence_encoder_layer.py/0
{ "file_path": "COCO-LM/fairseq/examples/linformer/linformer_src/modules/linformer_sentence_encoder_layer.py", "repo_id": "COCO-LM", "token_count": 1213 }
177
# MBART: Multilingual Denoising Pre-training for Neural Machine Translation [https://arxiv.org/abs/2001.08210] ## Introduction MBART is a sequence-to-sequence denoising auto-encoder pre-trained on large-scale monolingual corpora in many languages using the BART objective. mBART is one of the first methods for pre-training a complete sequence-to-sequence model by denoising full texts in multiple languages, while previous approaches have focused only on the encoder, decoder, or reconstructing parts of the text. ## Pre-trained models Model | Description | # params | Download ---|---|---|--- `mbart.CC25` | mBART model with 12 encoder and decoder layers trained on 25 languages' monolingual corpus | 610M | [mbart.CC25.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/mbart/mbart.cc25.v2.tar.gz) `mbart.ft.ro_en` | finetune mBART cc25 model on ro-en language pairs | 610M | [mbart.cc25.ft.enro.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/mbart/mbart.cc25.ft.enro.tar.gz) ## Results **[WMT16 EN-RO](https://www.statmt.org/wmt16/translation-task.html)** _(test set, no additional data used)_ Model | en-ro | ro-en ---|---|--- `Random` | 34.3 | 34.0 `mbart.cc25` | 37.7 | 37.8 `mbart.enro.bilingual` | 38.5 | 38.5 ## BPE data # download model wget https://dl.fbaipublicfiles.com/fairseq/models/mbart/mbart.cc25.v2.tar.gz tar -xzvf mbart.CC25.tar.gz # bpe data install SPM [here](https://github.com/google/sentencepiece) ```bash SPM=/path/to/sentencepiece/build/src/spm_encode MODEL=sentence.bpe.model ${SPM} --model=${MODEL} < ${DATA}/${TRAIN}.${SRC} > ${DATA}/${TRAIN}.spm.${SRC} & ${SPM} --model=${MODEL} < ${DATA}/${TRAIN}.${TGT} > ${DATA}/${TRAIN}.spm.${TGT} & ${SPM} --model=${MODEL} < ${DATA}/${VALID}.${SRC} > ${DATA}/${VALID}.spm.${SRC} & ${SPM} --model=${MODEL} < ${DATA}/${VALID}.${TGT} > ${DATA}/${VALID}.spm.${TGT} & ${SPM} --model=${MODEL} < ${DATA}/${TEST}.${SRC} > ${DATA}/${TEST}.spm.${SRC} & ${SPM} --model=${MODEL} < ${DATA}/${TEST}.${TGT} > ${DATA}/${TEST}.spm.${TGT} & ``` ## Preprocess data ```bash DICT=dict.txt fairseq-preprocess \ --source-lang ${SRC} \ --target-lang ${TGT} \ --trainpref ${DATA}/${TRAIN}.spm \ --validpref ${DATA}/${VALID}.spm \ --testpref ${DATA}/${TEST}.spm \ --destdir ${DEST}/${NAME} \ --thresholdtgt 0 \ --thresholdsrc 0 \ --srcdict ${DICT} \ --tgtdict ${DICT} \ --workers 70 ``` ## Finetune on EN-RO Finetune on mbart CC25 ```bash PRETRAIN=mbart.cc25 # fix if you moved the downloaded checkpoint langs=ar_AR,cs_CZ,de_DE,en_XX,es_XX,et_EE,fi_FI,fr_XX,gu_IN,hi_IN,it_IT,ja_XX,kk_KZ,ko_KR,lt_LT,lv_LV,my_MM,ne_NP,nl_XX,ro_RO,ru_RU,si_LK,tr_TR,vi_VN,zh_CN fairseq-train path_2_data \ --encoder-normalize-before --decoder-normalize-before \ --arch mbart_large --layernorm-embedding \ --task translation_from_pretrained_bart \ --source-lang en_XX --target-lang ro_RO \ --criterion label_smoothed_cross_entropy --label-smoothing 0.2 \ --optimizer adam --adam-eps 1e-06 --adam-betas '(0.9, 0.98)' \ --lr-scheduler polynomial_decay --lr 3e-05 --warmup-updates 2500 --total-num-update 40000 \ --dropout 0.3 --attention-dropout 0.1 --weight-decay 0.0 \ --max-tokens 1024 --update-freq 2 \ --save-interval 1 --save-interval-updates 5000 --keep-interval-updates 10 --no-epoch-checkpoints \ --seed 222 --log-format simple --log-interval 2 \ --restore-file $PRETRAIN \ --reset-optimizer --reset-meters --reset-dataloader --reset-lr-scheduler \ --langs $langs \ --ddp-backend legacy_ddp ``` ## Generate on EN-RO Get sacrebleu on finetuned en-ro model get tokenizer 
[here](https://github.com/rsennrich/wmt16-scripts) ```bash wget https://dl.fbaipublicfiles.com/fairseq/models/mbart/mbart.cc25.ft.enro.tar.gz tar -xzvf mbart.cc25.ft.enro.tar.gz ``` ```bash model_dir=MBART_finetuned_enro # fix if you moved the checkpoint fairseq-generate path_2_data \ --path $model_dir/model.pt \ --task translation_from_pretrained_bart \ --gen-subset test \ -t ro_RO -s en_XX \ --bpe 'sentencepiece' --sentencepiece-model $model_dir/sentence.bpe.model \ --sacrebleu --remove-bpe 'sentencepiece' \ --batch-size 32 --langs $langs > en_ro cat en_ro | grep -P "^H" |sort -V |cut -f 3- | sed 's/\[ro_RO\]//g' |$TOKENIZER ro > en_ro.hyp cat en_ro | grep -P "^T" |sort -V |cut -f 2- | sed 's/\[ro_RO\]//g' |$TOKENIZER ro > en_ro.ref sacrebleu -tok 'none' -s 'none' en_ro.ref < en_ro.hyp ``` ## Citation ```bibtex @article{liu2020multilingual, title={Multilingual Denoising Pre-training for Neural Machine Translation}, author={Yinhan Liu and Jiatao Gu and Naman Goyal and Xian Li and Sergey Edunov and Marjan Ghazvininejad and Mike Lewis and Luke Zettlemoyer}, year={2020}, eprint={2001.08210}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
COCO-LM/fairseq/examples/mbart/README.md/0
{ "file_path": "COCO-LM/fairseq/examples/mbart/README.md", "repo_id": "COCO-LM", "token_count": 1979 }
178
#!/bin/bash # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. if [ -z $WORKDIR_ROOT ] ; then echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exitting..." exit fi SRCDIR=$WORKDIR_ROOT/indic_languages_corpus DESTDIR=${WORKDIR_ROOT}/ML50/raw/ mkdir -p $SRCDIR mkdir -p $DESTDIR cd $SRCDIR wget http://lotus.kuee.kyoto-u.ac.jp/WAT/indic-multilingual/indic_languages_corpus.tar.gz tar -xvzf indic_languages_corpus.tar.gz SRC_EXTRACT_DIR=$SRCDIR/indic_languages_corpus/bilingual cp $SRC_EXTRACT_DIR/ml-en/train.ml $DESTDIR/train.ml_IN-en_XX.ml_IN cp $SRC_EXTRACT_DIR/ml-en/train.en $DESTDIR/train.ml_IN-en_XX.en_XX cp $SRC_EXTRACT_DIR/ml-en/dev.ml $DESTDIR/valid.ml_IN-en_XX.ml_IN cp $SRC_EXTRACT_DIR/ml-en/dev.en $DESTDIR/valid.ml_IN-en_XX.en_XX cp $SRC_EXTRACT_DIR/ml-en/test.ml $DESTDIR/test.ml_IN-en_XX.ml_IN cp $SRC_EXTRACT_DIR/ml-en/test.en $DESTDIR/test.ml_IN-en_XX.en_XX cp $SRC_EXTRACT_DIR/ur-en/train.ur $DESTDIR/train.ur_PK-en_XX.ur_PK cp $SRC_EXTRACT_DIR/ur-en/train.en $DESTDIR/train.ur_PK-en_XX.en_XX cp $SRC_EXTRACT_DIR/ur-en/dev.ur $DESTDIR/valid.ur_PK-en_XX.ur_PK cp $SRC_EXTRACT_DIR/ur-en/dev.en $DESTDIR/valid.ur_PK-en_XX.en_XX cp $SRC_EXTRACT_DIR/ur-en/test.ur $DESTDIR/test.ur_PK-en_XX.ur_PK cp $SRC_EXTRACT_DIR/ur-en/test.en $DESTDIR/test.ur_PK-en_XX.en_XX cp $SRC_EXTRACT_DIR/te-en/train.te $DESTDIR/train.te_IN-en_XX.te_IN cp $SRC_EXTRACT_DIR/te-en/train.en $DESTDIR/train.te_IN-en_XX.en_XX cp $SRC_EXTRACT_DIR/te-en/dev.te $DESTDIR/valid.te_IN-en_XX.te_IN cp $SRC_EXTRACT_DIR/te-en/dev.en $DESTDIR/valid.te_IN-en_XX.en_XX cp $SRC_EXTRACT_DIR/te-en/test.te $DESTDIR/test.te_IN-en_XX.te_IN cp $SRC_EXTRACT_DIR/te-en/test.en $DESTDIR/test.te_IN-en_XX.en_XX
COCO-LM/fairseq/examples/multilingual/data_scripts/download_lotus.sh/0
{ "file_path": "COCO-LM/fairseq/examples/multilingual/data_scripts/download_lotus.sh", "repo_id": "COCO-LM", "token_count": 932 }
179
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from multiprocessing import Pool import numpy as np from fairseq import options from fairseq.data import dictionary from fairseq.scoring import bleu from examples.noisychannel import ( rerank_generate, rerank_options, rerank_score_bw, rerank_score_lm, rerank_utils, ) def score_target_hypo( args, a, b, c, lenpen, target_outfile, hypo_outfile, write_hypos, normalize ): print("lenpen", lenpen, "weight1", a, "weight2", b, "weight3", c) gen_output_lst, bitext1_lst, bitext2_lst, lm_res_lst = load_score_files(args) dict = dictionary.Dictionary() scorer = scorer = bleu.Scorer( bleu.BleuConfig( pad=dict.pad(), eos=dict.eos(), unk=dict.unk(), ) ) ordered_hypos = {} ordered_targets = {} for shard_id in range(len(bitext1_lst)): bitext1 = bitext1_lst[shard_id] bitext2 = bitext2_lst[shard_id] gen_output = gen_output_lst[shard_id] lm_res = lm_res_lst[shard_id] total = len(bitext1.rescore_source.keys()) source_lst = [] hypo_lst = [] score_lst = [] reference_lst = [] j = 1 best_score = -math.inf for i in range(total): # length is measured in terms of words, not bpe tokens, since models may not share the same bpe target_len = len(bitext1.rescore_hypo[i].split()) if lm_res is not None: lm_score = lm_res.score[i] else: lm_score = 0 if bitext2 is not None: bitext2_score = bitext2.rescore_score[i] bitext2_backwards = bitext2.backwards else: bitext2_score = None bitext2_backwards = None score = rerank_utils.get_score( a, b, c, target_len, bitext1.rescore_score[i], bitext2_score, lm_score=lm_score, lenpen=lenpen, src_len=bitext1.source_lengths[i], tgt_len=bitext1.target_lengths[i], bitext1_backwards=bitext1.backwards, bitext2_backwards=bitext2_backwards, normalize=normalize, ) if score > best_score: best_score = score best_hypo = bitext1.rescore_hypo[i] if j == gen_output.num_hypos[i] or j == args.num_rescore: j = 1 hypo_lst.append(best_hypo) score_lst.append(best_score) source_lst.append(bitext1.rescore_source[i]) reference_lst.append(bitext1.rescore_target[i]) best_score = -math.inf best_hypo = "" else: j += 1 gen_keys = list(sorted(gen_output.no_bpe_target.keys())) for key in range(len(gen_keys)): if args.prefix_len is None: assert hypo_lst[key] in gen_output.no_bpe_hypo[gen_keys[key]], ( "pred and rescore hypo mismatch: i: " + str(key) + ", " + str(hypo_lst[key]) + str(gen_keys[key]) + str(gen_output.no_bpe_hypo[key]) ) sys_tok = dict.encode_line(hypo_lst[key]) ref_tok = dict.encode_line(gen_output.no_bpe_target[gen_keys[key]]) scorer.add(ref_tok, sys_tok) else: full_hypo = rerank_utils.get_full_from_prefix( hypo_lst[key], gen_output.no_bpe_hypo[gen_keys[key]] ) sys_tok = dict.encode_line(full_hypo) ref_tok = dict.encode_line(gen_output.no_bpe_target[gen_keys[key]]) scorer.add(ref_tok, sys_tok) # if only one set of hyper parameters is provided, write the predictions to a file if write_hypos: # recover the orinal ids from n best list generation for key in range(len(gen_output.no_bpe_target)): if args.prefix_len is None: assert hypo_lst[key] in gen_output.no_bpe_hypo[gen_keys[key]], ( "pred and rescore hypo mismatch:" + "i:" + str(key) + str(hypo_lst[key]) + str(gen_output.no_bpe_hypo[key]) ) ordered_hypos[gen_keys[key]] = hypo_lst[key] ordered_targets[gen_keys[key]] = gen_output.no_bpe_target[ gen_keys[key] ] else: full_hypo = rerank_utils.get_full_from_prefix( hypo_lst[key], gen_output.no_bpe_hypo[gen_keys[key]] ) 
ordered_hypos[gen_keys[key]] = full_hypo ordered_targets[gen_keys[key]] = gen_output.no_bpe_target[ gen_keys[key] ] # write the hypos in the original order from nbest list generation if args.num_shards == (len(bitext1_lst)): with open(target_outfile, "w") as t: with open(hypo_outfile, "w") as h: for key in range(len(ordered_hypos)): t.write(ordered_targets[key]) h.write(ordered_hypos[key]) res = scorer.result_string(4) if write_hypos: print(res) score = rerank_utils.parse_bleu_scoring(res) return score def match_target_hypo(args, target_outfile, hypo_outfile): """combine scores from the LM and bitext models, and write the top scoring hypothesis to a file""" if len(args.weight1) == 1: res = score_target_hypo( args, args.weight1[0], args.weight2[0], args.weight3[0], args.lenpen[0], target_outfile, hypo_outfile, True, args.normalize, ) rerank_scores = [res] else: print("launching pool") with Pool(32) as p: rerank_scores = p.starmap( score_target_hypo, [ ( args, args.weight1[i], args.weight2[i], args.weight3[i], args.lenpen[i], target_outfile, hypo_outfile, False, args.normalize, ) for i in range(len(args.weight1)) ], ) if len(rerank_scores) > 1: best_index = np.argmax(rerank_scores) best_score = rerank_scores[best_index] print("best score", best_score) print("best lenpen", args.lenpen[best_index]) print("best weight1", args.weight1[best_index]) print("best weight2", args.weight2[best_index]) print("best weight3", args.weight3[best_index]) return ( args.lenpen[best_index], args.weight1[best_index], args.weight2[best_index], args.weight3[best_index], best_score, ) else: return ( args.lenpen[0], args.weight1[0], args.weight2[0], args.weight3[0], rerank_scores[0], ) def load_score_files(args): if args.all_shards: shard_ids = list(range(args.num_shards)) else: shard_ids = [args.shard_id] gen_output_lst = [] bitext1_lst = [] bitext2_lst = [] lm_res1_lst = [] for shard_id in shard_ids: using_nbest = args.nbest_list is not None ( pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, backwards_preprocessed_dir, lm_preprocessed_dir, ) = rerank_utils.get_directories( args.data_dir_name, args.num_rescore, args.gen_subset, args.gen_model_name, shard_id, args.num_shards, args.sampling, args.prefix_len, args.target_prefix_frac, args.source_prefix_frac, ) rerank1_is_gen = ( args.gen_model == args.score_model1 and args.source_prefix_frac is None ) rerank2_is_gen = ( args.gen_model == args.score_model2 and args.source_prefix_frac is None ) score1_file = rerank_utils.rescore_file_name( pre_gen, args.prefix_len, args.model1_name, target_prefix_frac=args.target_prefix_frac, source_prefix_frac=args.source_prefix_frac, backwards=args.backwards1, ) if args.score_model2 is not None: score2_file = rerank_utils.rescore_file_name( pre_gen, args.prefix_len, args.model2_name, target_prefix_frac=args.target_prefix_frac, source_prefix_frac=args.source_prefix_frac, backwards=args.backwards2, ) if args.language_model is not None: lm_score_file = rerank_utils.rescore_file_name( pre_gen, args.prefix_len, args.lm_name, lm_file=True ) # get gen output predictions_bpe_file = pre_gen + "/generate_output_bpe.txt" if using_nbest: print("Using predefined n-best list from interactive.py") predictions_bpe_file = args.nbest_list gen_output = rerank_utils.BitextOutputFromGen( predictions_bpe_file, bpe_symbol=args.post_process, nbest=using_nbest, prefix_len=args.prefix_len, target_prefix_frac=args.target_prefix_frac, ) if rerank1_is_gen: bitext1 = gen_output else: bitext1 = rerank_utils.BitextOutput( score1_file, 
args.backwards1, args.right_to_left1, args.post_process, args.prefix_len, args.target_prefix_frac, args.source_prefix_frac, ) if args.score_model2 is not None or args.nbest_list is not None: if rerank2_is_gen: bitext2 = gen_output else: bitext2 = rerank_utils.BitextOutput( score2_file, args.backwards2, args.right_to_left2, args.post_process, args.prefix_len, args.target_prefix_frac, args.source_prefix_frac, ) assert ( bitext2.source_lengths == bitext1.source_lengths ), "source lengths for rescoring models do not match" assert ( bitext2.target_lengths == bitext1.target_lengths ), "target lengths for rescoring models do not match" else: if args.diff_bpe: assert args.score_model2 is None bitext2 = gen_output else: bitext2 = None if args.language_model is not None: lm_res1 = rerank_utils.LMOutput( lm_score_file, args.lm_dict, args.prefix_len, args.post_process, args.target_prefix_frac, ) else: lm_res1 = None gen_output_lst.append(gen_output) bitext1_lst.append(bitext1) bitext2_lst.append(bitext2) lm_res1_lst.append(lm_res1) return gen_output_lst, bitext1_lst, bitext2_lst, lm_res1_lst def rerank(args): if type(args.lenpen) is not list: args.lenpen = [args.lenpen] if type(args.weight1) is not list: args.weight1 = [args.weight1] if type(args.weight2) is not list: args.weight2 = [args.weight2] if type(args.weight3) is not list: args.weight3 = [args.weight3] if args.all_shards: shard_ids = list(range(args.num_shards)) else: shard_ids = [args.shard_id] for shard_id in shard_ids: ( pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, backwards_preprocessed_dir, lm_preprocessed_dir, ) = rerank_utils.get_directories( args.data_dir_name, args.num_rescore, args.gen_subset, args.gen_model_name, shard_id, args.num_shards, args.sampling, args.prefix_len, args.target_prefix_frac, args.source_prefix_frac, ) rerank_generate.gen_and_reprocess_nbest(args) rerank_score_bw.score_bw(args) rerank_score_lm.score_lm(args) if args.write_hypos is None: write_targets = pre_gen + "/matched_targets" write_hypos = pre_gen + "/matched_hypos" else: write_targets = args.write_hypos + "_targets" + args.gen_subset write_hypos = args.write_hypos + "_hypos" + args.gen_subset if args.all_shards: write_targets += "_all_shards" write_hypos += "_all_shards" ( best_lenpen, best_weight1, best_weight2, best_weight3, best_score, ) = match_target_hypo(args, write_targets, write_hypos) return best_lenpen, best_weight1, best_weight2, best_weight3, best_score def cli_main(): parser = rerank_options.get_reranking_parser() args = options.parse_args_and_arch(parser) rerank(args) if __name__ == "__main__": cli_main()
COCO-LM/fairseq/examples/noisychannel/rerank.py/0
{ "file_path": "COCO-LM/fairseq/examples/noisychannel/rerank.py", "repo_id": "COCO-LM", "token_count": 7934 }
180
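The reranking script above combines a forward-model score, an optional second (e.g. backwards) model score, and a language-model score with weights a/b/c and a length penalty, then keeps the best hypothesis per source sentence. The snippet below is a minimal, self-contained sketch of that selection step only; the names (`Hypo`, `combined_score`) and the exact penalty form are illustrative assumptions, not the actual `rerank_utils.get_score` implementation.

```python
# Toy sketch of weighted n-best rescoring (illustrative only; the real length
# and direction handling lives in rerank_utils.get_score).
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class Hypo:
    text: str
    fw_score: float            # forward (source -> target) model log-prob
    bw_score: Optional[float]  # optional second model, e.g. target -> source
    lm_score: float            # language model log-prob


def combined_score(h: Hypo, a: float, b: float, c: float, lenpen: float) -> float:
    tgt_len = max(len(h.text.split()), 1)   # word-level length, as in the script
    score = a * h.fw_score + c * h.lm_score
    if h.bw_score is not None:
        score += b * h.bw_score
    # length penalty, analogous to beam-search length normalisation
    return score / (tgt_len ** lenpen)


def pick_best(nbest: List[Hypo], a=1.0, b=1.0, c=0.5, lenpen=1.0) -> Hypo:
    # keep the hypothesis with the highest combined score
    return max(nbest, key=lambda h: combined_score(h, a, b, c, lenpen))


if __name__ == "__main__":
    nbest = [
        Hypo("a short guess", -4.0, -5.0, -6.0),
        Hypo("a slightly longer competing guess", -6.5, -6.0, -7.5),
    ]
    print(pick_best(nbest).text)
```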
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import re import sys class OOVIndexError(IndexError): def __init__(self, pos, source_seq, target_seq): super(OOVIndexError, self).__init__( "A <unk-N> tag in the target sequence refers to a position that is " "outside the source sequence. Most likely there was a mismatch in " "provided source and target sequences. Otherwise this would mean that " "the pointing mechanism somehow attended to a position that is past " "the actual sequence end." ) self.source_pos = pos self.source_seq = source_seq self.target_seq = target_seq def replace_oovs(source_in, target_in, target_out): """Replaces <unk-N> tokens in the target text with the corresponding word in the source text. """ oov_re = re.compile("^<unk-([0-9]+)>$") for source_seq, target_seq in zip(source_in, target_in): target_seq_out = [] pos_to_word = source_seq.strip().split() for token in target_seq.strip().split(): m = oov_re.match(token) if m: pos = int(m.group(1)) if pos >= len(pos_to_word): raise OOVIndexError(pos, source_seq, target_seq) token_out = pos_to_word[pos] else: token_out = token target_seq_out.append(token_out) target_out.write(" ".join(target_seq_out) + "\n") def main(): parser = argparse.ArgumentParser( description="Replaces <unk-N> tokens in target sequences with words from " "the corresponding position in the source sequence." ) parser.add_argument( "--source", type=str, help="text file with source sequences", required=True ) parser.add_argument( "--target", type=str, help="text file with target sequences", required=True ) parser.add_argument( "--target-out", type=str, help="where to write target sequences without <unk-N> " "entries", required=True, ) args = parser.parse_args() target_in = ( open(args.target, "r", encoding="utf-8") if args.target is not None else None ) target_out = ( open(args.target_out, "w", encoding="utf-8") if args.target_out is not None else None ) with open(args.source, "r", encoding="utf-8") as source_in, open( args.target, "r", encoding="utf-8" ) as target_in, open(args.target_out, "w", encoding="utf-8") as target_out: replace_oovs(source_in, target_in, target_out) if __name__ == "__main__": try: main() except OOVIndexError as e: print(e, file=sys.stderr) print("Source sequence:", e.source_seq.strip(), file=sys.stderr) print("Target sequence:", e.target_seq.strip(), file=sys.stderr) print( "Source sequence length:", len(e.source_seq.strip().split()), file=sys.stderr, ) print("The offending tag points to:", e.source_pos) sys.exit(2)
COCO-LM/fairseq/examples/pointer_generator/postprocess.py/0
{ "file_path": "COCO-LM/fairseq/examples/pointer_generator/postprocess.py", "repo_id": "COCO-LM", "token_count": 1401 }
181
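For a quick sanity check of `replace_oovs` from the post-processing script above, the file arguments can be swapped for in-memory `io.StringIO` streams; this assumes the function has been imported (or pasted) into the current scope.

```python
# Minimal check of replace_oovs using in-memory streams instead of files.
import io

source = io.StringIO("the quick brown fox\n")
target = io.StringIO("le <unk-2> renard <unk-0>\n")
out = io.StringIO()

replace_oovs(source, target, out)  # assumes the function above is in scope
print(out.getvalue())              # -> "le brown renard the"
```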
#!/bin/bash # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # data should be downloaded and processed with reprocess_RACE.py if [[ $# -ne 2 ]]; then echo "Run as following:" echo "./examples/roberta/preprocess_RACE.sh <race_data_folder> <output_folder>" exit 1 fi RACE_DATA_FOLDER=$1 OUT_DATA_FOLDER=$2 # download bpe encoder.json, vocabulary and fairseq dictionary wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json' wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe' wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/dict.txt' SPLITS="train dev test-middle test-high" INPUT_TYPES="input0 input1 input2 input3 input4" for INPUT_TYPE in $INPUT_TYPES do for SPLIT in $SPLITS do echo "BPE encoding $SPLIT/$INPUT_TYPE" python -m examples.roberta.multiprocessing_bpe_encoder \ --encoder-json encoder.json \ --vocab-bpe vocab.bpe \ --inputs "$RACE_DATA_FOLDER/$SPLIT.$INPUT_TYPE" \ --outputs "$RACE_DATA_FOLDER/$SPLIT.$INPUT_TYPE.bpe" \ --workers 10 \ --keep-empty; done done for INPUT_TYPE in $INPUT_TYPES do LANG="input$INPUT_TYPE" fairseq-preprocess \ --only-source \ --trainpref "$RACE_DATA_FOLDER/train.$INPUT_TYPE.bpe" \ --validpref "$RACE_DATA_FOLDER/dev.$INPUT_TYPE.bpe" \ --testpref "$RACE_DATA_FOLDER/test-middle.$INPUT_TYPE.bpe,$RACE_DATA_FOLDER/test-high.$INPUT_TYPE.bpe" \ --destdir "$OUT_DATA_FOLDER/$INPUT_TYPE" \ --workers 10 \ --srcdict dict.txt; done rm -rf "$OUT_DATA_FOLDER/label" mkdir -p "$OUT_DATA_FOLDER/label" cp "$RACE_DATA_FOLDER/train.label" "$OUT_DATA_FOLDER/label/" cp "$RACE_DATA_FOLDER/dev.label" "$OUT_DATA_FOLDER/label/valid.label" cp "$RACE_DATA_FOLDER/test-middle.label" "$OUT_DATA_FOLDER/label/test.label" cp "$RACE_DATA_FOLDER/test-high.label" "$OUT_DATA_FOLDER/label/test1.label"
COCO-LM/fairseq/examples/roberta/preprocess_RACE.sh/0
{ "file_path": "COCO-LM/fairseq/examples/roberta/preprocess_RACE.sh", "repo_id": "COCO-LM", "token_count": 932 }
182
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import time from functools import partial from multiprocessing.pool import ThreadPool as Pool from . import DEFAULT_EOS, GET, SEND class Agent(object): "an agent needs to follow this pattern" def __init__(self, *args, **kwargs): pass def init_states(self, *args, **kwargs): raise NotImplementedError def update_states(self, states, new_state): raise NotImplementedError def finish_eval(self, states, new_state): raise NotImplementedError def policy(self, state): raise NotImplementedError def reset(self): raise NotImplementedError def decode(self, session, low=0, high=100000, num_thread=10): corpus_info = session.corpus_info() high = min(corpus_info["num_sentences"] - 1, high) if low >= high: return t0 = time.time() if num_thread > 1: with Pool(10) as p: p.map( partial(self._decode_one, session), [sent_id for sent_id in range(low, high + 1)], ) else: for sent_id in range(low, high + 1): self._decode_one(session, sent_id) print(f"Finished {low} to {high} in {time.time() - t0}s") def _decode_one(self, session, sent_id): action = {} self.reset() states = self.init_states() while action.get("value", None) != DEFAULT_EOS: # take an action action = self.policy(states) if action["key"] == GET: new_states = session.get_src(sent_id, action["value"]) states = self.update_states(states, new_states) elif action["key"] == SEND: session.send_hypo(sent_id, action["value"]) print(" ".join(states["tokens"]["tgt"]))
COCO-LM/fairseq/examples/simultaneous_translation/eval/agents/agent.py/0
{ "file_path": "COCO-LM/fairseq/examples/simultaneous_translation/eval/agents/agent.py", "repo_id": "COCO-LM", "token_count": 907 }
183
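The `Agent` base class above only fixes the read/write protocol (`GET`/`SEND` actions consumed by `decode`). A toy concrete agent might look like the sketch below: it reads the full source and then echoes it back token by token. How `session.get_src` packages each segment is deployment-specific, so the `{"segment": ...}` handling here is an assumption.

```python
# Hedged sketch of a concrete agent following the pattern above.
from . import DEFAULT_EOS, GET, SEND  # same constants the base class imports


class EchoAgent(Agent):
    def init_states(self):
        return {"tokens": {"src": [], "tgt": []}, "src_done": False}

    def update_states(self, states, new_state):
        # assumption: the server returns {"segment": <str>}, with DEFAULT_EOS at the end
        segment = new_state.get("segment", DEFAULT_EOS)
        if segment == DEFAULT_EOS:
            states["src_done"] = True
        else:
            states["tokens"]["src"].append(segment)
        return states

    def policy(self, states):
        if not states["src_done"]:
            # keep reading; the value is passed through to session.get_src
            return {"key": GET, "value": None}
        n_written = len(states["tokens"]["tgt"])
        if n_written < len(states["tokens"]["src"]):
            token = states["tokens"]["src"][n_written]   # write the next token
            states["tokens"]["tgt"].append(token)
            return {"key": SEND, "value": token}
        return {"key": SEND, "value": DEFAULT_EOS}        # finished: emit EOS

    def reset(self):
        pass
```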
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn as nn import torch.nn.functional as F from examples.simultaneous_translation.utils.functions import ( exclusive_cumprod, lengths_to_mask, ) from fairseq import utils from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.modules import MultiheadAttention from fairseq.utils import convert_padding_direction from . import register_monotonic_attention @with_incremental_state class MonotonicAttention(nn.Module): """ Abstract class of monotonic attentions """ def __init__(self, args): self.eps = args.attention_eps self.mass_preservation = args.mass_preservation self.noise_type = args.noise_type self.noise_mean = args.noise_mean self.noise_var = args.noise_var self.energy_bias_init = args.energy_bias_init self.energy_bias = ( nn.Parameter(self.energy_bias_init * torch.ones([1])) if args.energy_bias is True else 0 ) @staticmethod def add_args(parser): # fmt: off parser.add_argument('--no-mass-preservation', action="store_false", dest="mass_preservation", help='Do not stay on the last token when decoding') parser.add_argument('--mass-preservation', action="store_true", dest="mass_preservation", help='Stay on the last token when decoding') parser.set_defaults(mass_preservation=True) parser.add_argument('--noise-var', type=float, default=1.0, help='Variance of discretness noise') parser.add_argument('--noise-mean', type=float, default=0.0, help='Mean of discretness noise') parser.add_argument('--noise-type', type=str, default="flat", help='Type of discretness noise') parser.add_argument('--energy-bias', action="store_true", default=False, help='Bias for energy') parser.add_argument('--energy-bias-init', type=float, default=-2.0, help='Initial value of the bias for energy') parser.add_argument('--attention-eps', type=float, default=1e-6, help='Epsilon when calculating expected attention') def p_choose(self, *args): raise NotImplementedError def input_projections(self, *args): raise NotImplementedError def attn_energy( self, q_proj, k_proj, key_padding_mask=None, attn_mask=None ): """ Calculating monotonic energies ============================================================ Expected input size q_proj: bsz * num_heads, tgt_len, self.head_dim k_proj: bsz * num_heads, src_len, self.head_dim key_padding_mask: bsz, src_len attn_mask: tgt_len, src_len """ bsz, tgt_len, embed_dim = q_proj.size() bsz = bsz // self.num_heads src_len = k_proj.size(1) attn_energy = ( torch.bmm(q_proj, k_proj.transpose(1, 2)) + self.energy_bias ) if attn_mask is not None: attn_mask = attn_mask.unsqueeze(0) attn_energy += attn_mask attn_energy = attn_energy.view(bsz, self.num_heads, tgt_len, src_len) if key_padding_mask is not None: attn_energy = attn_energy.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).bool(), float("-inf"), ) return attn_energy def expected_alignment_train(self, p_choose, key_padding_mask): """ Calculating expected alignment for MMA Mask is not need because p_choose will be 0 if masked q_ij = (1 − p_{ij−1})q_{ij−1} + a+{i−1j} a_ij = p_ij q_ij Parallel solution: ai = p_i * cumprod(1 − pi) * cumsum(a_i / cumprod(1 − pi)) ============================================================ Expected input size p_choose: bsz * num_heads, tgt_len, src_len """ # p_choose: bsz * num_heads, tgt_len, src_len bsz_num_heads, tgt_len, src_len = p_choose.size() # cumprod_1mp : bsz * 
num_heads, tgt_len, src_len cumprod_1mp = exclusive_cumprod(1 - p_choose, dim=2, eps=self.eps) cumprod_1mp_clamp = torch.clamp(cumprod_1mp, self.eps, 1.0) init_attention = p_choose.new_zeros([bsz_num_heads, 1, src_len]) init_attention[:, :, 0] = 1.0 previous_attn = [init_attention] for i in range(tgt_len): # p_choose: bsz * num_heads, tgt_len, src_len # cumprod_1mp_clamp : bsz * num_heads, tgt_len, src_len # previous_attn[i]: bsz * num_heads, 1, src_len # alpha_i: bsz * num_heads, src_len alpha_i = ( p_choose[:, i] * cumprod_1mp[:, i] * torch.cumsum(previous_attn[i][:, 0] / cumprod_1mp_clamp[:, i], dim=1) ).clamp(0, 1.0) previous_attn.append(alpha_i.unsqueeze(1)) # alpha: bsz * num_heads, tgt_len, src_len alpha = torch.cat(previous_attn[1:], dim=1) if self.mass_preservation: # Last token has the residual probabilities if key_padding_mask is not None and key_padding_mask[:, -1].any(): # right padding batch_size = key_padding_mask.size(0) residuals = 1 - alpha.sum(dim=-1, keepdim=True).clamp(0.0, 1.0) src_lens = src_len - key_padding_mask.sum(dim=1, keepdim=True) src_lens = src_lens.expand( batch_size, self.num_heads ).contiguous().view(-1, 1) src_lens = src_lens.expand(-1, tgt_len).contiguous() # add back the last value residuals += alpha.gather(2, src_lens.unsqueeze(-1) - 1) alpha = alpha.scatter(2, src_lens.unsqueeze(-1) - 1, residuals) else: residuals = 1 - alpha[:, :, :-1].sum(dim=-1).clamp(0.0, 1.0) alpha[:, :, -1] = residuals if torch.isnan(alpha).any(): # Something is wrong raise RuntimeError("NaN in alpha.") return alpha def expected_alignment_infer( self, p_choose, encoder_padding_mask, incremental_state ): # TODO modify this function """ Calculating mo alignment for MMA during inference time ============================================================ Expected input size p_choose: bsz * num_heads, tgt_len, src_len incremental_state: dict encodencoder_padding_mask: bsz * src_len """ # p_choose: bsz * self.num_heads, src_len bsz_num_heads, tgt_len, src_len = p_choose.size() # One token at a time assert tgt_len == 1 p_choose = p_choose[:, 0, :] monotonic_cache = self._get_monotonic_buffer(incremental_state) # prev_monotonic_step: bsz, num_heads bsz = bsz_num_heads // self.num_heads prev_monotonic_step = monotonic_cache.get( "head_step", p_choose.new_zeros([bsz, self.num_heads]).long() ) bsz, num_heads = prev_monotonic_step.size() assert num_heads == self.num_heads assert bsz * num_heads == bsz_num_heads # p_choose: bsz, num_heads, src_len p_choose = p_choose.view(bsz, num_heads, src_len) if encoder_padding_mask is not None: src_lengths = src_len - \ encoder_padding_mask.sum(dim=1, keepdim=True).long() else: src_lengths = prev_monotonic_step.new_ones(bsz, 1) * src_len # src_lengths: bsz, num_heads src_lengths = src_lengths.expand_as(prev_monotonic_step) # new_monotonic_step: bsz, num_heads new_monotonic_step = prev_monotonic_step step_offset = 0 if encoder_padding_mask is not None: if encoder_padding_mask[:, 0].any(): # left_pad_source = True: step_offset = encoder_padding_mask.sum(dim=-1, keepdim=True) max_steps = src_lengths - 1 if self.mass_preservation else src_lengths # finish_read: bsz, num_heads finish_read = new_monotonic_step.eq(max_steps) p_choose_i = 1 while finish_read.sum().item() < bsz * self.num_heads: # p_choose: bsz * self.num_heads, src_len # only choose the p at monotonic steps # p_choose_i: bsz , self.num_heads p_choose_i = ( p_choose.gather( 2, (step_offset + new_monotonic_step) .unsqueeze(2) .clamp(0, src_len - 1), ) ).squeeze(2) action = ( (p_choose_i < 0.5) 
.type_as(prev_monotonic_step) .masked_fill(finish_read, 0) ) # 1 x bsz # sample actions on unfinished seq # 1 means stay, finish reading # 0 means leave, continue reading # dist = torch.distributions.bernoulli.Bernoulli(p_choose) # action = dist.sample().type_as(finish_read) * (1 - finish_read) new_monotonic_step += action finish_read = new_monotonic_step.eq(max_steps) | (action == 0) monotonic_cache["head_step"] = new_monotonic_step # Whether a head is looking for new input monotonic_cache["head_read"] = ( new_monotonic_step.eq(max_steps) & (p_choose_i < 0.5) ) # alpha: bsz * num_heads, 1, src_len # new_monotonic_step: bsz, num_heads alpha = ( p_choose .new_zeros([bsz * self.num_heads, src_len]) .scatter( 1, (step_offset + new_monotonic_step) .view(bsz * self.num_heads, 1).clamp(0, src_len - 1), 1 ) ) if not self.mass_preservation: alpha = alpha.masked_fill( (new_monotonic_step == max_steps) .view(bsz * self.num_heads, 1), 0 ) alpha = alpha.unsqueeze(1) self._set_monotonic_buffer(incremental_state, monotonic_cache) return alpha def _get_monotonic_buffer(self, incremental_state): return utils.get_incremental_state( self, incremental_state, 'monotonic', ) or {} def _set_monotonic_buffer(self, incremental_state, buffer): utils.set_incremental_state( self, incremental_state, 'monotonic', buffer, ) def v_proj_output(self, value): raise NotImplementedError def forward( self, query, key, value, key_padding_mask=None, attn_mask=None, incremental_state=None, need_weights=True, static_kv=False, *args, **kwargs ): tgt_len, bsz, embed_dim = query.size() src_len = value.size(0) # stepwise prob # p_choose: bsz * self.num_heads, tgt_len, src_len p_choose = self.p_choose( query, key, key_padding_mask, incremental_state, ) # expected alignment alpha # bsz * self.num_heads, tgt_len, src_len if incremental_state is not None: alpha = self.expected_alignment_infer( p_choose, key_padding_mask, incremental_state) else: alpha = self.expected_alignment_train( p_choose, key_padding_mask) # expected attention beta # bsz * self.num_heads, tgt_len, src_len beta = self.expected_attention( alpha, query, key, value, key_padding_mask, attn_mask, incremental_state ) attn_weights = beta v_proj = self.v_proj_output(value) attn = torch.bmm(attn_weights.type_as(v_proj), v_proj) attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) beta = beta.view(bsz, self.num_heads, tgt_len, src_len) alpha = alpha.view(bsz, self.num_heads, tgt_len, src_len) p_choose = p_choose.view(bsz, self.num_heads, tgt_len, src_len) return attn, { "alpha": alpha, "beta": beta, "p_choose": p_choose, } @register_monotonic_attention("hard_aligned") class MonotonicMultiheadAttentionHardAligned( MonotonicAttention, MultiheadAttention ): def __init__(self, args): MultiheadAttention.__init__( self, embed_dim=args.decoder_embed_dim, num_heads=args.decoder_attention_heads, kdim=getattr(args, "encoder_embed_dim", None), vdim=getattr(args, "encoder_embed_dim", None), dropout=args.attention_dropout, encoder_decoder_attention=True, ) MonotonicAttention.__init__(self, args) self.k_in_proj = {"monotonic": self.k_proj} self.q_in_proj = {"monotonic": self.q_proj} self.v_in_proj = {"output": self.v_proj} def input_projections(self, query, key, value, name): """ Prepare inputs for multihead attention ============================================================ Expected input size query: tgt_len, bsz, embed_dim key: src_len, bsz, embed_dim value: src_len, bsz, embed_dim name: monotonic or soft """ if query is not None: bsz = 
query.size(1) q = self.q_in_proj[name](query) q *= self.scaling q = q.contiguous().view( -1, bsz * self.num_heads, self.head_dim ).transpose(0, 1) else: q = None if key is not None: bsz = key.size(1) k = self.k_in_proj[name](key) k = k.contiguous().view( -1, bsz * self.num_heads, self.head_dim ).transpose(0, 1) else: k = None if value is not None: bsz = value.size(1) v = self.v_in_proj[name](value) v = v.contiguous().view( -1, bsz * self.num_heads, self.head_dim ).transpose(0, 1) else: v = None return q, k, v def p_choose( self, query, key, key_padding_mask=None, incremental_state=None, *extra_args ): """ Calculating step wise prob for reading and writing 1 to read, 0 to write ============================================================ Expected input size query: bsz, tgt_len, embed_dim key: bsz, src_len, embed_dim value: bsz, src_len, embed_dim key_padding_mask: bsz, src_len attn_mask: bsz, src_len query: bsz, tgt_len, embed_dim """ # prepare inputs q_proj, k_proj, _ = self.input_projections( query, key, None, "monotonic" ) # attention energy attn_energy = self.attn_energy(q_proj, k_proj, key_padding_mask) noise = 0 if self.training: # add noise here to encourage discretness noise = ( torch.normal(self.noise_mean, self.noise_var, attn_energy.size()) .type_as(attn_energy) .to(attn_energy.device) ) p_choose = torch.sigmoid(attn_energy + noise) _, _, tgt_len, src_len = p_choose.size() # p_choose: bsz * self.num_heads, tgt_len, src_len return p_choose.view(-1, tgt_len, src_len) def expected_attention(self, alpha, *args): """ For MMA-H, beta = alpha """ return alpha def v_proj_output(self, value): _, _, v_proj = self.input_projections(None, None, value, "output") return v_proj @register_monotonic_attention("infinite_lookback") class MonotonicMultiheadAttentionInfiniteLookback( MonotonicMultiheadAttentionHardAligned ): def __init__(self, args): super().__init__(args) self.init_soft_attention() def init_soft_attention(self): self.k_proj_soft = nn.Linear(self.kdim, self.embed_dim, bias=True) self.q_proj_soft = nn.Linear(self.embed_dim, self.embed_dim, bias=True) self.k_in_proj["soft"] = self.k_proj_soft self.q_in_proj["soft"] = self.q_proj_soft if self.qkv_same_dim: # Empirically observed the convergence to be much better with # the scaled initialization nn.init.xavier_uniform_( self.k_in_proj["soft"].weight, gain=1 / math.sqrt(2) ) nn.init.xavier_uniform_( self.q_in_proj["soft"].weight, gain=1 / math.sqrt(2) ) else: nn.init.xavier_uniform_(self.k_in_proj["soft"].weight) nn.init.xavier_uniform_(self.q_in_proj["soft"].weight) def expected_attention( self, alpha, query, key, value, key_padding_mask, attn_mask, incremental_state ): # monotonic attention, we will calculate milk here bsz_x_num_heads, tgt_len, src_len = alpha.size() bsz = int(bsz_x_num_heads / self.num_heads) q, k, _ = self.input_projections(query, key, None, "soft") soft_energy = self.attn_energy(q, k, key_padding_mask, attn_mask) assert list(soft_energy.size()) == \ [bsz, self.num_heads, tgt_len, src_len] soft_energy = soft_energy.view(bsz * self.num_heads, tgt_len, src_len) if incremental_state is not None: monotonic_cache = self._get_monotonic_buffer(incremental_state) monotonic_length = monotonic_cache["head_step"] + 1 step_offset = 0 if key_padding_mask is not None: if key_padding_mask[:, 0].any(): # left_pad_source = True: step_offset = key_padding_mask.sum(dim=-1, keepdim=True) monotonic_length += step_offset mask = lengths_to_mask( monotonic_length.view(-1), soft_energy.size(2), 1 ).unsqueeze(1) soft_energy = 
soft_energy.masked_fill(~mask.bool(), float("-inf")) soft_energy = soft_energy - soft_energy.max(dim=2, keepdim=True)[0] exp_soft_energy = torch.exp(soft_energy) exp_soft_energy_sum = exp_soft_energy.sum(dim=2) beta = exp_soft_energy / exp_soft_energy_sum.unsqueeze(2) else: soft_energy = soft_energy - soft_energy.max(dim=2, keepdim=True)[0] exp_soft_energy = torch.exp(soft_energy) + self.eps inner_items = alpha / (torch.cumsum(exp_soft_energy, dim=2)) beta = ( exp_soft_energy * torch.cumsum(inner_items.flip(dims=[2]), dim=2) .flip(dims=[2]) ) beta = beta.view(bsz, self.num_heads, tgt_len, src_len) if key_padding_mask is not None: beta = beta.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).bool(), 0) beta = beta / beta.sum(dim=3, keepdim=True) beta = beta.view(bsz * self.num_heads, tgt_len, src_len) beta = self.dropout_module(beta) if torch.isnan(beta).any(): # Something is wrong raise RuntimeError("NaN in beta.") return beta @register_monotonic_attention("waitk") class MonotonicMultiheadAttentionWaitK( MonotonicMultiheadAttentionInfiniteLookback ): def __init__(self, args): super().__init__(args) self.q_in_proj["soft"] = self.q_in_proj["monotonic"] self.k_in_proj["soft"] = self.k_in_proj["monotonic"] self.waitk_lagging = args.waitk_lagging assert self.waitk_lagging > 0, ( f"Lagging has to been larger than 0, get {self.waitk_lagging}." ) @staticmethod def add_args(parser): super( MonotonicMultiheadAttentionWaitK, MonotonicMultiheadAttentionWaitK, ).add_args(parser) parser.add_argument( "--waitk-lagging", type=int, required=True, help="Wait K lagging" ) def p_choose( self, query, key, key_padding_mask=None, incremental_state=None, *extra_args ): """ query: bsz, tgt_len key: bsz, src_len key_padding_mask: bsz, src_len """ if incremental_state is not None: # Retrieve target length from incremental states # For inference the length of query is always 1 tgt_len = int(incremental_state["steps"]["tgt"]) else: tgt_len, bsz, _ = query.size() src_len, bsz, _ = key.size() p_choose = query.new_ones(bsz, tgt_len, src_len) p_choose = torch.tril(p_choose, diagonal=self.waitk_lagging - 1) p_choose = torch.triu(p_choose, diagonal=self.waitk_lagging - 1) if incremental_state is not None: p_choose = p_choose[:, -1:] tgt_len = 1 # Extend to each head p_choose = ( p_choose.contiguous() .unsqueeze(1) .expand(-1, self.num_heads, -1, -1) .contiguous() .view(-1, tgt_len, src_len) ) return p_choose
COCO-LM/fairseq/examples/simultaneous_translation/modules/monotonic_multihead_attention.py/0
{ "file_path": "COCO-LM/fairseq/examples/simultaneous_translation/modules/monotonic_multihead_attention.py", "repo_id": "COCO-LM", "token_count": 11073 }
184
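The docstring of `expected_alignment_train` above states the closed form `a_i = p_i * cumprod(1 - p_i) * cumsum(a_{i-1} / cumprod(1 - p_i))` for the recurrence `q_ij = (1 - p_{i,j-1}) q_{i,j-1} + a_{i-1,j}`, `a_ij = p_ij q_ij`. The standalone check below (plain PyTorch, no fairseq imports) compares the two for a single head and random `p_choose`; it is a verification sketch, not part of the module.

```python
import torch


def exclusive_cumprod_1d(x):
    # [x0, x1, x2, ...] -> [1, x0, x0*x1, ...]
    return torch.cat([x.new_ones(1), torch.cumprod(x, dim=0)[:-1]])


torch.manual_seed(0)
tgt_len, src_len = 3, 5
p = torch.rand(tgt_len, src_len)          # p_choose for one head

# sequential recurrence from the docstring
alpha_prev = torch.zeros(src_len)
alpha_prev[0] = 1.0                       # same init as init_attention in the code
alpha_seq = []
for i in range(tgt_len):
    q = torch.zeros(src_len)
    for j in range(src_len):
        if j == 0:
            q[j] = alpha_prev[j]
        else:
            q[j] = (1 - p[i, j - 1]) * q[j - 1] + alpha_prev[j]
    a = p[i] * q
    alpha_seq.append(a)
    alpha_prev = a

# parallel closed form used by expected_alignment_train
alpha_prev = torch.zeros(src_len)
alpha_prev[0] = 1.0
alpha_par = []
for i in range(tgt_len):
    c = exclusive_cumprod_1d(1 - p[i])
    a = p[i] * c * torch.cumsum(alpha_prev / c, dim=0)
    alpha_par.append(a)
    alpha_prev = a

print(torch.allclose(torch.stack(alpha_seq), torch.stack(alpha_par), atol=1e-6))  # -> True
```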
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import absolute_import, division, print_function, unicode_literals import argparse import concurrent.futures import json import multiprocessing import os from collections import namedtuple from itertools import chain import sentencepiece as spm from fairseq.data import Dictionary MILLISECONDS_TO_SECONDS = 0.001 def process_sample(aud_path, lable, utt_id, sp, tgt_dict): import torchaudio input = {} output = {} si, ei = torchaudio.info(aud_path) input["length_ms"] = int( si.length / si.channels / si.rate / MILLISECONDS_TO_SECONDS ) input["path"] = aud_path token = " ".join(sp.EncodeAsPieces(lable)) ids = tgt_dict.encode_line(token, append_eos=False) output["text"] = lable output["token"] = token output["tokenid"] = ", ".join(map(str, [t.tolist() for t in ids])) return {utt_id: {"input": input, "output": output}} def main(): parser = argparse.ArgumentParser() parser.add_argument( "--audio-dirs", nargs="+", default=["-"], required=True, help="input directories with audio files", ) parser.add_argument( "--labels", required=True, help="aggregated input labels with format <ID LABEL> per line", type=argparse.FileType("r", encoding="UTF-8"), ) parser.add_argument( "--spm-model", required=True, help="sentencepiece model to use for encoding", type=argparse.FileType("r", encoding="UTF-8"), ) parser.add_argument( "--dictionary", required=True, help="file to load fairseq dictionary from", type=argparse.FileType("r", encoding="UTF-8"), ) parser.add_argument("--audio-format", choices=["flac", "wav"], default="wav") parser.add_argument( "--output", required=True, type=argparse.FileType("w"), help="path to save json output", ) args = parser.parse_args() sp = spm.SentencePieceProcessor() sp.Load(args.spm_model.name) tgt_dict = Dictionary.load(args.dictionary) labels = {} for line in args.labels: (utt_id, label) = line.split(" ", 1) labels[utt_id] = label if len(labels) == 0: raise Exception("No labels found in ", args.labels_path) Sample = namedtuple("Sample", "aud_path utt_id") samples = [] for path, _, files in chain.from_iterable( os.walk(path) for path in args.audio_dirs ): for f in files: if f.endswith(args.audio_format): if len(os.path.splitext(f)) != 2: raise Exception("Expect <utt_id.extension> file name. Got: ", f) utt_id = os.path.splitext(f)[0] if utt_id not in labels: continue samples.append(Sample(os.path.join(path, f), utt_id)) utts = {} num_cpu = multiprocessing.cpu_count() with concurrent.futures.ThreadPoolExecutor(max_workers=num_cpu) as executor: future_to_sample = { executor.submit( process_sample, s.aud_path, labels[s.utt_id], s.utt_id, sp, tgt_dict ): s for s in samples } for future in concurrent.futures.as_completed(future_to_sample): try: data = future.result() except Exception as exc: print("generated an exception: ", exc) else: utts.update(data) json.dump({"utts": utts}, args.output, indent=4) if __name__ == "__main__": main()
COCO-LM/fairseq/examples/speech_recognition/datasets/asr_prep_json.py/0
{ "file_path": "COCO-LM/fairseq/examples/speech_recognition/datasets/asr_prep_json.py", "repo_id": "COCO-LM", "token_count": 1667 }
185
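The JSON written by `asr_prep_json.py` above nests one record per utterance under a top-level `utts` key. Below is a small hand-written example of the expected shape; every value is a made-up placeholder, only the keys mirror `process_sample()`.

```python
import json

# Illustrative output shape of asr_prep_json.py (values invented).
example = {
    "utts": {
        "utt_0001": {
            "input": {
                "length_ms": 2350,                  # duration from torchaudio.info
                "path": "/data/audio/utt_0001.wav",
            },
            "output": {
                "text": "hello world",              # raw label
                "token": "▁hello ▁world",           # sentencepiece pieces, space-joined
                "tokenid": "31, 42",                # fairseq dictionary ids
            },
        }
    }
}
print(json.dumps(example, indent=4))
```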
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import csv from pathlib import Path import zipfile from functools import reduce from multiprocessing import cpu_count from typing import Any, Dict, List, Optional, Union import numpy as np import pandas as pd import sentencepiece as sp from fairseq.data.audio.audio_utils import ( _convert_to_mono, _get_kaldi_fbank, _get_torchaudio_fbank ) import torch from tqdm import tqdm UNK_TOKEN, UNK_TOKEN_ID = "<unk>", 3 BOS_TOKEN, BOS_TOKEN_ID = "<s>", 0 EOS_TOKEN, EOS_TOKEN_ID = "</s>", 2 PAD_TOKEN, PAD_TOKEN_ID = "<pad>", 1 def gen_vocab( input_path: Path, output_path_prefix: Path, model_type="bpe", vocab_size=1000, special_symbols: Optional[List[str]] = None ): # Train SentencePiece Model arguments = [ f"--input={input_path.as_posix()}", f"--model_prefix={output_path_prefix.as_posix()}", f"--model_type={model_type}", f"--vocab_size={vocab_size}", "--character_coverage=1.0", f"--num_threads={cpu_count()}", f"--unk_id={UNK_TOKEN_ID}", f"--bos_id={BOS_TOKEN_ID}", f"--eos_id={EOS_TOKEN_ID}", f"--pad_id={PAD_TOKEN_ID}", ] if special_symbols is not None: _special_symbols = ",".join(special_symbols) arguments.append(f"--user_defined_symbols={_special_symbols}") sp.SentencePieceTrainer.Train(" ".join(arguments)) # Export fairseq dictionary spm = sp.SentencePieceProcessor() spm.Load(output_path_prefix.as_posix() + ".model") vocab = {i: spm.IdToPiece(i) for i in range(spm.GetPieceSize())} assert ( vocab.get(UNK_TOKEN_ID) == UNK_TOKEN and vocab.get(PAD_TOKEN_ID) == PAD_TOKEN and vocab.get(BOS_TOKEN_ID) == BOS_TOKEN and vocab.get(EOS_TOKEN_ID) == EOS_TOKEN ) vocab = { i: s for i, s in vocab.items() if s not in {UNK_TOKEN, BOS_TOKEN, EOS_TOKEN, PAD_TOKEN} } with open(output_path_prefix.as_posix() + ".txt", "w") as f_out: for _, s in sorted(vocab.items(), key=lambda x: x[0]): f_out.write(f"{s} 1\n") def extract_fbank_features( waveform: torch.FloatTensor, sample_rate: int, output_path: Optional[Path] = None, n_mel_bins: int = 80, overwrite: bool = False, ): if output_path is not None and output_path.is_file() and not overwrite: return _waveform = _convert_to_mono(waveform, sample_rate) _waveform = _waveform * (2 ** 15) # Kaldi compliance: 16-bit signed integers _waveform = _waveform.numpy() features = _get_kaldi_fbank(_waveform, sample_rate, n_mel_bins) if features is None: features = _get_torchaudio_fbank(_waveform, sample_rate, n_mel_bins) if features is None: raise ImportError( "Please install pyKaldi or torchaudio to enable fbank feature extraction" ) if output_path is not None: np.save(output_path.as_posix(), features) else: return features def create_zip(data_root: Path, zip_path: Path): paths = list(data_root.glob("*.npy")) with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_STORED) as f: for path in tqdm(paths): f.write(path, arcname=path.name) def is_npy_data(data: bytes) -> bool: return data[0] == 147 and data[1] == 78 def get_zip_manifest(zip_path: Path, zip_root: Optional[Path] = None): _zip_path = zip_path if zip_root is None else Path.joinpath(zip_root, zip_path) with zipfile.ZipFile(_zip_path, mode="r") as f: info = f.infolist() manifest = {} for i in tqdm(info): utt_id = Path(i.filename).stem offset, file_size = i.header_offset + 30 + len(i.filename), i.file_size manifest[utt_id] = f"{zip_path.as_posix()}:{offset}:{file_size}" with open(_zip_path, "rb") as f: f.seek(offset) data = f.read(file_size) 
assert len(data) > 1 and is_npy_data(data) return manifest def gen_config_yaml( manifest_root: Path, spm_filename: str, yaml_filename: str = "config.yaml", specaugment_policy: str = "lb", prepend_tgt_lang_tag: bool = False, sampling_alpha: float = 1.0, audio_root: str = "", cmvn_type: str = "utterance", gcmvn_path: Optional[Path] = None, ): manifest_root = manifest_root.absolute() writer = S2TDataConfigWriter(manifest_root / yaml_filename) writer.set_vocab_filename(spm_filename.replace(".model", ".txt")) writer.set_input_channels(1) writer.set_input_feat_per_channel(80) specaugment_setters = { "lb": writer.set_specaugment_lb_policy, "ld": writer.set_specaugment_ld_policy, "sm": writer.set_specaugment_sm_policy, "ss": writer.set_specaugment_ss_policy, } specaugment_setter = specaugment_setters.get(specaugment_policy, None) if specaugment_setter is not None: specaugment_setter() writer.set_bpe_tokenizer( { "bpe": "sentencepiece", "sentencepiece_model": (manifest_root / spm_filename).as_posix(), } ) if prepend_tgt_lang_tag: writer.set_prepend_tgt_lang_tag(True) writer.set_sampling_alpha(sampling_alpha) if cmvn_type not in ["global", "utterance"]: raise NotImplementedError writer.set_feature_transforms("_train", [f"{cmvn_type}_cmvn", "specaugment"]) writer.set_feature_transforms("*", [f"{cmvn_type}_cmvn"]) if cmvn_type == "global": assert gcmvn_path is not None, ( 'Please provide path of global cmvn file.' ) writer.set_global_cmvn(str(gcmvn_path)) if len(audio_root) > 0: writer.set_audio_root(audio_root) writer.flush() def load_df_from_tsv(path: Union[str, Path]): _path = path if isinstance(path, str) else path.as_posix() return pd.read_csv( _path, sep="\t", header=0, encoding="utf-8", escapechar="\\", quoting=csv.QUOTE_NONE, na_filter=False, ) def save_df_to_tsv(dataframe, path: Union[str, Path]): _path = path if isinstance(path, str) else path.as_posix() dataframe.to_csv( _path, sep="\t", header=True, index=False, encoding="utf-8", escapechar="\\", quoting=csv.QUOTE_NONE, ) def filter_manifest_df( df, is_train_split=False, extra_filters=None, min_n_frames=5, max_n_frames=3000 ): filters = { "no speech": df["audio"] == "", f"short speech (<{min_n_frames} frames)": df["n_frames"] < min_n_frames, "empty sentence": df["tgt_text"] == "", } if is_train_split: filters[f"long speech (>{max_n_frames} frames)"] = df["n_frames"] > max_n_frames if extra_filters is not None: filters.update(extra_filters) invalid = reduce(lambda x, y: x | y, filters.values()) valid = ~invalid print( "| " + ", ".join(f"{n}: {f.sum()}" for n, f in filters.items()) + f", total {invalid.sum()} filtered, {valid.sum()} remained." 
) return df[valid] def cal_gcmvn_stats(features_list): features = np.concatenate(features_list) square_sums = (features ** 2).sum(axis=0) mean = features.mean(axis=0) features = np.subtract(features, mean) var = square_sums / features.shape[0] - mean ** 2 std = np.sqrt(np.maximum(var, 1e-8)) return {"mean": mean.astype("float32"), "std": std.astype("float32")} class S2TDataConfigWriter(object): DEFAULT_VOCAB_FILENAME = "dict.txt" DEFAULT_INPUT_FEAT_PER_CHANNEL = 80 DEFAULT_INPUT_CHANNELS = 1 def __init__(self, yaml_path: Path): try: import yaml except ImportError: print("Please install PyYAML for S2T data config YAML files") self.yaml = yaml self.yaml_path = yaml_path self.config = {} def flush(self): with open(self.yaml_path, "w") as f: self.yaml.dump(self.config, f) def set_audio_root(self, audio_root=""): self.config["audio_root"] = audio_root def set_vocab_filename(self, vocab_filename: str = "dict.txt"): self.config["vocab_filename"] = vocab_filename def set_specaugment( self, time_wrap_w: int, freq_mask_n: int, freq_mask_f: int, time_mask_n: int, time_mask_t: int, time_mask_p: float, ): self.config["specaugment"] = { "time_wrap_W": time_wrap_w, "freq_mask_N": freq_mask_n, "freq_mask_F": freq_mask_f, "time_mask_N": time_mask_n, "time_mask_T": time_mask_t, "time_mask_p": time_mask_p, } def set_specaugment_lb_policy(self): self.set_specaugment( time_wrap_w=0, freq_mask_n=1, freq_mask_f=27, time_mask_n=1, time_mask_t=100, time_mask_p=1.0, ) def set_specaugment_ld_policy(self): self.set_specaugment( time_wrap_w=0, freq_mask_n=2, freq_mask_f=27, time_mask_n=2, time_mask_t=100, time_mask_p=1.0, ) def set_specaugment_sm_policy(self): self.set_specaugment( time_wrap_w=0, freq_mask_n=2, freq_mask_f=15, time_mask_n=2, time_mask_t=70, time_mask_p=0.2, ) def set_specaugment_ss_policy(self): self.set_specaugment( time_wrap_w=0, freq_mask_n=2, freq_mask_f=27, time_mask_n=2, time_mask_t=70, time_mask_p=0.2, ) def set_input_channels(self, input_channels: int = 1): self.config["input_channels"] = input_channels def set_input_feat_per_channel(self, input_feat_per_channel: int = 80): self.config["input_feat_per_channel"] = input_feat_per_channel def set_bpe_tokenizer(self, bpe_tokenizer: Dict[str, Any]): self.config["bpe_tokenizer"] = bpe_tokenizer def set_global_cmvn(self, stats_npz_path: str): self.config["global_cmvn"] = {"stats_npz_path": stats_npz_path} def set_feature_transforms(self, split: str, transforms: List[str]): if "transforms" not in self.config: self.config["transforms"] = {} self.config["transforms"][split] = transforms def set_prepend_tgt_lang_tag(self, flag: bool = True): self.config["prepend_tgt_lang_tag"] = flag def set_sampling_alpha(self, sampling_alpha: float = 1.0): self.config["sampling_alpha"] = sampling_alpha
COCO-LM/fairseq/examples/speech_to_text/data_utils.py/0
{ "file_path": "COCO-LM/fairseq/examples/speech_to_text/data_utils.py", "repo_id": "COCO-LM", "token_count": 5149 }
186
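The helpers in `data_utils.py` above are typically chained by the dataset-specific prep scripts: extract fbank features, zip them, write split TSVs, train a sentencepiece vocabulary, and emit `config.yaml`. The sketch below strings them together under assumed paths and placeholder values; it presumes the fairseq repo root is on `PYTHONPATH` and that per-utterance features were already saved with `extract_fbank_features`.

```python
# Hedged end-to-end sketch using the helpers defined in data_utils.py above.
from pathlib import Path

import pandas as pd

from examples.speech_to_text.data_utils import (
    create_zip, filter_manifest_df, gen_config_yaml, gen_vocab,
    get_zip_manifest, save_df_to_tsv,
)

root = Path("/data/s2t_corpus")        # assumed layout
feature_root = root / "fbank80"        # .npy files written by extract_fbank_features

# 1. zip the per-utterance .npy features and build the offset manifest
create_zip(feature_root, root / "fbank80.zip")
zip_manifest = get_zip_manifest(root / "fbank80.zip")

# 2. write a split manifest TSV with the columns filter_manifest_df expects
df = pd.DataFrame({
    "id": ["utt_0001"],                       # placeholder utterance id
    "audio": [zip_manifest["utt_0001"]],      # "zip:offset:size" entry
    "n_frames": [742],                        # placeholder frame count
    "tgt_text": ["hello world"],
})
df = filter_manifest_df(df, is_train_split=True)
save_df_to_tsv(df, root / "train.tsv")

# 3. train the sentencepiece vocab on the transcripts and emit config.yaml
gen_vocab(root / "train_text.txt", root / "spm_unigram8000",
          model_type="unigram", vocab_size=8000)
gen_config_yaml(root, spm_filename="spm_unigram8000.model",
                yaml_filename="config.yaml", specaugment_policy="lb")
```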
#!/bin/bash # Adapted from https://github.com/facebookresearch/MIXER/blob/master/prepareData.sh echo 'Cloning Moses github repository (for tokenization scripts)...' git clone https://github.com/moses-smt/mosesdecoder.git echo 'Cloning Subword NMT repository (for BPE pre-processing)...' git clone https://github.com/rsennrich/subword-nmt.git SCRIPTS=mosesdecoder/scripts TOKENIZER=$SCRIPTS/tokenizer/tokenizer.perl CLEAN=$SCRIPTS/training/clean-corpus-n.perl NORM_PUNC=$SCRIPTS/tokenizer/normalize-punctuation.perl REM_NON_PRINT_CHAR=$SCRIPTS/tokenizer/remove-non-printing-char.perl BPEROOT=subword-nmt/subword_nmt BPE_TOKENS=40000 URLS=( "http://statmt.org/wmt13/training-parallel-europarl-v7.tgz" "http://statmt.org/wmt13/training-parallel-commoncrawl.tgz" "http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz" "http://data.statmt.org/wmt17/translation-task/dev.tgz" "http://statmt.org/wmt14/test-full.tgz" ) FILES=( "training-parallel-europarl-v7.tgz" "training-parallel-commoncrawl.tgz" "training-parallel-nc-v12.tgz" "dev.tgz" "test-full.tgz" ) CORPORA=( "training/europarl-v7.de-en" "commoncrawl.de-en" "training/news-commentary-v12.de-en" ) # This will make the dataset compatible to the one used in "Convolutional Sequence to Sequence Learning" # https://arxiv.org/abs/1705.03122 if [ "$1" == "--icml17" ]; then URLS[2]="http://statmt.org/wmt14/training-parallel-nc-v9.tgz" FILES[2]="training-parallel-nc-v9.tgz" CORPORA[2]="training/news-commentary-v9.de-en" OUTDIR=wmt14_en_de else OUTDIR=wmt17_en_de fi if [ ! -d "$SCRIPTS" ]; then echo "Please set SCRIPTS variable correctly to point to Moses scripts." exit fi src=en tgt=de lang=en-de prep=$OUTDIR tmp=$prep/tmp orig=orig dev=dev/newstest2013 mkdir -p $orig $tmp $prep cd $orig for ((i=0;i<${#URLS[@]};++i)); do file=${FILES[i]} if [ -f $file ]; then echo "$file already exists, skipping download" else url=${URLS[i]} wget "$url" if [ -f $file ]; then echo "$url successfully downloaded." else echo "$url not successfully downloaded." exit -1 fi if [ ${file: -4} == ".tgz" ]; then tar zxvf $file elif [ ${file: -4} == ".tar" ]; then tar xvf $file fi fi done cd .. echo "pre-processing train data..." for l in $src $tgt; do rm $tmp/train.tags.$lang.tok.$l for f in "${CORPORA[@]}"; do cat $orig/$f.$l | \ perl $NORM_PUNC $l | \ perl $REM_NON_PRINT_CHAR | \ perl $TOKENIZER -threads 8 -a -l $l >> $tmp/train.tags.$lang.tok.$l done done echo "pre-processing test data..." for l in $src $tgt; do if [ "$l" == "$src" ]; then t="src" else t="ref" fi grep '<seg id' $orig/test-full/newstest2014-deen-$t.$l.sgm | \ sed -e 's/<seg id="[0-9]*">\s*//g' | \ sed -e 's/\s*<\/seg>\s*//g' | \ sed -e "s/\’/\'/g" | \ perl $TOKENIZER -threads 8 -a -l $l > $tmp/test.$l echo "" done echo "splitting train and valid..." for l in $src $tgt; do awk '{if (NR%100 == 0) print $0; }' $tmp/train.tags.$lang.tok.$l > $tmp/valid.$l awk '{if (NR%100 != 0) print $0; }' $tmp/train.tags.$lang.tok.$l > $tmp/train.$l done TRAIN=$tmp/train.de-en BPE_CODE=$prep/code rm -f $TRAIN for l in $src $tgt; do cat $tmp/train.$l >> $TRAIN done echo "learn_bpe.py on ${TRAIN}..." python $BPEROOT/learn_bpe.py -s $BPE_TOKENS < $TRAIN > $BPE_CODE for L in $src $tgt; do for f in train.$L valid.$L test.$L; do echo "apply_bpe.py to ${f}..." python $BPEROOT/apply_bpe.py -c $BPE_CODE < $tmp/$f > $tmp/bpe.$f done done perl $CLEAN -ratio 1.5 $tmp/bpe.train $src $tgt $prep/train 1 250 perl $CLEAN -ratio 1.5 $tmp/bpe.valid $src $tgt $prep/valid 1 250 for L in $src $tgt; do cp $tmp/bpe.test.$L $prep/test.$L done
COCO-LM/fairseq/examples/translation/prepare-wmt14en2de.sh/0
{ "file_path": "COCO-LM/fairseq/examples/translation/prepare-wmt14en2de.sh", "repo_id": "COCO-LM", "token_count": 1897 }
187
# wav2vec 2.0 wav2vec 2.0 learns speech representations on unlabeled data as described in [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations (Baevski et al., 2020)](https://arxiv.org/abs/2006.11477). We learned speech representations in multiple languages as well in [Unsupervised Cross-lingual Representation Learning for Speech Recognition (Conneau et al., 2020)](https://arxiv.org/abs/2006.13979). We also combined wav2vec 2.0 with self-training in [Self-training and Pre-training are Complementary for Speech Recognition (Xu et al., 2020)](https://arxiv.org/abs/2010.11430). ## Pre-trained models Model | Finetuning split | Dataset | Model |---|---|---|--- Wav2Vec 2.0 Base | No finetuning | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small.pt) Wav2Vec 2.0 Base | 10 minutes | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small_10m.pt) Wav2Vec 2.0 Base | 100 hours | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small_100h.pt) Wav2Vec 2.0 Base | 960 hours | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small_960h.pt) Wav2Vec 2.0 Large | No finetuning | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/libri960_big.pt) Wav2Vec 2.0 Large | 10 minutes | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_big_10m.pt) Wav2Vec 2.0 Large | 100 hours | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_big_100h.pt) Wav2Vec 2.0 Large | 960 hours | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_big_960h.pt) Wav2Vec 2.0 Large (LV-60)* | No finetuning | [Libri-Light](https://github.com/facebookresearch/libri-light) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_new.pt) Wav2Vec 2.0 Large (LV-60)* | 10 minutes | [Libri-Light](https://github.com/facebookresearch/libri-light) + [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_10m_new.pt) Wav2Vec 2.0 Large (LV-60)* | 100 hours | [Libri-Light](https://github.com/facebookresearch/libri-light) + [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_100h_new.pt) Wav2Vec 2.0 Large (LV-60)* | 960 hours | [Libri-Light](https://github.com/facebookresearch/libri-light) + [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec2_vox_960h_new.pt) Wav2Vec 2.0 Large (LV-60) + Self Training * | 10 minutes | [Libri-Light](https://github.com/facebookresearch/libri-light) + [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_10m_pl.pt) Wav2Vec 2.0 Large (LV-60) + Self Training * | 100 hours | [Libri-Light](https://github.com/facebookresearch/libri-light) + [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_100h_pl.pt) Wav2Vec 2.0 Large (LV-60) + Self Training * | 960 hours | [Libri-Light](https://github.com/facebookresearch/libri-light) + [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_960h_pl.pt) 
\* updated (Oct. 24, 2020)

We also release multilingual pre-trained wav2vec 2.0 (XLSR) models:

Model | Architecture | Hours | Languages | Datasets | Model
|---|---|---|---|---|---
XLSR-53 | Large | 56k | 53 | MLS, CommonVoice, BABEL | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/xlsr_53_56k.pt)

The XLSR model uses the following datasets for multilingual pretraining:

* **[MLS: Multilingual LibriSpeech](https://indico2.conference4me.psnc.pl/event/35/contributions/3585/attachments/1060/1101/Wed-2-6-10.pdf)** (8 languages, 50.7k hours): *Dutch, English, French, German, Italian, Polish, Portuguese, Spanish*

* **[CommonVoice](https://commonvoice.mozilla.org/en/languages)** (36 languages, 3.6k hours): *Arabic, Basque, Breton, Chinese (CN), Chinese (HK), Chinese (TW), Chuvash, Dhivehi, Dutch, English, Esperanto, Estonian, French, German, Hakh-Chin, Indonesian, Interlingua, Irish, Italian, Japanese, Kabyle, Kinyarwanda, Kyrgyz, Latvian, Mongolian, Persian, Portuguese, Russian, Sakha, Slovenian, Spanish, Swedish, Tamil, Tatar, Turkish, Welsh* (see also [finetuning splits](https://dl.fbaipublicfiles.com/cpc_audio/common_voices_splits.tar.gz) from [this paper](https://arxiv.org/abs/2002.02848)).

* **[Babel](https://catalog.ldc.upenn.edu/byyear)** (17 languages, 1.7k hours): *Assamese, Bengali, Cantonese, Cebuano, Georgian, Haitian, Kazakh, Kurmanji, Lao, Pashto, Swahili, Tagalog, Tamil, Tok, Turkish, Vietnamese, Zulu*

## Training a new model with the CLI tools

Given a directory containing wav files to be used for pretraining (we recommend splitting each file into separate files 10 to 30 seconds in length)

### Prepare training data manifest:

First, install the `soundfile` library:
```shell script
pip install soundfile
```

Next, run:

```shell script
$ python examples/wav2vec/wav2vec_manifest.py /path/to/waves --dest /manifest/path --ext $ext --valid-percent $valid
```

$ext should be set to flac, wav, or whatever format your dataset happens to use that soundfile can read.

$valid should be set to some reasonable percentage (like 0.01) of training data to use for validation.
To use a pre-defined validation set (like dev-other from librispeech), set it to 0 and then overwrite valid.tsv with a separately pre-processed manifest file.

### Train a wav2vec 2.0 base model:

This configuration was used for the base model trained on the Librispeech dataset in the wav2vec 2.0 paper.

Note that the input is expected to be single channel, sampled at 16 kHz.

```shell script
$ fairseq-hydra-train \
    task.data=/path/to/data \
    --config-dir /path/to/fairseq-py/examples/wav2vec/config/pretraining \
    --config-name wav2vec2_base_librispeech
```

Note: you can simulate 64 GPUs by using k GPUs and adding command line parameters (before `--config-dir`)
`distributed_training.distributed_world_size=k` `+optimization.update_freq='[x]'` where x = 64/k

### Train a wav2vec 2.0 large model:

This configuration was used for the large model trained on the Libri-light dataset in the wav2vec 2.0 paper.

```shell script
$ fairseq-hydra-train \
    task.data=/path/to/data \
    --config-dir /path/to/fairseq-py/examples/wav2vec/config/pretraining \
    --config-name wav2vec2_large_librivox
```

Note: you can simulate 128 GPUs by using k GPUs and adding command line parameters (before `--config-dir`)
`distributed_training.distributed_world_size=k` `+optimization.update_freq='[x]'` where x = 128/k

### Fine-tune a pre-trained model with CTC:

Fine-tuning a model requires parallel audio and label files, as well as a vocabulary file in fairseq format.
A letter vocabulary can be downloaded [here](https://dl.fbaipublicfiles.com/fairseq/wav2vec/dict.ltr.txt).
An example [script](libri_labels.py) that generates labels for the Librispeech dataset from the tsv file produced by wav2vec_manifest.py can be used as follows:

```shell script
split=train
$ python libri_labels.py /path/to/tsv --output-dir /output/dir --output-name $split
```

Fine-tuning on 100h of Librispeech with letter targets:
```shell script
$ fairseq-hydra-train \
    distributed_training.distributed_port=$PORT \
    task.data=/path/to/data \
    model.w2v_path=/path/to/model.pt \
    --config-dir /path/to/fairseq-py/examples/wav2vec/config/finetuning \
    --config-name base_100h
```

There are other config files in the config/finetuning directory that can be used to fine-tune on other splits.
You can specify the right config via the `--config-name` parameter.

Note: you can simulate 24 GPUs by using k GPUs and adding command line parameters (before `--config-dir`)
`distributed_training.distributed_world_size=k` `+optimization.update_freq='[x]'` where x = 24/k

Decoding with a language model during training requires flashlight [python bindings](https://github.com/facebookresearch/flashlight/tree/master/bindings/python) (previously called [wav2letter](https://github.com/facebookresearch/wav2letter)).
If you want to use a language model, add `+criterion.wer_args='[/path/to/kenlm, /path/to/lexicon, 2, -1]'` to the command line.

### Evaluating a CTC model:

Evaluating a CTC model with a language model requires [flashlight python bindings](https://github.com/facebookresearch/flashlight/tree/master/bindings/python) (previously called [wav2letter](https://github.com/facebookresearch/wav2letter)) to be installed.

The Fairseq transformer language model used in the wav2vec 2.0 paper can be obtained from the [wav2letter model repository](https://github.com/facebookresearch/wav2letter/tree/master/recipes/sota/2019).
Be sure to upper-case the language model vocab after downloading it.

The letter dictionary for pre-trained models can be found [here](https://dl.fbaipublicfiles.com/fairseq/wav2vec/dict.ltr.txt).

Next, run the evaluation command:

```shell script
subset=dev_other
python examples/speech_recognition/infer.py /checkpoint/abaevski/data/speech/libri/10h/wav2vec/raw --task audio_pretraining \
--nbest 1 --path /path/to/model --gen-subset $subset --results-path /path/to/save/results/for/sclite --w2l-decoder kenlm \
--lm-model /path/to/kenlm.bin --lm-weight 2 --word-score -1 --sil-weight 0 --criterion ctc --labels ltr --max-tokens 4000000 \
--post-process letter
```

To get raw numbers, use --w2l-decoder viterbi and omit the lexicon. To use the transformer language model, use --w2l-decoder fairseqlm.

## Use wav2vec 2.0 with 🤗Transformers:

Wav2Vec2 is also available in the [🤗Transformers library](https://github.com/huggingface/transformers) since version 4.3.

Pretrained Models can be found on the [hub](https://huggingface.co/models?filter=wav2vec2)
and documentation can be found [here](https://huggingface.co/transformers/master/model_doc/wav2vec2.html).
Usage example: ```python # !pip install transformers import soundfile as sf import torch from transformers import Wav2Vec2ForMaskedLM, Wav2Vec2Tokenizer # load pretrained model tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h") model = Wav2Vec2ForMaskedLM.from_pretrained("facebook/wav2vec2-base-960h") # load audio audio_input, _ = sf.read("path/to/audio/file") # transcribe input_values = tokenizer(audio_input, return_tensors="pt").input_values logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) transcription = tokenizer.batch_decode(predicted_ids)[0] ``` # wav2vec Example to train a wav2vec model as described in [wav2vec: Unsupervised Pre-training for Speech Recognition (Schneider et al., 2019)](https://arxiv.org/abs/1904.05862). ## Pre-trained models Description | Dataset | Model ---|---|--- Wav2Vec large | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_large.pt) #### Example usage: ```python import torch import fairseq cp_path = '/path/to/wav2vec.pt' model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([cp_path]) model = model[0] model.eval() wav_input_16khz = torch.randn(1,10000) z = model.feature_extractor(wav_input_16khz) c = model.feature_aggregator(z) ``` ## Training a new model with the CLI tools Given a directory containing wav files to be used for pretraining (we recommend splitting each file into separate files 10 to 30 seconds in length) ### Prepare training data manifest: ``` $ python examples/wav2vec/wav2vec_manifest.py /path/to/waves --dest /manifest/path --ext wav ``` ### Train a wav2vec model: ``` $ python train.py /manifest/path --save-dir /model/path --num-workers 6 --fp16 --max-update 400000 --save-interval 1 --no-epoch-checkpoints \ --arch wav2vec --task audio_pretraining --min-lr 1e-06 --stop-min-lr 1e-09 --optimizer adam --lr 0.005 --lr-scheduler cosine \ --conv-feature-layers [(512, 10, 5), (512, 8, 4), (512, 4, 2), (512, 4, 2), (512, 4, 2), (512, 1, 1), (512, 1, 1)] \ --conv-aggregator-layers [(512, 2, 1), (512, 3, 1), (512, 4, 1), (512, 5, 1), (512, 6, 1), (512, 7, 1), (512, 8, 1), (512, 9, 1), (512, 10, 1), (512, 11, 1), (512, 12, 1), (512, 13, 1)] \ --skip-connections-agg --residual-scale 0.5 --log-compression --warmup-updates 500 --warmup-init-lr 1e-07 --criterion wav2vec --num-negatives 10 \ --max-sample-size 150000 --max-tokens 1500000 --skip-invalid-size-inputs-valid-test ``` ### Run wav2vec2 pre-training on Google Cloud TPUs: Wav2Vec2 is now supported on TPUs! It's currently pre-training only. 
#### Using hydra on a v3-8:

```
$ OMP_NUM_THREADS=1 fairseq-hydra-train \
  task.data=/manifest/path \
  --config-dir /PATH/TO/FAIRSEQ/examples/wav2vec/config/pretraining \
  --config-name wav2vec2_large_librivox_tpu.yaml
```

#### Using command line arguments on a v3-8:

```
$ OMP_NUM_THREADS=1 python train.py /manifest/path --save-dir /model/path --num-workers 6 --fp16 --max-update 400000 --save-interval 1 --no-epoch-checkpoints \
--arch wav2vec2 --task audio_pretraining --min-lr 1e-06 --stop-min-lr 1e-09 --optimizer adam --lr 0.005 --lr-scheduler cosine \
--conv-feature-layers [(512, 10, 5), (512, 8, 4), (512, 4, 2), (512, 4, 2), (512, 4, 2), (512, 1, 1), (512, 1, 1)] \
--conv-aggregator-layers [(512, 2, 1), (512, 3, 1), (512, 4, 1), (512, 5, 1), (512, 6, 1), (512, 7, 1), (512, 8, 1), (512, 9, 1), (512, 10, 1), (512, 11, 1), (512, 12, 1), (512, 13, 1)] \
--skip-connections-agg --residual-scale 0.5 --log-compression --warmup-updates 500 --warmup-init-lr 1e-07 --criterion wav2vec --num-negatives 10 \
--max-sample-size 150000 --max-tokens 1500000 --skip-invalid-size-inputs-valid-test \
--tpu --distributed-world-size 8 --num-batch-buckets 3 --enable-padding \
--encoder-layerdrop 0 --mask-channel-prob 0.1
```

#### Using hydra on a pod slice (v3-N with N > 8):

```
$ OMP_NUM_THREADS=1 fairseq-hydra-train \
  task.data=/manifest/path \
  --config-dir /PATH/TO/FAIRSEQ/examples/wav2vec/config/pretraining \
  --config-name wav2vec2_large_librivox_tpu-pod.yaml  # edit distributed-world-size accordingly
```

#### Using command line arguments on a pod slice (v3-N with N > 8):

```
$ python -m torch_xla.distributed.xla_dist \
  --tpu ${TPUNAME} --conda-env=torch-xla-${TORCH_XLA_VERSION} --env OMP_NUM_THREADS=1 \
  -- \
python train.py /manifest/path --save-dir /model/path --num-workers 6 --fp16 --max-update 400000 --save-interval 1 --no-epoch-checkpoints \
--arch wav2vec2 --task audio_pretraining --min-lr 1e-06 --stop-min-lr 1e-09 --optimizer adam --lr 0.005 --lr-scheduler cosine \
--conv-feature-layers [(512, 10, 5), (512, 8, 4), (512, 4, 2), (512, 4, 2), (512, 4, 2), (512, 1, 1), (512, 1, 1)] \
--conv-aggregator-layers [(512, 2, 1), (512, 3, 1), (512, 4, 1), (512, 5, 1), (512, 6, 1), (512, 7, 1), (512, 8, 1), (512, 9, 1), (512, 10, 1), (512, 11, 1), (512, 12, 1), (512, 13, 1)] \
--skip-connections-agg --residual-scale 0.5 --log-compression --warmup-updates 500 --warmup-init-lr 1e-07 --criterion wav2vec --num-negatives 10 \
--max-sample-size 150000 --max-tokens 1500000 --skip-invalid-size-inputs-valid-test \
--tpu --distributed-world-size ${WORLD_SIZE} --num-batch-buckets 3 --enable-padding \
--encoder-layerdrop 0 --mask-channel-prob 0.1
```

### Extract embeddings from the downstream task data:

```
$ PYTHONPATH=/path/to/fairseq python examples/wav2vec/wav2vec_featurize.py --input /path/to/task/waves --output /path/to/output \
--model /model/path/checkpoint_best.pt --split train valid test
```

# vq-wav2vec

Example to train a vq-wav2vec model as described in [vq-wav2vec: Self-Supervised Learning of Discrete Speech Representations (Baevski et al., 2019)](https://arxiv.org/abs/1910.05453). These models are also used in [Effectiveness of self-supervised pre-training for speech recognition (Baevski et al., 2019)](https://arxiv.org/abs/1911.03912).
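The pre-trained checkpoints listed in the next section can be downloaded directly with any HTTP client; for example, fetching the Gumbel variant (URL taken from the table below):

```
$ wget https://dl.fbaipublicfiles.com/fairseq/wav2vec/vq-wav2vec.pt
```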
## Pre-trained models

Description | Dataset | Model
---|---|---
vq-wav2vec Gumbel | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/vq-wav2vec.pt)
vq-wav2vec K-means | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/vq-wav2vec_kmeans.pt)
Roberta on K-means codes | [Librispeech](http://www.openslr.org/12) | [download](https://dl.fbaipublicfiles.com/fairseq/wav2vec/bert_kmeans.tar)

#### Example usage:

```python
import torch
import fairseq

cp_path = '/path/to/vq-wav2vec.pt'
model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([cp_path])
model = model[0]
model.eval()

wav_input_16khz = torch.randn(1,10000)
z = model.feature_extractor(wav_input_16khz)
_, idxs = model.vector_quantizer.forward_idx(z)
print(idxs.shape)  # output: torch.Size([1, 60, 2]), 60 timesteps with 2 indexes corresponding to 2 groups in the model
```

## Training a new model with the CLI tools

Given a directory containing wav files to be used for pretraining (we recommend splitting each file into separate files 10 to 30 seconds in length):

### Prepare training data manifest:

```
$ python examples/wav2vec/wav2vec_manifest.py /path/to/waves --dest /manifest/path --ext wav
```

### Train a gumbel vq-wav2vec model:

```
$ python train.py /manifest/path --save-dir /model/path --num-workers 6 --fp16 --max-update 400000 \
--save-interval 1 --no-epoch-checkpoints --arch wav2vec --task audio_pretraining --min-lr 1e-06 --stop-min-lr 1e-09 \
--optimizer adam --lr 1e-05 --lr-scheduler cosine \
--conv-feature-layers [(512, 10, 5), (512, 8, 4), (512, 4, 2), (512, 4, 2), (512, 4, 2), (512, 1, 1), (512, 1, 1), (512, 1, 1)] \
--conv-aggregator-layers [(512, 2, 1), (512, 3, 1), (512, 4, 1), (512, 5, 1), (512, 6, 1), (512, 7, 1), (512, 8, 1), (512, 9, 1), (512, 10, 1), (512, 11, 1), (512, 12, 1), (512, 13, 1)] \
--activation gelu --offset auto --skip-connections-agg --residual-scale 0.5 \
--log-keys ["prob_perplexity","code_perplexity","temp"] --vq-type gumbel --vq-groups 2 --vq-depth 2 \
--combine-groups --vq-vars 320 --vq-temp (2,0.5,0.999995) --prediction-steps 12 --warmup-updates 1000 \
--warmup-init-lr 1e-07 --criterion wav2vec --num-negatives 10 --max-sample-size 150000 \
--max-tokens 300000 --cross-sample-negatives 0 --update-freq 1 --seed 2 --skip-invalid-size-inputs-valid-test
```

For k-means training, set `--vq-type kmeans` and add the `--loss-weights [1]` argument. The pre-trained models were trained on 16 GPUs.

### Tokenize audio data (e.g. for BERT training):

```
$ PYTHONPATH=/path/to/fairseq python examples/wav2vec/vq-wav2vec_featurize.py --data-dir /manifest/path --output-dir /path/to/output \
--checkpoint /model/path/checkpoint_best.pt --split train valid test --extension tsv
```
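The tokenizer writes a `<split>.src` file (plus a `<split>.lbl` file when `--labels` is passed) into the output directory. Based on how `vq-wav2vec_featurize.py` serializes indices, each line corresponds to one utterance, with one space-separated token per timestep and the two group indices joined by `-`. The values below are made up purely for illustration:

```
$ head -n 1 /path/to/output/train.src
104-23 104-23 87-311 87-311 19-5 19-5 204-17 ...
```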
COCO-LM/fairseq/examples/wav2vec/README.md/0
{ "file_path": "COCO-LM/fairseq/examples/wav2vec/README.md", "repo_id": "COCO-LM", "token_count": 6946 }
188
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Helper script to pre-compute embeddings for a flashlight (previously called wav2letter++) dataset """ import argparse import glob import os import os.path as osp import pprint import soundfile as sf import torch import fairseq from torch import nn from torch.utils.data import DataLoader try: import tqdm except: print("Install tqdm to use --log-format=tqdm") class FilesDataset: def __init__(self, files, labels): self.files = files if labels and osp.exists(labels): with open(labels, "r") as lbl_f: self.labels = [line.rstrip() for line in lbl_f] else: self.labels = labels def __len__(self): return len(self.files) def __getitem__(self, index): fname = self.files[index] wav, sr = sf.read(fname) assert sr == 16000 wav = torch.from_numpy(wav).float() lbls = None if self.labels: if isinstance(self.labels, str): lbl_file = osp.splitext(fname)[0] + "." + self.labels with open(lbl_file, "r") as lblf: lbls = lblf.readline() assert lbls is not None else: lbls = self.labels[index] return wav, lbls def collate(self, batch): return batch class ArgTypes: @staticmethod def existing_path(arg): arg = str(arg) assert osp.exists(arg), f"File {arg} does not exist" return arg @staticmethod def mkdir(arg): arg = str(arg) os.makedirs(arg, exist_ok=True) return arg class DatasetWriter: def __init__(self): self.args = self.load_config() pprint.pprint(self.args.__dict__) self.model = self.load_model() def __getattr__(self, attr): return getattr(self.args, attr) def read_manifest(self, fname): with open(fname, "r") as fp: lines = fp.read().split("\n") root = lines.pop(0).strip() fnames = [ osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0 ] return fnames def process_splits(self): if self.args.shard is not None or self.args.num_shards is not None: assert self.args.shard is not None and self.args.num_shards is not None for split in self.splits: print(split) if self.extension == "tsv": datadir = osp.join(self.data_dir, f"{split}.{self.extension}") print("Reading manifest file: ", datadir) files = self.read_manifest(datadir) else: datadir = osp.join(self.data_dir, split, f"**/*.{self.extension}") files = glob.glob(datadir, recursive=True) assert len(files) > 0 if self.args.shard is not None: files = files[self.args.shard :: self.args.num_shards] lbls = [] with open(self.data_file(split), "w") as srcf: for line, lbl in self.iterate(files): print(line, file=srcf) if self.args.labels: lbls.append(lbl + "\n") if self.args.labels: assert all(a is not None for a in lbls) with open(self.lbl_file(split), "w") as lblf: lblf.writelines(lbls) def iterate(self, files): data = self.load_data(files) for samples in tqdm.tqdm(data, total=len(files) // 32): for wav, lbl in samples: x = wav.unsqueeze(0).float().cuda() div = 1 while x.size(-1) // div > self.args.max_size: div += 1 xs = x.chunk(div, dim=-1) result = [] for x in xs: torch.cuda.empty_cache() x = self.model.feature_extractor(x) if self.quantize_location == "encoder": with torch.no_grad(): _, idx = self.model.vector_quantizer.forward_idx(x) idx = idx.squeeze(0).cpu() else: with torch.no_grad(): z = self.model.feature_aggregator(x) _, idx = self.model.vector_quantizer.forward_idx(z) idx = idx.squeeze(0).cpu() result.append(idx) idx = torch.cat(result, dim=0) yield " ".join("-".join(map(str, a.tolist())) for a in idx), lbl def lbl_file(self, name): shard_part = 
"" if self.args.shard is None else f".{self.args.shard}" return osp.join(self.output_dir, f"{name}.lbl{shard_part}") def data_file(self, name): shard_part = "" if self.args.shard is None else f".{self.args.shard}" return osp.join(self.output_dir, f"{name}.src{shard_part}") def var_file(self): return osp.join(self.output_dir, f"vars.pt") def load_config(self): parser = argparse.ArgumentParser("Vector Quantized wav2vec features") # Model Arguments parser.add_argument("--checkpoint", type=ArgTypes.existing_path, required=True) parser.add_argument("--data-parallel", action="store_true") # Output Arguments parser.add_argument("--output-dir", type=ArgTypes.mkdir, required=True) # Data Arguments parser.add_argument("--data-dir", type=ArgTypes.existing_path, required=True) parser.add_argument("--splits", type=str, nargs="+", required=True) parser.add_argument("--extension", type=str, required=True) parser.add_argument("--labels", type=str, required=False) parser.add_argument("--shard", type=int, default=None) parser.add_argument("--num-shards", type=int, default=None) parser.add_argument("--max-size", type=int, default=1300000) # Logger Arguments parser.add_argument( "--log-format", type=str, choices=["none", "simple", "tqdm"] ) return parser.parse_args() def load_data(self, fnames): dataset = FilesDataset(fnames, self.args.labels) loader = DataLoader( dataset, batch_size=32, collate_fn=dataset.collate, num_workers=8 ) return loader def load_model(self): model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([self.checkpoint]) model = model[0] self.quantize_location = getattr(cfg.model, "vq", "encoder") model.eval().float() model.cuda() if self.data_parallel: model = nn.DataParallel(model) return model def __call__(self): self.process_splits() if hasattr(self.model.feature_extractor, "vars") and ( self.args.shard is None or self.args.shard == 0 ): vars = ( self.model.feature_extractor.vars.view( self.model.feature_extractor.banks, self.model.feature_extractor.num_vars, -1, ) .cpu() .detach() ) print("writing learned latent variable embeddings: ", vars.shape) torch.save(vars, self.var_file()) if __name__ == "__main__": write_data = DatasetWriter() write_data() print("Done.")
COCO-LM/fairseq/examples/wav2vec/vq-wav2vec_featurize.py/0
{ "file_path": "COCO-LM/fairseq/examples/wav2vec/vq-wav2vec_featurize.py", "repo_id": "COCO-LM", "token_count": 3859 }
189
/** * Copyright 2017-present, Facebook, Inc. * All rights reserved. * * This source code is licensed under the license found in the * LICENSE file in the root directory of this source tree. */ #include <map> #include <array> #include <cstring> #include <cstdio> typedef struct { size_t reflen; size_t predlen; size_t match1; size_t count1; size_t match2; size_t count2; size_t match3; size_t count3; size_t match4; size_t count4; } bleu_stat; // left trim (remove pad) void bleu_ltrim(size_t* len, int** sent, int pad) { size_t start = 0; while(start < *len) { if (*(*sent + start) != pad) { break; } start++; } *sent += start; *len -= start; } // right trim remove (eos) void bleu_rtrim(size_t* len, int** sent, int pad, int eos) { size_t end = *len - 1; while (end > 0) { if (*(*sent + end) != eos && *(*sent + end) != pad) { break; } end--; } *len = end + 1; } // left and right trim void bleu_trim(size_t* len, int** sent, int pad, int eos) { bleu_ltrim(len, sent, pad); bleu_rtrim(len, sent, pad, eos); } size_t bleu_hash(int len, int* data) { size_t h = 14695981039346656037ul; size_t prime = 0x100000001b3; char* b = (char*) data; size_t blen = sizeof(int) * len; while (blen-- > 0) { h ^= *b++; h *= prime; } return h; } void bleu_addngram( size_t *ntotal, size_t *nmatch, size_t n, size_t reflen, int* ref, size_t predlen, int* pred) { if (predlen < n) { return; } predlen = predlen - n + 1; (*ntotal) += predlen; if (reflen < n) { return; } reflen = reflen - n + 1; std::map<size_t, size_t> count; while (predlen > 0) { size_t w = bleu_hash(n, pred++); count[w]++; predlen--; } while (reflen > 0) { size_t w = bleu_hash(n, ref++); if (count[w] > 0) { (*nmatch)++; count[w] -=1; } reflen--; } } extern "C" { #ifdef _WIN64 __declspec(dllexport) #endif void bleu_zero_init(bleu_stat* stat) { std::memset(stat, 0, sizeof(bleu_stat)); } #ifdef _WIN64 __declspec(dllexport) #endif void bleu_one_init(bleu_stat* stat) { bleu_zero_init(stat); stat->count1 = 0; stat->count2 = 1; stat->count3 = 1; stat->count4 = 1; stat->match1 = 0; stat->match2 = 1; stat->match3 = 1; stat->match4 = 1; } #ifdef _WIN64 __declspec(dllexport) #endif void bleu_add( bleu_stat* stat, size_t reflen, int* ref, size_t predlen, int* pred, int pad, int eos) { bleu_trim(&reflen, &ref, pad, eos); bleu_trim(&predlen, &pred, pad, eos); stat->reflen += reflen; stat->predlen += predlen; bleu_addngram(&stat->count1, &stat->match1, 1, reflen, ref, predlen, pred); bleu_addngram(&stat->count2, &stat->match2, 2, reflen, ref, predlen, pred); bleu_addngram(&stat->count3, &stat->match3, 3, reflen, ref, predlen, pred); bleu_addngram(&stat->count4, &stat->match4, 4, reflen, ref, predlen, pred); } }
COCO-LM/fairseq/fairseq/clib/libbleu/libbleu.cpp/0
{ "file_path": "COCO-LM/fairseq/fairseq/clib/libbleu/libbleu.cpp", "repo_id": "COCO-LM", "token_count": 1312 }
190
# @package _group_
activation_fn: "relu"
dropout: 0.3
attention_dropout: 0.1
activation_dropout: 0.1
relu_dropout: 0.1
decoder_embed_dim: 1024
decoder_output_dim: 1024
decoder_input_dim: 1024
decoder_ffn_embed_dim: 4096
decoder_layers: 16
decoder_attention_heads: 8
decoder_normalize_before: true
no_decoder_final_norm: true
adaptive_softmax_cutoff: "20000,60000"
adaptive_softmax_dropout: 0.2
adaptive_softmax_factor: 4
no_token_positional_embeddings: false
share_decoder_input_output_embed: false
character_embeddings: false
character_filters: "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]"
character_embedding_dim: 4
char_embedder_highway_layers: 2
adaptive_input: true
adaptive_input_factor: 4
adaptive_input_cutoff: "20000,60000"
tie_adaptive_weights: true
tie_adaptive_proj: true
decoder_learned_pos: false
decoder_layerdrop: 0
decoder_layers_to_keep: null
layernorm_embedding: false
no_scale_embedding: false
quant_noise_pq: 0
quant_noise_pq_block_size: 8
quant_noise_scalar: 0
COCO-LM/fairseq/fairseq/config/model/transformer_lm/transformer_lm_wiki103.yaml/0
{ "file_path": "COCO-LM/fairseq/fairseq/config/model/transformer_lm/transformer_lm_wiki103.yaml", "repo_id": "COCO-LM", "token_count": 403 }
191
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from torch import Tensor @register_criterion("nat_loss") class LabelSmoothedDualImitationCriterion(FairseqCriterion): def __init__(self, task, label_smoothing): super().__init__(task) self.label_smoothing = label_smoothing @staticmethod def add_args(parser): """Add criterion-specific arguments to the parser.""" parser.add_argument( "--label-smoothing", default=0.0, type=float, metavar="D", help="epsilon for label smoothing, 0 means no label smoothing", ) def _compute_loss( self, outputs, targets, masks=None, label_smoothing=0.0, name="loss", factor=1.0 ): """ outputs: batch x len x d_model targets: batch x len masks: batch x len policy_logprob: if there is some policy depends on the likelihood score as rewards. """ def mean_ds(x: Tensor, dim=None) -> Tensor: return ( x.float().mean().type_as(x) if dim is None else x.float().mean(dim).type_as(x) ) if masks is not None: outputs, targets = outputs[masks], targets[masks] if masks is not None and not masks.any(): nll_loss = torch.tensor(0) loss = nll_loss else: logits = F.log_softmax(outputs, dim=-1) if targets.dim() == 1: losses = F.nll_loss(logits, targets.to(logits.device), reduction="none") else: # soft-labels losses = F.kl_div(logits, targets.to(logits.device), reduction="none") losses = losses.sum(-1) nll_loss = mean_ds(losses) if label_smoothing > 0: loss = ( nll_loss * (1 - label_smoothing) - mean_ds(logits) * label_smoothing ) else: loss = nll_loss loss = loss * factor return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor} def _custom_loss(self, loss, name="loss", factor=1.0): return {"name": name, "loss": loss, "factor": factor} def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. 
Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ nsentences, ntokens = sample["nsentences"], sample["ntokens"] # B x T src_tokens, src_lengths = ( sample["net_input"]["src_tokens"], sample["net_input"]["src_lengths"], ) tgt_tokens, prev_output_tokens = sample["target"], sample["prev_target"] outputs = model(src_tokens, src_lengths, prev_output_tokens, tgt_tokens) losses, nll_loss = [], [] for obj in outputs: if outputs[obj].get("loss", None) is None: _losses = self._compute_loss( outputs[obj].get("out"), outputs[obj].get("tgt"), outputs[obj].get("mask", None), outputs[obj].get("ls", 0.0), name=obj + "-loss", factor=outputs[obj].get("factor", 1.0), ) else: _losses = self._custom_loss( outputs[obj].get("loss"), name=obj + "-loss", factor=outputs[obj].get("factor", 1.0), ) losses += [_losses] if outputs[obj].get("nll_loss", False): nll_loss += [_losses.get("nll_loss", 0.0)] loss = sum(l["loss"] for l in losses) nll_loss = sum(l for l in nll_loss) if len(nll_loss) > 0 else loss.new_tensor(0) # NOTE: # we don't need to use sample_size as denominator for the gradient # here sample_size is just used for logging sample_size = 1 logging_output = { "loss": loss.data, "nll_loss": nll_loss.data, "ntokens": ntokens, "nsentences": nsentences, "sample_size": sample_size, } for l in losses: logging_output[l["name"]] = ( utils.item(l["loss"].data / l["factor"]) if reduce else l[["loss"]].data / l["factor"] ) return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" sample_size = utils.item( sum(log.get("sample_size", 0) for log in logging_outputs) ) loss = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) nll_loss = utils.item(sum(log.get("nll_loss", 0) for log in logging_outputs)) metrics.log_scalar( "loss", loss / sample_size / math.log(2), sample_size, round=3 ) metrics.log_scalar( "nll_loss", nll_loss / sample_size / math.log(2), sample_size, round=3 ) metrics.log_derived( "ppl", lambda meters: utils.get_perplexity(meters["loss"].avg) ) for key in logging_outputs[0]: if key[-5:] == "-loss": val = sum(log.get(key, 0) for log in logging_outputs) metrics.log_scalar( key[:-5], val / sample_size / math.log(2) if sample_size > 0 else 0.0, sample_size, round=3, ) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
COCO-LM/fairseq/fairseq/criterions/nat_loss.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/criterions/nat_loss.py", "repo_id": "COCO-LM", "token_count": 3101 }
192
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq import utils from . import FairseqDataset def backtranslate_samples(samples, collate_fn, generate_fn, cuda=True): """Backtranslate a list of samples. Given an input (*samples*) of the form: [{'id': 1, 'source': 'hallo welt'}] this will return: [{'id': 1, 'source': 'hello world', 'target': 'hallo welt'}] Args: samples (List[dict]): samples to backtranslate. Individual samples are expected to have a 'source' key, which will become the 'target' after backtranslation. collate_fn (callable): function to collate samples into a mini-batch generate_fn (callable): function to generate backtranslations cuda (bool): use GPU for generation (default: ``True``) Returns: List[dict]: an updated list of samples with a backtranslated source """ collated_samples = collate_fn(samples) s = utils.move_to_cuda(collated_samples) if cuda else collated_samples generated_sources = generate_fn(s) id_to_src = {sample["id"]: sample["source"] for sample in samples} # Go through each tgt sentence in batch and its corresponding best # generated hypothesis and create a backtranslation data pair # {id: id, source: generated backtranslation, target: original tgt} return [ { "id": id.item(), "target": id_to_src[id.item()], "source": hypos[0]["tokens"].cpu(), } for id, hypos in zip(collated_samples["id"], generated_sources) ] class BacktranslationDataset(FairseqDataset): """ Sets up a backtranslation dataset which takes a tgt batch, generates a src using a tgt-src backtranslation function (*backtranslation_fn*), and returns the corresponding `{generated src, input tgt}` batch. Args: tgt_dataset (~fairseq.data.FairseqDataset): the dataset to be backtranslated. Only the source side of this dataset will be used. After backtranslation, the source sentences in this dataset will be returned as the targets. src_dict (~fairseq.data.Dictionary): the dictionary of backtranslated sentences. tgt_dict (~fairseq.data.Dictionary, optional): the dictionary of sentences to be backtranslated. backtranslation_fn (callable, optional): function to call to generate backtranslations. This is typically the `generate` method of a :class:`~fairseq.sequence_generator.SequenceGenerator` object. Pass in None when it is not available at initialization time, and use set_backtranslation_fn function to set it when available. output_collater (callable, optional): function to call on the backtranslated samples to create the final batch (default: ``tgt_dataset.collater``). cuda: use GPU for generation """ def __init__( self, tgt_dataset, src_dict, tgt_dict=None, backtranslation_fn=None, output_collater=None, cuda=True, **kwargs ): self.tgt_dataset = tgt_dataset self.backtranslation_fn = backtranslation_fn self.output_collater = ( output_collater if output_collater is not None else tgt_dataset.collater ) self.cuda = cuda if torch.cuda.is_available() else False self.src_dict = src_dict self.tgt_dict = tgt_dict def __getitem__(self, index): """ Returns a single sample from *tgt_dataset*. Note that backtranslation is not applied in this step; use :func:`collater` instead to backtranslate a batch of samples. 
""" return self.tgt_dataset[index] def __len__(self): return len(self.tgt_dataset) def set_backtranslation_fn(self, backtranslation_fn): self.backtranslation_fn = backtranslation_fn def collater(self, samples): """Merge and backtranslate a list of samples to form a mini-batch. Using the samples from *tgt_dataset*, load a collated target sample to feed to the backtranslation model. Then take the backtranslation with the best score as the source and the original input as the target. Note: we expect *tgt_dataset* to provide a function `collater()` that will collate samples into the format expected by *backtranslation_fn*. After backtranslation, we will feed the new list of samples (i.e., the `(backtranslated source, original source)` pairs) to *output_collater* and return the result. Args: samples (List[dict]): samples to backtranslate and collate Returns: dict: a mini-batch with keys coming from *output_collater* """ if samples[0].get("is_dummy", False): return samples samples = backtranslate_samples( samples=samples, collate_fn=self.tgt_dataset.collater, generate_fn=(lambda net_input: self.backtranslation_fn(net_input)), cuda=self.cuda, ) return self.output_collater(samples) def num_tokens(self, index): """Just use the tgt dataset num_tokens""" return self.tgt_dataset.num_tokens(index) def ordered_indices(self): """Just use the tgt dataset ordered_indices""" return self.tgt_dataset.ordered_indices() def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``. Note: we use *tgt_dataset* to approximate the length of the source sentence, since we do not know the actual length until after backtranslation. """ tgt_size = self.tgt_dataset.size(index)[0] return (tgt_size, tgt_size) @property def supports_prefetch(self): return getattr(self.tgt_dataset, "supports_prefetch", False) def prefetch(self, indices): return self.tgt_dataset.prefetch(indices)
COCO-LM/fairseq/fairseq/data/backtranslation_dataset.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/data/backtranslation_dataset.py", "repo_id": "COCO-LM", "token_count": 2472 }
193
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from dataclasses import dataclass, field

from fairseq import file_utils
from fairseq.data.encoders import register_bpe
from fairseq.dataclass import FairseqDataclass


@dataclass
class fastBPEConfig(FairseqDataclass):
    bpe_codes: str = field(default="???", metadata={"help": "path to fastBPE BPE"})


@register_bpe("fastbpe", dataclass=fastBPEConfig)
class fastBPE(object):
    def __init__(self, cfg):
        if cfg.bpe_codes is None:
            raise ValueError("--bpe-codes is required for --bpe=fastbpe")
        codes = file_utils.cached_path(cfg.bpe_codes)
        try:
            import fastBPE

            self.bpe = fastBPE.fastBPE(codes)
            self.bpe_symbol = "@@ "
        except ImportError:
            raise ImportError("Please install fastBPE with: pip install fastBPE")

    def encode(self, x: str) -> str:
        return self.bpe.apply([x])[0]

    def decode(self, x: str) -> str:
        return (x + " ").replace(self.bpe_symbol, "").rstrip()
COCO-LM/fairseq/fairseq/data/encoders/fastbpe.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/data/encoders/fastbpe.py", "repo_id": "COCO-LM", "token_count": 467 }
194
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import numpy as np import torch from fairseq.data import FairseqDataset, data_utils logger = logging.getLogger(__name__) def collate( samples, pad_idx, eos_idx, left_pad_source=True, left_pad_target=False, input_feeding=True, pad_to_length=None, pad_to_multiple=1, ): if len(samples) == 0: return {} def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None): return data_utils.collate_tokens( [s[key] for s in samples], pad_idx, eos_idx, left_pad, move_eos_to_beginning, pad_to_length=pad_to_length, pad_to_multiple=pad_to_multiple, ) def check_alignment(alignment, src_len, tgt_len): if alignment is None or len(alignment) == 0: return False if ( alignment[:, 0].max().item() >= src_len - 1 or alignment[:, 1].max().item() >= tgt_len - 1 ): logger.warning("alignment size mismatch found, skipping alignment!") return False return True def compute_alignment_weights(alignments): """ Given a tensor of shape [:, 2] containing the source-target indices corresponding to the alignments, a weight vector containing the inverse frequency of each target index is computed. For e.g. if alignments = [[5, 7], [2, 3], [1, 3], [4, 2]], then a tensor containing [1., 0.5, 0.5, 1] should be returned (since target index 3 is repeated twice) """ align_tgt = alignments[:, 1] _, align_tgt_i, align_tgt_c = torch.unique( align_tgt, return_inverse=True, return_counts=True ) align_weights = align_tgt_c[align_tgt_i[np.arange(len(align_tgt))]] return 1.0 / align_weights.float() id = torch.LongTensor([s["id"] for s in samples]) src_tokens = merge( "source", left_pad=left_pad_source, pad_to_length=pad_to_length["source"] if pad_to_length is not None else None, ) # sort by descending source length src_lengths = torch.LongTensor( [s["source"].ne(pad_idx).long().sum() for s in samples] ) src_lengths, sort_order = src_lengths.sort(descending=True) id = id.index_select(0, sort_order) src_tokens = src_tokens.index_select(0, sort_order) prev_output_tokens = None target = None if samples[0].get("target", None) is not None: target = merge( "target", left_pad=left_pad_target, pad_to_length=pad_to_length["target"] if pad_to_length is not None else None, ) target = target.index_select(0, sort_order) tgt_lengths = torch.LongTensor( [s["target"].ne(pad_idx).long().sum() for s in samples] ).index_select(0, sort_order) ntokens = tgt_lengths.sum().item() if samples[0].get("prev_output_tokens", None) is not None: prev_output_tokens = merge("prev_output_tokens", left_pad=left_pad_target) elif input_feeding: # we create a shifted version of targets for feeding the # previous output token(s) into the next decoder step prev_output_tokens = merge( "target", left_pad=left_pad_target, move_eos_to_beginning=True, pad_to_length=pad_to_length["target"] if pad_to_length is not None else None, ) else: ntokens = src_lengths.sum().item() batch = { "id": id, "nsentences": len(samples), "ntokens": ntokens, "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths,}, "target": target, } if prev_output_tokens is not None: batch["net_input"]["prev_output_tokens"] = prev_output_tokens.index_select( 0, sort_order ) if samples[0].get("alignment", None) is not None: bsz, tgt_sz = batch["target"].shape src_sz = batch["net_input"]["src_tokens"].shape[1] offsets = torch.zeros((len(sort_order), 2), dtype=torch.long) offsets[:, 1] += torch.arange(len(sort_order), 
dtype=torch.long) * tgt_sz if left_pad_source: offsets[:, 0] += src_sz - src_lengths if left_pad_target: offsets[:, 1] += tgt_sz - tgt_lengths alignments = [ alignment + offset for align_idx, offset, src_len, tgt_len in zip( sort_order, offsets, src_lengths, tgt_lengths ) for alignment in [samples[align_idx]["alignment"].view(-1, 2)] if check_alignment(alignment, src_len, tgt_len) ] if len(alignments) > 0: alignments = torch.cat(alignments, dim=0) align_weights = compute_alignment_weights(alignments) batch["alignments"] = alignments batch["align_weights"] = align_weights if samples[0].get("constraints", None) is not None: # Collate the packed constraints across the samples, padding to # the length of the longest sample. lens = [sample.get("constraints").size(0) for sample in samples] max_len = max(lens) constraints = torch.zeros((len(samples), max(lens))).long() for i, sample in enumerate(samples): constraints[i, 0 : lens[i]] = samples[i].get("constraints") batch["constraints"] = constraints.index_select(0, sort_order) return batch class LanguagePairDataset(FairseqDataset): """ A pair of torch.utils.data.Datasets. Args: src (torch.utils.data.Dataset): source dataset to wrap src_sizes (List[int]): source sentence lengths src_dict (~fairseq.data.Dictionary): source vocabulary tgt (torch.utils.data.Dataset, optional): target dataset to wrap tgt_sizes (List[int], optional): target sentence lengths tgt_dict (~fairseq.data.Dictionary, optional): target vocabulary left_pad_source (bool, optional): pad source tensors on the left side (default: True). left_pad_target (bool, optional): pad target tensors on the left side (default: False). shuffle (bool, optional): shuffle dataset elements before batching (default: True). input_feeding (bool, optional): create a shifted version of the targets to be passed into the model for teacher forcing (default: True). remove_eos_from_source (bool, optional): if set, removes eos from end of source if it's present (default: False). append_eos_to_target (bool, optional): if set, appends eos to end of target if it's absent (default: False). align_dataset (torch.utils.data.Dataset, optional): dataset containing alignments. constraints (Tensor, optional): 2d tensor with a concatenated, zero- delimited list of constraints for each sentence. append_bos (bool, optional): if set, appends bos to the beginning of source/target sentence. num_buckets (int, optional): if set to a value greater than 0, then batches will be bucketed into the given number of batch shapes. src_lang_id (int, optional): source language ID, if set, the collated batch will contain a field 'src_lang_id' in 'net_input' which indicates the source language of the samples. tgt_lang_id (int, optional): target language ID, if set, the collated batch will contain a field 'tgt_lang_id' which indicates the target language of the samples. 
""" def __init__( self, src, src_sizes, src_dict, tgt=None, tgt_sizes=None, tgt_dict=None, left_pad_source=True, left_pad_target=False, shuffle=True, input_feeding=True, remove_eos_from_source=False, append_eos_to_target=False, align_dataset=None, constraints=None, append_bos=False, eos=None, num_buckets=0, src_lang_id=None, tgt_lang_id=None, pad_to_multiple=1, ): if tgt_dict is not None: assert src_dict.pad() == tgt_dict.pad() assert src_dict.eos() == tgt_dict.eos() assert src_dict.unk() == tgt_dict.unk() if tgt is not None: assert len(src) == len( tgt ), "Source and target must contain the same number of examples" self.src = src self.tgt = tgt self.src_sizes = np.array(src_sizes) self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None self.sizes = ( np.vstack((self.src_sizes, self.tgt_sizes)).T if self.tgt_sizes is not None else self.src_sizes ) self.src_dict = src_dict self.tgt_dict = tgt_dict self.left_pad_source = left_pad_source self.left_pad_target = left_pad_target self.shuffle = shuffle self.input_feeding = input_feeding self.remove_eos_from_source = remove_eos_from_source self.append_eos_to_target = append_eos_to_target self.align_dataset = align_dataset if self.align_dataset is not None: assert ( self.tgt_sizes is not None ), "Both source and target needed when alignments are provided" self.constraints = constraints self.append_bos = append_bos self.eos = eos if eos is not None else src_dict.eos() self.src_lang_id = src_lang_id self.tgt_lang_id = tgt_lang_id if num_buckets > 0: from fairseq.data import BucketPadLengthDataset self.src = BucketPadLengthDataset( self.src, sizes=self.src_sizes, num_buckets=num_buckets, pad_idx=self.src_dict.pad(), left_pad=self.left_pad_source, ) self.src_sizes = self.src.sizes logger.info("bucketing source lengths: {}".format(list(self.src.buckets))) if self.tgt is not None: self.tgt = BucketPadLengthDataset( self.tgt, sizes=self.tgt_sizes, num_buckets=num_buckets, pad_idx=self.tgt_dict.pad(), left_pad=self.left_pad_target, ) self.tgt_sizes = self.tgt.sizes logger.info( "bucketing target lengths: {}".format(list(self.tgt.buckets)) ) # determine bucket sizes using self.num_tokens, which will return # the padded lengths (thanks to BucketPadLengthDataset) num_tokens = np.vectorize(self.num_tokens, otypes=[np.compat.long]) self.bucketed_num_tokens = num_tokens(np.arange(len(self.src))) self.buckets = [ (None, num_tokens) for num_tokens in np.unique(self.bucketed_num_tokens) ] else: self.buckets = None self.pad_to_multiple = pad_to_multiple def get_batch_shapes(self): return self.buckets def __getitem__(self, index): tgt_item = self.tgt[index] if self.tgt is not None else None src_item = self.src[index] # Append EOS to end of tgt sentence if it does not have an EOS and remove # EOS from end of src sentence if it exists. 
This is useful when we use # use existing datasets for opposite directions i.e., when we want to # use tgt_dataset as src_dataset and vice versa if self.append_eos_to_target: eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos() if self.tgt and self.tgt[index][-1] != eos: tgt_item = torch.cat([self.tgt[index], torch.LongTensor([eos])]) if self.append_bos: bos = self.tgt_dict.bos() if self.tgt_dict else self.src_dict.bos() if self.tgt and self.tgt[index][0] != bos: tgt_item = torch.cat([torch.LongTensor([bos]), self.tgt[index]]) bos = self.src_dict.bos() if self.src[index][0] != bos: src_item = torch.cat([torch.LongTensor([bos]), self.src[index]]) if self.remove_eos_from_source: eos = self.src_dict.eos() if self.src[index][-1] == eos: src_item = self.src[index][:-1] example = { "id": index, "source": src_item, "target": tgt_item, } if self.align_dataset is not None: example["alignment"] = self.align_dataset[index] if self.constraints is not None: example["constraints"] = self.constraints[index] return example def __len__(self): return len(self.src) def collater(self, samples, pad_to_length=None): """Merge a list of samples to form a mini-batch. Args: samples (List[dict]): samples to collate pad_to_length (dict, optional): a dictionary of {'source': source_pad_to_length, 'target': target_pad_to_length} to indicate the max length to pad to in source and target respectively. Returns: dict: a mini-batch with the following keys: - `id` (LongTensor): example IDs in the original input order - `ntokens` (int): total number of tokens in the batch - `net_input` (dict): the input to the Model, containing keys: - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in the source sentence of shape `(bsz, src_len)`. Padding will appear on the left if *left_pad_source* is ``True``. - `src_lengths` (LongTensor): 1D Tensor of the unpadded lengths of each source sentence of shape `(bsz)` - `prev_output_tokens` (LongTensor): a padded 2D Tensor of tokens in the target sentence, shifted right by one position for teacher forcing, of shape `(bsz, tgt_len)`. This key will not be present if *input_feeding* is ``False``. Padding will appear on the left if *left_pad_target* is ``True``. - `src_lang_id` (LongTensor): a long Tensor which contains source language IDs of each sample in the batch - `target` (LongTensor): a padded 2D Tensor of tokens in the target sentence of shape `(bsz, tgt_len)`. Padding will appear on the left if *left_pad_target* is ``True``. - `tgt_lang_id` (LongTensor): a long Tensor which contains target language IDs of each sample in the batch """ res = collate( samples, pad_idx=self.src_dict.pad(), eos_idx=self.eos, left_pad_source=self.left_pad_source, left_pad_target=self.left_pad_target, input_feeding=self.input_feeding, pad_to_length=pad_to_length, pad_to_multiple=self.pad_to_multiple, ) if self.src_lang_id is not None or self.tgt_lang_id is not None: src_tokens = res["net_input"]["src_tokens"] bsz = src_tokens.size(0) if self.src_lang_id is not None: res["net_input"]["src_lang_id"] = ( torch.LongTensor([[self.src_lang_id]]).expand(bsz, 1).to(src_tokens) ) if self.tgt_lang_id is not None: res["tgt_lang_id"] = ( torch.LongTensor([[self.tgt_lang_id]]).expand(bsz, 1).to(src_tokens) ) return res def num_tokens(self, index): """Return the number of tokens in a sample. 
This value is used to enforce ``--max-tokens`` during batching.""" return max( self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0, ) def num_tokens_vec(self, indices): """Return the number of tokens for a set of positions defined by indices. This value is used to enforce ``--max-tokens`` during batching.""" sizes = self.src_sizes[indices] if self.tgt_sizes is not None: sizes = np.maximum(sizes, self.tgt_sizes[indices]) return sizes def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" return ( self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0, ) def ordered_indices(self): """Return an ordered list of indices. Batches will be constructed based on this order.""" if self.shuffle: indices = np.random.permutation(len(self)).astype(np.int64) else: indices = np.arange(len(self), dtype=np.int64) if self.buckets is None: # sort by target length, then source length if self.tgt_sizes is not None: indices = indices[np.argsort(self.tgt_sizes[indices], kind="mergesort")] return indices[np.argsort(self.src_sizes[indices], kind="mergesort")] else: # sort by bucketed_num_tokens, which is: # max(padded_src_len, padded_tgt_len) return indices[ np.argsort(self.bucketed_num_tokens[indices], kind="mergesort") ] @property def supports_prefetch(self): return getattr(self.src, "supports_prefetch", False) and ( getattr(self.tgt, "supports_prefetch", False) or self.tgt is None ) def prefetch(self, indices): self.src.prefetch(indices) if self.tgt is not None: self.tgt.prefetch(indices) if self.align_dataset is not None: self.align_dataset.prefetch(indices) def filter_indices_by_size(self, indices, max_sizes): """Filter a list of sample indices. Remove those that are longer than specified in max_sizes. Args: indices (np.array): original array of sample indices max_sizes (int or list[int] or tuple[int]): max sample size, can be defined separately for src and tgt (then list or tuple) Returns: np.array: filtered sample array list: list of removed indices """ return data_utils.filter_paired_dataset_indices_by_size( self.src_sizes, self.tgt_sizes, indices, max_sizes, )
COCO-LM/fairseq/fairseq/data/language_pair_dataset.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/data/language_pair_dataset.py", "repo_id": "COCO-LM", "token_count": 9079 }
195
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import hashlib import logging import math import numpy as np from fairseq.data import SampledMultiDataset from .sampled_multi_dataset import CollateFormat, default_virtual_size_func logger = logging.getLogger(__name__) class SampledMultiEpochDataset(SampledMultiDataset): """Samples from multiple sub-datasets according to sampling ratios using virtual epoch sizes to speed up dataloading. Args: datasets ( List[~torch.utils.data.Dataset] or OrderedDict[str, ~torch.utils.data.Dataset] ): datasets sampling_ratios (List[float]): list of probability of each dataset to be sampled (default: None, which corresponds to concating all dataset together). seed (int): RNG seed to use (default: 2). epoch (int): starting epoch number (default: 1). eval_key (str, optional): a key used at evaluation time that causes this instance to pass-through batches from *datasets[eval_key]*. collate_format (CollateFormat): collater output format, either CollateFormat.ordered_dict or CollateFormat.single (default: CollateFormat.single) where CollateFormat.single configures the collater to output batches of data mixed from all sub-datasets, and CollateFormat.ordered_dict configures the collater to output a dictionary of batches indexed by keys of sub-datasets. Note that not all sub-datasets will present in a single batch in both formats. virtual_size (int, or callable): the expected virtual size of the dataset (default: default_virtual_size_func). split (str): the split of the data, e.g. 'train', 'valid' or 'test'. virtual_epoch_size (int): virtual epoch size, the dataset will go through the data by this virtual epoch size one by one to speed up data loading, e.g. indicing and filtering can be performed whenever a virtual epoch is loaded without waiting for the whole dataset to be loaded. shared_collater (bool): whether or not to all sub-datasets have the same collater. shard_epoch (int): the real epoch number for shard selection. shuffle (bool): whether or not to shuffle data (default: True). 
""" def __init__( self, datasets, sampling_ratios=None, seed=2, epoch=1, eval_key=None, collate_format=CollateFormat.single, virtual_size=default_virtual_size_func, split="", virtual_epoch_size=None, shared_collater=False, shard_epoch=1, shuffle=True, ): self.virtual_epoch_size = virtual_epoch_size self._current_epoch_start_index = None self._random_global_indices = None self.shard_epoch = shard_epoch if shard_epoch is not None else 1 self.load_next_shard = None self._epoch_sizes = None super().__init__( datasets=datasets, sampling_ratios=sampling_ratios, seed=seed, epoch=epoch, eval_key=eval_key, collate_format=collate_format, virtual_size=virtual_size, split=split, shared_collater=shared_collater, shuffle=shuffle, ) def _setup(self, epoch): self.virtual_epoch_size = ( self.virtual_epoch_size if self.virtual_epoch_size is not None else self.virtual_size ) if self.virtual_epoch_size > self.virtual_size: logger.warning( f"virtual epoch size {self.virtual_epoch_size} " f"is greater than virtual dataset size {self.virtual_size}" ) self.virtual_epoch_size = self.virtual_size self.num_virtual_epochs = math.ceil(self.virtual_size / self.virtual_epoch_size) self._current_epoch_start_index = self._get_epoch_start_index(epoch) logger.info( f"virtual epoch size {self.virtual_epoch_size}; virtual dataset size {self.virtual_size}" ) def _map_epoch_index_to_global(self, index): index = self._current_epoch_start_index + index # add randomness return self._random_global_indices[index] @property def sizes(self): if self._epoch_sizes is not None: return self._epoch_sizes _sizes = super().sizes indices = self._random_global_indices[ self._current_epoch_start_index : self._current_epoch_start_index + len(self) ] self._epoch_sizes = _sizes[indices] # del super()._sizes to save memory del self._sizes self._sizes = None return self._epoch_sizes def _get_dataset_and_index(self, index): i = self._map_epoch_index_to_global(index) return super()._get_dataset_and_index(i) def __len__(self): return ( self.virtual_epoch_size if self._current_epoch_start_index + self.virtual_epoch_size < self.virtual_size else self.virtual_size - self._current_epoch_start_index ) def set_epoch(self, epoch): if self._current_epoch_start_index is None: # initializing epoch idnices of a virtual dataset self._setup(epoch) self._next_virtual_epoch(epoch) else: # working on already intialized epoch indices if epoch == self._cur_epoch: # re-enter so return return self._next_virtual_epoch(epoch) def _get_epoch_start_index(self, epoch): assert epoch >= 1 # fairseq is using 1-based epoch everywhere return ((epoch - 1) % self.num_virtual_epochs) * self.virtual_epoch_size def _next_global_indices(self, epoch): rng = np.random.RandomState( [ int( hashlib.sha1( str(self.__class__.__name__).encode("utf-8") ).hexdigest(), 16, ) % (2 ** 32), self.seed % (2 ** 32), # global seed epoch, # epoch index, ] ) del self._random_global_indices self._random_global_indices = rng.choice( self.virtual_size, self.virtual_size, replace=False ) if self.load_next_shard is None: self.load_next_shard = False else: # increase shard epoch for next loading self.shard_epoch += 1 self.load_next_shard = True logger.info( "to load next epoch/shard in next load_dataset: " f"epoch={epoch}/shard_epoch={self.shard_epoch}" ) def _next_virtual_epoch(self, epoch): index = self._get_epoch_start_index(epoch) if index == 0 or self._random_global_indices is None: # need to start from the beginning, # so call super().set_epoch(epoch) to establish the global virtual indices logger.info( 
"establishing a new set of global virtual indices for " f"epoch={epoch}/shard_epoch={self.shard_epoch}" ) super().set_epoch(epoch) self._next_global_indices(epoch) else: self._cur_epoch = epoch # reset cache sizes and ordered_indices for the epoch after moving to a new epoch self._clean_if_not_none( [ self._epoch_sizes, ] ) self._epoch_sizes = None self._current_epoch_start_index = index
COCO-LM/fairseq/fairseq/data/multilingual/sampled_multi_epoch_dataset.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/data/multilingual/sampled_multi_epoch_dataset.py", "repo_id": "COCO-LM", "token_count": 3554 }
196
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np

from fairseq.data import data_utils

from . import BaseWrapperDataset


class TruncateDataset(BaseWrapperDataset):
    """Truncate a sequence by returning the first truncation_length tokens"""

    def __init__(self, dataset, truncation_length):
        super().__init__(dataset)
        assert truncation_length is not None
        self.truncation_length = truncation_length
        self.dataset = dataset

    def __getitem__(self, index):
        item = self.dataset[index]
        item_len = item.size(0)
        if item_len > self.truncation_length:
            item = item[: self.truncation_length]
        return item

    @property
    def sizes(self):
        return np.minimum(self.dataset.sizes, self.truncation_length)

    def __len__(self):
        return len(self.dataset)


class RandomCropDataset(TruncateDataset):
    """Truncate a sequence by returning a random crop of truncation_length tokens"""

    def __init__(self, dataset, truncation_length, seed=1):
        super().__init__(dataset, truncation_length)
        self.seed = seed
        self.epoch = 0

    @property
    def can_reuse_epoch_itr_across_epochs(self):
        return True  # only the crop changes, not item sizes

    def set_epoch(self, epoch, **unused):
        super().set_epoch(epoch)
        self.epoch = epoch

    def __getitem__(self, index):
        with data_utils.numpy_seed(self.seed, self.epoch, index):
            item = self.dataset[index]
            item_len = item.size(0)
            excess = item_len - self.truncation_length
            if excess > 0:
                start_idx = np.random.randint(0, excess)
                item = item[start_idx : start_idx + self.truncation_length]
            return item


def maybe_shorten_dataset(
    dataset,
    split,
    shorten_data_split_list,
    shorten_method,
    tokens_per_sample,
    seed,
):
    truncate_split = (
        split in shorten_data_split_list.split(",") or len(shorten_data_split_list) == 0
    )
    if shorten_method == "truncate" and truncate_split:
        dataset = TruncateDataset(dataset, tokens_per_sample)
    elif shorten_method == "random_crop" and truncate_split:
        dataset = RandomCropDataset(dataset, tokens_per_sample, seed)
    return dataset
COCO-LM/fairseq/fairseq/data/shorten_dataset.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/data/shorten_dataset.py", "repo_id": "COCO-LM", "token_count": 1016 }
197
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from enum import Enum, EnumMeta
from typing import List


class StrEnumMeta(EnumMeta):
    # this is a workaround for submitit pickling leading to instance checks failing in hydra for StrEnum, see
    # https://github.com/facebookresearch/hydra/issues/1156
    @classmethod
    def __instancecheck__(cls, other):
        return "enum" in str(type(other))


class StrEnum(Enum, metaclass=StrEnumMeta):
    def __str__(self):
        return self.value

    def __eq__(self, other: str):
        return self.value == other

    def __repr__(self):
        return self.value

    def __hash__(self):
        return hash(str(self))


def ChoiceEnum(choices: List[str]):
    """return the Enum class used to enforce list of choices"""
    return StrEnum("Choices", {k: k for k in choices})


LOG_FORMAT_CHOICES = ChoiceEnum(["json", "none", "simple", "tqdm"])
DDP_BACKEND_CHOICES = ChoiceEnum([
    "c10d",  # alias for pytorch_ddp
    "fully_sharded",  # FullyShardedDataParallel from fairscale
    "legacy_ddp",
    "no_c10d",  # alias for legacy_ddp
    "pytorch_ddp",
    "slow_mo",
])
DATASET_IMPL_CHOICES = ChoiceEnum(["raw", "lazy", "cached", "mmap", "fasta"])
GENERATION_CONSTRAINTS_CHOICES = ChoiceEnum(["ordered", "unordered"])
GENERATION_DECODING_FORMAT_CHOICES = ChoiceEnum(
    ["unigram", "ensemble", "vote", "dp", "bs"]
)
ZERO_SHARDING_CHOICES = ChoiceEnum(["none", "os"])
PIPELINE_CHECKPOINT_CHOICES = ChoiceEnum(["always", "never", "except_last"])
PRINT_ALIGNMENT_CHOICES = ChoiceEnum(["hard", "soft"])
COCO-LM/fairseq/fairseq/dataclass/constants.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/dataclass/constants.py", "repo_id": "COCO-LM", "token_count": 648 }
198
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import bisect import time from collections import OrderedDict from typing import Dict, Optional try: import torch def type_as(a, b): if torch.is_tensor(a) and torch.is_tensor(b): return a.to(b) else: return a except ImportError: torch = None def type_as(a, b): return a try: import numpy as np except ImportError: np = None class Meter(object): """Base class for Meters.""" def __init__(self): pass def state_dict(self): return {} def load_state_dict(self, state_dict): pass def reset(self): raise NotImplementedError @property def smoothed_value(self) -> float: """Smoothed value used for logging.""" raise NotImplementedError def safe_round(number, ndigits): if hasattr(number, "__round__"): return round(number, ndigits) elif torch is not None and torch.is_tensor(number) and number.numel() == 1: return safe_round(number.item(), ndigits) elif np is not None and np.ndim(number) == 0 and hasattr(number, "item"): return safe_round(number.item(), ndigits) else: return number class AverageMeter(Meter): """Computes and stores the average and current value""" def __init__(self, round: Optional[int] = None): self.round = round self.reset() def reset(self): self.val = None # most recent update self.sum = 0 # sum from all updates self.count = 0 # total n from all updates def update(self, val, n=1): if val is not None: self.val = val if n > 0: self.sum = type_as(self.sum, val) + (val * n) self.count = type_as(self.count, n) + n def state_dict(self): return { "val": self.val, "sum": self.sum, "count": self.count, "round": self.round, } def load_state_dict(self, state_dict): self.val = state_dict["val"] self.sum = state_dict["sum"] self.count = state_dict["count"] self.round = state_dict.get("round", None) @property def avg(self): return self.sum / self.count if self.count > 0 else self.val @property def smoothed_value(self) -> float: val = self.avg if self.round is not None and val is not None: val = safe_round(val, self.round) return val class TimeMeter(Meter): """Computes the average occurrence of some event per second""" def __init__( self, init: int = 0, n: int = 0, round: Optional[int] = None, ): self.round = round self.reset(init, n) def reset(self, init=0, n=0): self.init = init self.start = time.perf_counter() self.n = n self.i = 0 def update(self, val=1): self.n = type_as(self.n, val) + val self.i += 1 def state_dict(self): return { "init": self.elapsed_time, "n": self.n, "round": self.round, } def load_state_dict(self, state_dict): if "start" in state_dict: # backwards compatibility for old state_dicts self.reset(init=state_dict["init"]) else: self.reset(init=state_dict["init"], n=state_dict["n"]) self.round = state_dict.get("round", None) @property def avg(self): return self.n / self.elapsed_time @property def elapsed_time(self): return self.init + (time.perf_counter() - self.start) @property def smoothed_value(self) -> float: val = self.avg if self.round is not None and val is not None: val = safe_round(val, self.round) return val class StopwatchMeter(Meter): """Computes the sum/avg duration of some event in seconds""" def __init__(self, round: Optional[int] = None): self.round = round self.sum = 0 self.n = 0 self.start_time = None def start(self): self.start_time = time.perf_counter() def stop(self, n=1, prehook=None): if self.start_time is not None: if prehook is not None: prehook() delta = time.perf_counter() - 
self.start_time self.sum = self.sum + delta self.n = type_as(self.n, n) + n def reset(self): self.sum = 0 # cumulative time during which stopwatch was active self.n = 0 # total n across all start/stop self.start() def state_dict(self): return { "sum": self.sum, "n": self.n, "round": self.round, } def load_state_dict(self, state_dict): self.sum = state_dict["sum"] self.n = state_dict["n"] self.start_time = None self.round = state_dict.get("round", None) @property def avg(self): return self.sum / self.n if self.n > 0 else self.sum @property def elapsed_time(self): if self.start_time is None: return 0.0 return time.perf_counter() - self.start_time @property def smoothed_value(self) -> float: val = self.avg if self.sum > 0 else self.elapsed_time if self.round is not None and val is not None: val = safe_round(val, self.round) return val class MetersDict(OrderedDict): """A sorted dictionary of :class:`Meters`. Meters are sorted according to a priority that is given when the meter is first added to the dictionary. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.priorities = [] def __setitem__(self, key, value): assert key not in self, "MetersDict doesn't support reassignment" priority, value = value bisect.insort(self.priorities, (priority, len(self.priorities), key)) super().__setitem__(key, value) for _, _, key in self.priorities: # reorder dict to match priorities self.move_to_end(key) def add_meter(self, key, meter, priority): self.__setitem__(key, (priority, meter)) def state_dict(self): return [ (pri, key, self[key].__class__.__name__, self[key].state_dict()) for pri, _, key in self.priorities # can't serialize DerivedMeter instances if not isinstance(self[key], MetersDict._DerivedMeter) ] def load_state_dict(self, state_dict): self.clear() self.priorities.clear() for pri, key, meter_cls, meter_state in state_dict: meter = globals()[meter_cls]() meter.load_state_dict(meter_state) self.add_meter(key, meter, pri) def get_smoothed_value(self, key: str) -> float: """Get a single smoothed value.""" meter = self[key] if isinstance(meter, MetersDict._DerivedMeter): return meter.fn(self) else: return meter.smoothed_value def get_smoothed_values(self) -> Dict[str, float]: """Get all smoothed values.""" return OrderedDict( [ (key, self.get_smoothed_value(key)) for key in self.keys() if not key.startswith("_") ] ) def reset(self): """Reset Meter instances.""" for meter in self.values(): if isinstance(meter, MetersDict._DerivedMeter): continue meter.reset() class _DerivedMeter(Meter): """A Meter whose values are derived from other Meters.""" def __init__(self, fn): self.fn = fn def reset(self): pass
COCO-LM/fairseq/fairseq/logging/meters.py/0
{ "file_path": "COCO-LM/fairseq/fairseq/logging/meters.py", "repo_id": "COCO-LM", "token_count": 3631 }
199