Dataset schema:
- text: string (lengths 5 to 22M)
- id: string (lengths 12 to 177)
- metadata: dict
- __index_level_0__: int64 (values 0 to 1.37k)
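The records below pair each source file's raw text with its repository path and a token count. As a minimal sketch of how a dataset with this schema might be loaded and inspected with the Hugging Face `datasets` library — the file name `data.parquet` and the `train` split are illustrative assumptions, not part of this dump:

# A minimal sketch, assuming the records are stored in a local parquet file.
# The path "data.parquet" is hypothetical; substitute the real data files.
from datasets import load_dataset

ds = load_dataset("parquet", data_files="data.parquet", split="train")

row = ds[0]
print(row["id"])                   # repository path of the file, e.g. ".../augmented_search.py/0"
print(row["metadata"]["repo_id"])  # source repository, e.g. "archai"
print(row["text"][:200])           # first 200 characters of the file contents

Each record's `metadata` mirrors the `file_path`, `repo_id`, and `token_count` fields shown inline below.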
from archai.common.common import common_init
from archai.supergraph.utils.augmented_searcher import search

if __name__ == "__main__":
    conf = common_init(config_filepath="confs/aug/wresnet40x2_cifar10_b512.yaml")
    search(conf)
archai/scripts/supergraph/augmented_search.py/0
{ "file_path": "archai/scripts/supergraph/augmented_search.py", "repo_id": "archai", "token_count": 89 }
342
import random

import ray


def sample_from_parent_pool(parentpool):
    if len(parentpool):
        # Finds a model by computing the lower convex hull of the
        # genotype-accuracy pool and sampling along a tolerance region around it.
        # Here we are just going to simulate it.
        index = random.randint(0, len(parentpool) - 1)
        genotype, accuracy = list(parentpool)[index]
        return genotype
    else:
        return None


@ray.remote
def train_a_model(all_info_for_train_model):
    # Simulate training a model
    # time.sleep(random.randint(0, 3))
    up_lim = random.randint(1000, 1000000)
    counter = 0
    for i in range(up_lim):
        counter = counter + 1
    # Return trained model
    acc = random.random()
    print(f"Trained {all_info_for_train_model}")
    return all_info_for_train_model, acc


if __name__ == "__main__":
    ray.init()
    num_cpus = ray.nodes()[0]["Resources"]["CPU"]

    # Seed genotype
    seed_genotype = "randomstring"
    seed_genotype_accuracy = 0.1

    # Need parent, init list
    parent_set = {(seed_genotype, seed_genotype_accuracy)}

    # Sample a model from parent pool and add to init queue
    # according to pareto frontier stuff
    model = sample_from_parent_pool(parent_set)
    model = model + "_init"

    # Parallel train
    result_all_ids = [train_a_model.remote(model)]

    while len(result_all_ids):
        print(f"Num jobs {len(result_all_ids)}")
        done_id, result_all_ids = ray.wait(result_all_ids)
        print(f"After ray.wait {len(result_all_ids)}")
        # NOTE: Why do we need to index into done_id?
        trained_model, acc = ray.get(done_id[0])
        if trained_model[-4:] == "init":
            # Augmented model just finished training
            # Start another remote job to train it
            trained_model = trained_model + "_child"
            this_id = train_a_model.remote(trained_model)
            result_all_ids.append(this_id)
        elif trained_model[-5:] == "child":
            # Final model just finished training
            # Add it to the parent set
            # And sample another model
            parent_set.add((trained_model, acc))
            print(f"Parent set size {len(parent_set)}")
            model = sample_from_parent_pool(parent_set)
            model = model + "_init"
            this_id = train_a_model.remote(model)
            result_all_ids.append(this_id)

        # If there are fewer models being trained in parallel
        # than there are cpus (later gpus) sample more models
        # from parent pool to not waste compute
        if len(result_all_ids) < num_cpus:
            num_empty_res = int(num_cpus - len(result_all_ids))
            model_list = []
            for i in range(num_empty_res):
                model = sample_from_parent_pool(parent_set)
                model = model + "_init"
                model_list.append(model)
            result_ids = [train_a_model.remote(model) for model in model_list]
            result_all_ids += result_ids
archai/scripts/supergraph/petridish_ray_mock.py/0
{ "file_path": "archai/scripts/supergraph/petridish_ray_mock.py", "repo_id": "archai", "token_count": 1299 }
343
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Python: Current File",
            "type": "python",
            "request": "launch",
            "program": "${file}",
            "console": "integratedTerminal",
            "justMyCode": true,
            "args": []
        }
    ]
}
archai/tasks/face_segmentation/.vscode/launch.json/0
{ "file_path": "archai/tasks/face_segmentation/.vscode/launch.json", "repo_id": "archai", "token_count": 249 }
344
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# See Readme.md

import argparse
import json
import os
import sys
import glob
import time
from datetime import datetime
import platform
import statistics
import tracemalloc
import gc
import psutil
import logging
import traceback
from shutil import rmtree
from archai.common.store import ArchaiStore
from usage import add_usage
from cleanup_stale_pods import cleanup_stale_pods
from azure.data.tables import EntityProperty, EdmType

# This file contains wrappers on the snpe execution of the model connecting everything
# to the Azure table describing the jobs that need to be run, and keeping the status
# rows up to date while these jobs are running.

CONNECTION_NAME = 'MODEL_STORAGE_CONNECTION_STRING'
SNPE_OUTPUT_DIR = 'snpe_output'
MODEL_DIR = 'model'
SNPE_MODEL_DIR = 'snpe_models'
MAX_BENCHMARK_RUNS = 5
BENCHMARK_INPUT_SIZE = 50
CLEAR_RANDOM_INPUTS = 0
LOG_FILE_NAME = 'memusage.log'
DEVICE_FILE = "device.txt"
UNIQUE_NODE_ID = None
BENCHMARK_RUN_COUNT = 0

SCRIPT_DIR = os.path.dirname(__file__)
sys.path += [os.path.join(SCRIPT_DIR, '..', 'snpe')]
sys.path += [os.path.join(SCRIPT_DIR, '..', 'util')]
sys.path += [os.path.join(SCRIPT_DIR, '..', 'vision')]

rss_start = None

# Set the logging level for all azure-* libraries
logging.getLogger('azure').setLevel(logging.ERROR)
logging.getLogger('azure.core.pipeline.policies.http_logging_policy').setLevel(logging.ERROR)

from test_snpe import convert_model, quantize_model, run_benchmark
from test_snpe import run_batches, set_device, get_device
from create_data import create_dataset
from collect_metrics import get_metrics
from priority_queue import PriorityQueue
from test_onnx import test_onnx
from dlc_helper import get_dlc_metrics

logger = logging.getLogger(__name__)
store: ArchaiStore = None
usage: ArchaiStore = None


def log(msg):
    print(msg)
    logger.info(msg)


def log_error(error_type, value, stack):
    log(f'### Exception: {error_type}: {value}')
    for line in traceback.format_tb(stack):
        log(line.strip())


def read_shape(dir):
    shape_file = os.path.join(dir, 'shape.txt')
    if os.path.isfile(shape_file):
        with open(shape_file, 'r', encoding='utf-8') as f:
            return eval(f.readline().strip())
    return [0, 0, 0]


def save_shape(dir, shape):
    shape_file = os.path.join(dir, 'shape.txt')
    with open(shape_file, 'w', encoding='utf-8') as f:
        f.write(str(shape))
    return [0, 0, 0]


def check_device(device):
    set_device(device)
    device_info = ''
    if os.path.isfile(DEVICE_FILE):
        with open(DEVICE_FILE, 'r', encoding='utf-8') as f:
            device_info = f.readline().strip()
    if device_info != device:
        with open(DEVICE_FILE, 'w', encoding='utf-8') as f:
            f.write(f"{device}\n")


def check_dataset(shape, name, test_size):
    w, h, c = shape
    img_size = (w, h)
    test = os.path.join('data', name)
    if os.path.isdir(test):
        s = read_shape(test)
        if s != shape:
            log(f"recreating {name} folder since shape needs to change from {s} to {shape}")
            rmtree(test)
        else:
            bins = [x for x in os.listdir(test) if x.endswith('.bin')]
            if len(bins) != test_size:
                log(f"recreating test folder since it had {len(bins)} images")
                rmtree(test)

    if not os.path.isdir(test):
        create_dataset(dataset, name, img_size, test_size)
        save_shape(test, shape)


def get_entity_shape(entity, name):
    if name in entity:
        return eval(entity[name])
    return []


def record_error(entity, error_message):
    global store
    entity['status'] = 'error'
    entity['error'] = error_message
    store.merge_status_entity(entity)


def convert(experiment, name, entity, long_name, model_path):
    global store
    log("Converting model: " + long_name)
    entity['model_name'] = long_name
    entity['status'] = 'converting'
    store.merge_status_entity(entity)

    model_dir = os.path.join(name, SNPE_MODEL_DIR)
    model, input_shape, output_shape, error = convert_model(model_path, model_dir)
    if error:
        record_error(entity, error)
        return 'error'

    if input_shape != get_entity_shape(entity, 'shape') or output_shape != get_entity_shape(entity, 'output_shape'):
        entity['shape'] = str(input_shape)
        entity['output_shape'] = str(output_shape)
        store.merge_status_entity(entity)

    log("Uploading converted model: " + model)
    blob_name = f'{experiment}/{name}'
    store.upload_blob(blob_name, model)
    return model


def quantize(experiment, name, entity, onnx_model, model):
    global store
    log("Quantizing model: " + name + "...")
    log(" (Please be patient this can take a while, up to 10 minutes or more)")
    entity['status'] = 'quantizing'
    store.merge_status_entity(entity)

    input_shape = eval(entity['shape'])
    check_dataset(input_shape, 'quant', 1000)

    snpe_model_dir = os.path.join(name, SNPE_MODEL_DIR)
    model, error = quantize_model(model, onnx_model, snpe_model_dir)
    if error:
        record_error(entity, error)
        return 'error'

    # save the quantized .dlc since it takes so long to produce.
    log("Uploading quantized model: " + model)
    blob_name = f'{experiment}/{name}'
    store.upload_blob(blob_name, model)
    return model


def get_unique_node_id():
    global UNIQUE_NODE_ID
    if UNIQUE_NODE_ID:
        return UNIQUE_NODE_ID
    return platform.node()


def set_unique_node_id(id):
    global UNIQUE_NODE_ID
    UNIQUE_NODE_ID = id


def is_locked(entity):
    node = get_unique_node_id()
    if 'node' in entity and entity['node']:
        name = entity['name']
        locked = entity['node']
        if locked != node:
            log(f"{node}: model {name} is running on: {locked}")
            return 'busy'
    return None


def lock_job(entity):
    global store
    node = get_unique_node_id()
    name = entity['name']
    # make sure we have the most up to date version of the entity.
    entity = store.get_status(name)
    retries = 10
    while retries:
        retries -= 1
        if is_locked(entity):
            # someone beat us to it
            raise Exception('lock encountered')
        entity['node'] = node
        try:
            store.merge_status_entity(entity)
            break
        except Exception as e:
            # someone beat us to it!
            log(f"lock failed: {e}")
            log("entity may have been changed by someone else, trying again...")

    # make sure we really got the lock!
    entity = store.get_status(name)
    if 'node' in entity and entity['node'] == node:
        return entity
    # someone beat us to it
    raise Exception('lock encountered')


def unlock_job(entity):
    global store
    node = get_unique_node_id()
    # make sure we have the most up to date version of the entity.
    entity = store.get_status(entity['name'])
    if 'node' in entity:
        if entity['node'] and entity['node'] != node:
            lock = entity['node']
            raise Exception(f'cannot unlock entity because it is locked by someone else ({lock})')
        else:
            entity['node'] = ''
            retries = 10
            while retries:
                retries -= 1
                try:
                    store.merge_status_entity(entity)
                    break
                except:
                    # someone beat us to it!
                    log("unlock failed, entity changed by someone else, trying again...")
    return entity


def run_onnx(name, dataset, model_path, test_size):
    out_dir = os.path.join(name, SNPE_OUTPUT_DIR, 'onnx_outputs')
    if os.path.isdir(out_dir):
        rmtree(out_dir)
    test_onnx(dataset, model_path, out_dir, test_size)
    return out_dir


def is_complete(entity, prop):
    return prop in entity


def is_true(entity, prop):
    return prop in entity and entity[prop]


def get_total_inference_avg(entity):
    if 'total_inference_avg' in entity and entity['total_inference_avg']:
        try:
            return json.loads(entity['total_inference_avg'])
        except:
            pass
    return []


def benchmarks_complete(entity):
    return len(get_total_inference_avg(entity))


def get_mean_benchmark(entity):
    avg = get_total_inference_avg(entity)
    if len(avg) > 0:
        return statistics.mean(avg)
    return 0


def get_avg_latency(latencies):
    count = 0
    sum = 0
    for m in latencies:
        for ifs in m['total_inference_time']:
            sum += float(ifs)
            count += 1
    return sum / count


def benchmark(experiment, entity, onnx_model, model, name, test_input):
    global BENCHMARK_RUN_COUNT, CLEAR_RANDOM_INPUTS, store, usage
    # next highest priority is to get benchmark times
    total_benchmark_runs = benchmarks_complete(entity)
    if (total_benchmark_runs >= MAX_BENCHMARK_RUNS):
        return False  # nothing to do

    if total_benchmark_runs < MAX_BENCHMARK_RUNS:
        BENCHMARK_RUN_COUNT += 1
        if CLEAR_RANDOM_INPUTS > 0 and BENCHMARK_RUN_COUNT >= CLEAR_RANDOM_INPUTS:
            clear_random_inputs()
            BENCHMARK_RUN_COUNT = 0

        log(f"Running benchmark iteration {total_benchmark_runs} of {MAX_BENCHMARK_RUNS}...")
        entity['status'] = 'running benchmark'
        store.merge_status_entity(entity)

        start = store.get_utc_date()
        # TODO: calibrate the duration from 10 seconds to whatever time would produce the best results...
        output_dir, latencies = run_benchmark(onnx_model, model, test_input, 10, name)
        ifs = get_avg_latency(latencies)
        end = store.get_utc_date()
        add_usage(usage, get_device(), start, end)

        for file in glob.glob(os.path.join(output_dir, 'perf_results*.csv')):
            store.upload_blob(f'{experiment}/{name}', file)

        total_inference_avg = get_total_inference_avg(entity)
        total_inference_avg += [ifs]
        entity['total_inference_avg'] = json.dumps(total_inference_avg)
        mean = statistics.mean(total_inference_avg)
        entity['mean'] = mean
        if len(total_inference_avg) > 1:
            stdev = statistics.stdev(total_inference_avg)
            entity['stdev'] = (stdev * 100) / mean
        total_benchmark_runs += 1
    else:
        mean = get_mean_benchmark(entity)

    if is_benchmark_only(entity, False) and total_benchmark_runs == MAX_BENCHMARK_RUNS:
        entity['status'] = 'complete'
        entity['completed'] = store.get_utc_date()

    store.merge_status_entity(entity)
    return True


def ensure_complete(entity):
    global store
    if entity['status'] != 'complete':
        entity['status'] = 'complete'
        name = entity['name']
        log(f"Completed {name}")
        store.merge_status_entity(entity)


def run_model(experiment, name, dataset, use_device, benchmark_only, no_quantization):
    global store, usage
    log("===================================================================================================")
    log(f"Checking model: {name} on node {get_unique_node_id()}")
    log("===================================================================================================")

    with open('name.txt', 'w', encoding='utf-8') as file:
        file.write(name + '\n')

    # make sure we have a clean slate and don't pick up old files from previous runs
    model_dir = os.path.join(name, MODEL_DIR)
    if os.path.isdir(model_dir):
        rmtree(model_dir)
    os.makedirs(model_dir)
    snpe_model_dir = os.path.join(name, SNPE_MODEL_DIR)
    if os.path.isdir(snpe_model_dir):
        rmtree(snpe_model_dir)
    snpe_output_dir = os.path.join(name, SNPE_OUTPUT_DIR)
    if os.path.isdir(snpe_output_dir):
        rmtree(snpe_output_dir)
    benchmark_dir = os.path.join(name, 'benchmark')
    if os.path.isdir(benchmark_dir):
        rmtree(benchmark_dir)

    entity = store.get_status(name)
    blob_name = f'{experiment}/{name}'
    downloaded = store.download(blob_name, model_dir, r'.*\.onnx$')
    if len(downloaded) == 0 or not os.path.isfile(downloaded[0]):
        record_error(entity, 'missing model')
        log(f"### no model found for {name}")
        return

    onnx_model = downloaded[0]
    long_name = os.path.basename(onnx_model)

    # see if we have converted the model or not.
    # do this first no matter what.
    converted = len(store.list_blobs(f'{blob_name}/model.dlc')) > 0
    is_quantized = len(store.list_blobs(f'{blob_name}/model.quant.dlc')) > 0

    if not is_quantized:
        # oh, the quant model disappeared so clear the flag so it gets
        # quantized again by a machine that can do that.
        if 'quantized' in entity:
            del entity['quantized']
            store.update_status_entity(entity)
        if no_quantization:
            return

    if 'shape' not in entity:
        # hmmm, a bad reset? Then pretend it is not converted so we get the shape back.
        converted = False

    if not converted:
        model = convert(experiment, name, entity, long_name, onnx_model)
        if model == 'error':
            return
    elif converted:
        downloaded = store.download(blob_name, snpe_model_dir, 'model.dlc')
        if len(downloaded) == 0:
            raise Exception('### internal error, the model.dlc download failed!')
    elif not is_quantized and not converted:
        record_error(entity, 'missing model')
        log(f"### no model found for {name}")
        return

    # see if we have a quantized model or not.
    model = os.path.join(snpe_model_dir, 'model.dlc')
    if not is_quantized:
        model = quantize(experiment, name, entity, onnx_model, model)
        if model == 'error':
            return
        entity['quantized'] = True
        if 'macs' in entity:
            del entity['macs']  # need to redo it since we re-quantized.
        store.update_status_entity(entity)
    else:
        entity['quantized'] = True
        store.merge_status_entity(entity)

    quantized_model = os.path.join(snpe_model_dir, 'model.quant.dlc')
    if not os.path.isfile(quantized_model):
        downloaded = store.download(blob_name, snpe_model_dir, 'model.quant.dlc')
        if len(downloaded) == 0 or not os.path.isfile(downloaded[0]):
            raise Exception("??? quantized model should exist at this point...")
        quantized_model = downloaded[0]

    if 'macs' not in entity:
        csv_data, macs, params = get_dlc_metrics(quantized_model)
        entity['macs'] = macs
        entity['params'] = params
        entity['status'] = 'converted'
        store.merge_status_entity(entity)
        csv_file = os.path.join(snpe_model_dir, 'model.quant.info.csv')
        with open(csv_file, 'w') as f:
            f.write(csv_data)
        store.upload_blob(blob_name, csv_file)
        return

    input_shape = eval(entity['shape'])
    if use_device:
        check_dataset(input_shape, 'test', 1000)
        test_input = os.path.realpath(os.path.join('data', 'test'))
        if benchmark(experiment, entity, onnx_model, quantized_model, name, test_input):
            return

    if benchmark_only:
        log(f"Benchmark only has nothing to do on model {name}")
        ensure_complete(entity)
        return

    # next highest priority is to get the 1k f1 score.
    test_size = 0
    prop = None
    if use_device and not is_complete(entity, 'f1_1k'):
        test_size = 1000
        prop = 'f1_1k'
        model = quantized_model  # use the quantized model
    elif use_device and not is_complete(entity, 'f1_1k_f'):
        test_size = 1000
        prop = 'f1_1k_f'
        if not converted:
            # this is a model that is prequantized, we don't have the original
            entity[prop] = 'n/a'
            entity['status'] = '.dlc model not found'
            store.merge_status_entity(entity)
            return
        os.remove(quantized_model)  # make sure we can't run this one.
    elif use_device and not is_complete(entity, 'f1_10k'):
        test_size = 10000
        prop = 'f1_10k'
        model = quantized_model  # use the quantized model
    elif not is_complete(entity, 'f1_onnx'):
        test_size = 10000
        prop = 'f1_onnx'
        model = onnx_model
    else:
        # why are we here?
        return

    log(f"==> running {prop} test using model {model}")

    # copy model to the device.
    if prop != 'f1_onnx':
        # now that we have the shape, we can create the appropriate quant and test
        # datasets!
        check_dataset(input_shape, 'test', test_size)

    if prop == 'f1_onnx':
        entity['status'] = f'Running {prop}'
        store.merge_status_entity(entity)
        snpe_output_dir = run_onnx(name, dataset, onnx_model, test_size)
    else:
        entity['status'] = f'Running {prop}'
        store.merge_status_entity(entity)
        test_input = os.path.realpath(os.path.join('data', 'test'))
        start = store.get_utc_date()
        snpe_output_dir, latencies = run_batches(onnx_model, model, test_input, name)
        end = store.get_utc_date()
        add_usage(usage, get_device(), start, end)

    try:
        use_pillow = 'use_pillow' in entity and entity['use_pillow']
        num_classes = 19
        if 'output_shape' in entity:
            w, h, num_classes = eval(entity['output_shape'])
        test_results, chart, f1score = get_metrics(input_shape, False, dataset, snpe_output_dir,
                                                   num_classes, use_pillow)
    except Exception as ex:
        record_error(entity, str(ex))
        return

    log(f"### Saving {prop} score of {f1score}")
    entity[prop] = f1score
    store.merge_status_entity(entity)
    store.upload_blob(blob_name, test_results, f"test_results_{prop}.csv")
    store.upload_blob(blob_name, chart, f"pr_curve_{prop}.png")

    if 'f1_1k' in entity and 'f1_10k' in entity and 'f1_1k_f' in entity and 'f1_onnx' in entity:
        ensure_complete(entity)


def clear_random_inputs():
    if os.path.isdir('random_inputs'):
        log("Clearing random_inputs.")
        rmtree('random_inputs')


def is_benchmark_only(entity, benchmark_only):
    benchmark_only_flag = benchmark_only
    if 'benchmark_only' in entity:
        benchmark_only_flag = int(entity['benchmark_only'])
    return benchmark_only_flag


def node_quantizing():
    """ We don't want to do more than one quantization at a time on a given node
    because it is a CPU-intensive operation. """
    global store
    id = platform.node() + '_'
    count = 0
    for e in store.get_all_status_entities(status='complete', not_equal=True):
        status = ''
        if 'status' in e:
            status = e['status']
        if 'node' not in e:
            continue
        node = e['node']
        if node.startswith(id) and node != get_unique_node_id() and \
                (status == 'converting' or status == 'quantizing'):
            count += 1
    return count > 0


def check_stale_pods(timeout=3600):
    """ This function checks whether any quantization jobs are getting stuck in the
    kubernetes cluster for longer than the given timeout and automatically resets them
    if the kubernetes pod no longer exists. """
    global store
    clean = False
    for entity in store.get_all_status_entities(status='complete', not_equal=True):
        if is_locked(entity):
            node = entity['node']
            if node.startswith("snpe-quantizer"):
                utc_format = "%Y-%m-%dT%H:%M:%SZ"
                if 'check' not in entity:
                    entity['check'] = datetime.strftime(datetime.utcnow(), utc_format)
                    store.merge_status_entity(entity)
                else:
                    start_time = datetime.strptime(entity['check'], utc_format)
                    diff = datetime.utcnow() - start_time
                    if diff.seconds > timeout:
                        clean = True
                        break

    if clean:
        cleanup_stale_pods(store)

    for entity in store.get_all_status_entities(status='complete', not_equal=True):
        if 'check' in entity:
            del entity['check']
            store.update_status_entity(entity)


# flake8: noqa: C901
def find_work_prioritized(use_device, benchmark_only, subset_list, no_quantization):
    global store
    queue = PriorityQueue()
    quantizing = no_quantization or node_quantizing()
    for entity in store.get_all_status_entities(status='complete', not_equal=True):
        name = entity['name']
        if subset_list is not None and name not in subset_list:
            log(f"# skipping model {name} because it is not in the subset list")
            continue
        total_benchmark_runs = benchmarks_complete(entity)
        if is_locked(entity):
            log(f"# skip entity {name} because someone else is working on it")
            continue
        if 'error' in entity:
            log(f"# skipping {name} because something went wrong on previous step.")
            continue
        if not is_complete(entity, 'macs') or not is_true(entity, 'quantized'):
            if quantizing:
                if no_quantization:
                    log(f"This node is running with --no_quantization, skipping model '{name}' for now until " +
                        "quantization cluster completes.")
                else:
                    log(f"Skipping model '{name}' for now until other quantization finishes on our node")
                continue
            priority = 20
        elif use_device and (total_benchmark_runs < MAX_BENCHMARK_RUNS):
            priority = 30 + total_benchmark_runs
        elif is_benchmark_only(entity, benchmark_only):
            # this model is done!
            continue
        elif not is_complete(entity, 'f1_onnx'):
            priority = 60
        elif use_device and not is_complete(entity, 'f1_1k'):
            priority = 100 + get_mean_benchmark(entity)
        elif use_device and not is_complete(entity, 'f1_1k_f'):
            priority = 100 + get_mean_benchmark(entity) * 10
        elif use_device and not is_complete(entity, 'f1_10k'):
            # prioritize by how fast the model is!
            priority = 100 + get_mean_benchmark(entity) * 100
        else:
            # this model is done!
            continue

        if 'priority' in entity:
            # allow user to override the priority
            priority = int(entity['priority'])

        queue.enqueue(priority, entity)
    return queue


def garbage_collect():
    # remove old folders so we don't grow disk usage forever
    now = time.time()
    one_day = 60 * 60 * 24
    for f in list(os.listdir()):
        if os.path.isdir(f) and f != 'data' and f != 'random_inputs' and f != 'DEVICE_FILE':
            mod = os.path.getmtime(f)
            if now - mod > one_day:
                log(f"Garbage collecting {f}...")
                rmtree(f)


class MemoryMonitor:
    def __init__(self):
        self.rss_start = None
        self.growth = 0

    def heap_growth(self):
        rss = psutil.Process(os.getpid()).memory_info().rss
        if self.rss_start is None:
            self.rss_start = rss
        growth = rss / self.rss_start
        logging.info(f"========= memory rss={rss} growth={growth}============")
        return growth


def monitor(experiment, dataset, use_device, benchmark_only, subset_list, no_quantization):
    global rss_start, store, usage
    logging.basicConfig(filename=LOG_FILE_NAME, filemode='a',
                        level=logging.INFO,
                        format='%(asctime)s - %(levelname)s: %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S %p')

    monitor = MemoryMonitor()
    file_mod = os.path.getmtime(__file__)

    # terminate this script if the memory has grown too much or the script
    # itself has been modified. This will cause the outer 'loop.sh' to
    # loop and start a fresh process and pick up any code modifications.
    while monitor.heap_growth() < 10:
        if file_mod != os.path.getmtime(__file__):
            log("Code has changed, need to restart.")
            return 0

        try:
            queue = find_work_prioritized(use_device, benchmark_only, subset_list, no_quantization)
        except Exception as e:
            log(f"Error in find_work_prioritized: {e}")
            time.sleep(60)
            continue

        if queue.size() == 0:
            log("No work found.")
            return 0
        else:
            garbage_collect()

            # do the top priority job then go back to find_work_prioritized in case
            # other jobs were added/completed in parallel while this was executing.
            priority, entity = queue.dequeue()
            name = entity['name']
            locked = False
            try:
                entity = lock_job(entity)
                locked = True
                benchmark_only_flag = is_benchmark_only(entity, benchmark_only)
                gc.collect()
                tracemalloc.start()
                snapshot1 = tracemalloc.take_snapshot()
                run_model(experiment, name, dataset, use_device, benchmark_only_flag, no_quantization)
                gc.collect()
                snapshot2 = tracemalloc.take_snapshot()
                for i in snapshot2.compare_to(snapshot1, 'lineno')[:10]:
                    logging.info(i)
                unlock_job(entity)
            except Exception as e:
                error_type, value, stack = sys.exc_info()
                if str(e) == 'lock encountered':
                    log('model is running on another machine')
                elif 'ConnectionResetError' in str(e):
                    log('ConnectionResetError: Ignoring Azure flakiness...')
                    unlock_job(entity)
                else:
                    # bug in the script somewhere... don't leave the node locked.
                    log_error(error_type, value, stack)
                    if locked:
                        unlock_job(entity)
                    sys.exit(1)

        time.sleep(10)  # give other machines a chance to grab work so we don't get stuck in retry loops.

    # we terminate here to reclaim the leaked memory, and to ensure we shut down cleanly without
    # leaving any rows in the table locked, we have an outer loop.sh script that will restart the runner.
    log("Memory leak detected")
    return 0


def get_storage_account(con_str):
    parts = con_str.split(';')
    for part in parts:
        name_value = part.split('=')
        if name_value[0] == 'AccountName' and len(name_value) > 1:
            return name_value[1]


def setup_store():
    global store, usage
    experiment_name = os.getenv("EXPERIMENT_NAME", "facesynthetics")
    conn_string = os.getenv(CONNECTION_NAME)
    if not conn_string:
        log(f"Please specify your {CONNECTION_NAME} environment variable.")
        sys.exit(1)
    storage_account_name, storage_account_key = ArchaiStore.parse_connection_string(conn_string)
    store = ArchaiStore(storage_account_name, storage_account_key, table_name=experiment_name)
    usage = ArchaiStore(storage_account_name, storage_account_key, table_name='usage')
    return conn_string


def check_environment():
    con_str = os.getenv(CONNECTION_NAME)
    if not con_str:
        log(f"Please set your {CONNECTION_NAME} environment variable.")
        sys.exit(1)

    print(f'Using storage account: "{get_storage_account(con_str)}"')

    snpe_root = os.getenv("SNPE_ROOT")
    if not snpe_root:
        log("Please specify your 'SNPE_ROOT' environment variable.")
        sys.exit(1)
    if not os.path.isdir(snpe_root):
        log(f"Your SNPE_ROOT '{snpe_root}' is not found.")
        sys.exit(1)

    sys.path += [f'{snpe_root}/benchmarks', f'{snpe_root}/lib/python']

    ndk = os.getenv("ANDROID_NDK_ROOT")
    if not ndk:
        log("you must have a ANDROID_NDK_ROOT installed, see the ../device/readme.md")
        sys.exit(1)
    if not os.path.isdir(ndk):
        log(f"Your ANDROID_NDK_ROOT '{ndk}' is not found.")
        sys.exit(1)

    dataset = os.getenv("INPUT_DATASET")
    if not dataset:
        log("please provide --input or set your INPUT_DATASET environment variable")
        sys.exit(1)
    if not os.path.isdir(dataset):
        log(f"Your INPUT_DATASET '{dataset}' is not found.")
        sys.exit(1)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Test the models as they appear in our Azure table')
    parser.add_argument('--device', '-d', help='Specify which Qualcomm device to use (default None).')
    parser.add_argument('--benchmark', help='Run benchmark tests only (no F1 tests).', action="store_true")
    parser.add_argument('--max_benchmark_runs', type=int, help='Set maximum number of benchmark runs per model ' +
                        '(default 5).', default=5)
    parser.add_argument('--subset', help='Comma separated list of friendly model names to focus on, ' +
                        'ignoring all other models.')
    parser.add_argument('--clear_random_inputs', type=int, help='How many benchmark runs before clearing ' +
                        'random_inputs (default 0 means no clearing).', default=0)
    parser.add_argument('--no_quantization', help='Do not do any quantization work on this machine.',
                        action="store_true")
    parser.add_argument('--working', help='Use this working folder for all downloaded models and temp files ' +
                        '(default cwd).')
    parser.add_argument('--cleanup_stale_pods', type=int, help='specify how often (in seconds) to check for stale ' +
                        'kubernetes pods that need to be cleaned up. You can also run this manually, see ' +
                        'the cleanup_stale_pods.py script.', default=0)
    args = parser.parse_args()

    check_environment()

    if args.working:
        log(f"Using working folder: {args.working}")
        os.chdir(args.working)

    logger.setLevel('INFO')
    logger.addHandler(logging.FileHandler('runner.log', 'a'))

    setup_store()

    MAX_BENCHMARK_RUNS = args.max_benchmark_runs
    CLEAR_RANDOM_INPUTS = args.clear_random_inputs
    device = args.device
    if device:
        set_unique_node_id(f"{platform.node()}_{device}")
        check_device(device)
    else:
        set_unique_node_id(platform.node())

    subset = None
    if args.subset:
        subset = [x.strip() for x in args.subset.split(',')]

    if args.cleanup_stale_pods:
        check_stale_pods(args.cleanup_stale_pods)

    dataset = os.getenv("INPUT_DATASET")
    experiment = os.getenv("EXPERIMENT_NAME", "facesynthetics")

    rc = monitor(experiment, dataset, device is not None, args.benchmark, subset, args.no_quantization)
    sys.exit(rc)
archai/tasks/face_segmentation/aml/azure/runner.py/0
{ "file_path": "archai/tasks/face_segmentation/aml/azure/runner.py", "repo_id": "archai", "token_count": 13234 }
345
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

def get_dlc_metrics(dlc_file):
    from olive.snpe.tools.dev import get_dlc_info, get_dlc_metrics
    csv_data = get_dlc_info(dlc_file)
    info = get_dlc_metrics(dlc_file)
    return (csv_data, info['macs'], info['parameters'])
archai/tasks/face_segmentation/aml/snpe/dlc_helper.py/0
{ "file_path": "archai/tasks/face_segmentation/aml/snpe/dlc_helper.py", "repo_id": "archai", "token_count": 123 }
346
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

from pathlib import Path
from argparse import ArgumentParser
import os
import time

import torch
import mlflow
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint

from archai.datasets.cv.face_synthetics import FaceSyntheticsDatasetProvider
from archai.discrete_search.search_spaces.config import ArchConfig
from search_space.hgnet import StackedHourglass
from training.pl_trainer import SegmentationTrainingLoop
from archai.common.store import ArchaiStore
from archai.common.config import Config


def print_auto_logged_info(r):
    tags = {k: v for k, v in r.data.tags.items() if not k.startswith("mlflow.")}
    artifacts = [f.path for f in mlflow.MlflowClient().list_artifacts(r.info.run_id, "model")]
    print("run_id: {}".format(r.info.run_id))
    print("artifacts: {}".format(artifacts))
    print("params: {}".format(r.data.params))
    print("metrics: {}".format(r.data.metrics))
    print("tags: {}".format(tags))


def main():
    parser = ArgumentParser()
    parser.add_argument('arch', type=Path)
    parser.add_argument('--dataset_dir', type=Path, help='Face Synthetics dataset directory.', required=True)
    parser.add_argument('--output_dir', type=Path, help='Output directory.', required=True)
    parser.add_argument('--lr', type=float, default=2e-4)
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--epochs', type=int, default=1)
    parser.add_argument('--val_check_interval', type=float, default=1.0)
    parser.add_argument('--model_id', type=str, default=None)
    parser.add_argument('--config', type=Path, default=None)
    parser.add_argument('--register', help="Specify whether to register the trained model with your mlflow workspace",
                        action="store_true")
    args = parser.parse_args()

    model_id = args.model_id
    store: ArchaiStore = None
    epochs = 1 if args.epochs < 1 else args.epochs

    start_time = time.time()

    storing = False
    config = args.config
    experiment_name = None
    if config and config.is_file():
        config = Config(str(config))
        if 'aml' in config:
            # we are running in azure ml.
            aml_config = config['aml']
            metric_key = config['training'].get('metric_key')
            connection_str = aml_config['connection_str']
            experiment_name = aml_config['experiment_name']
            storage_account_name, storage_account_key = ArchaiStore.parse_connection_string(connection_str)
            store = ArchaiStore(storage_account_name, storage_account_key, table_name=experiment_name)
            storing = True

    try:
        if storing:
            print(f'Locking entity {model_id}')
            e = store.lock(model_id, 'training')
            if e is None:
                # force the reset of this lock so the training job can take it!
                # might be a left over from previous failed job.
                store.unlock_entity(store.get_status(model_id))
                e = store.lock(model_id, 'training')

            pipeline_id = os.getenv('AZUREML_ROOT_RUN_ID')
            if pipeline_id is not None:
                e['pipeline_id'] = pipeline_id
                store.merge_status_entity(e)

        arch_config = ArchConfig.from_file(args.arch)
        model = StackedHourglass(arch_config, num_classes=18)

        pl_model = SegmentationTrainingLoop(model, lr=args.lr)
        dataset_prov = FaceSyntheticsDatasetProvider(args.dataset_dir)

        tr_dl = torch.utils.data.DataLoader(
            dataset_prov.get_train_dataset(), batch_size=args.batch_size, num_workers=8,
            shuffle=True
        )

        val_dl = torch.utils.data.DataLoader(
            dataset_prov.get_val_dataset(), batch_size=args.batch_size, num_workers=8
        )

        callbacks = [
            ModelCheckpoint(
                dirpath=str(args.output_dir / 'checkpoints'),
                monitor='validation_loss',
                mode='min',
                save_last=True,
                save_top_k=1,
                verbose=True,
                filename='{epoch}-{step}-{validation_loss:.2f}'
            )
        ]

        trainer = Trainer(
            default_root_dir=str(args.output_dir),
            accelerator='gpu',
            val_check_interval=args.val_check_interval,
            max_epochs=epochs,
            callbacks=callbacks
        )

        mlflow.pytorch.autolog(log_models=args.register, registered_model_name=model_id)
        with mlflow.start_run() as run:
            trainer.fit(pl_model, tr_dl, val_dl)
            print_auto_logged_info(mlflow.get_run(run_id=run.info.run_id))

        val_result = trainer.validate(trainer.model, val_dl)
        print(val_result)
        end_time = time.time()

        if storing:
            # post updated progress to our unified status table and unlock the row.
            metric = float(val_result[0]['validation_mIOU'])
            print(f"Storing {metric_key}={metric} for model {model_id}")
            e = store.get_status(model_id)
            e[metric_key] = metric
            e['status'] = 'complete'
            e['training_time'] = end_time - start_time
            store.unlock_entity(e)

        trainer.save_checkpoint(args.output_dir / 'model.ckpt')

        # Save onnx model.
        input_shape = (1, 3, 256, 256)
        rand_range = (0.0, 1.0)
        export_kwargs = {'opset_version': 11}
        rand_min, rand_max = rand_range
        sample_input = ((rand_max - rand_min) * torch.rand(*input_shape) + rand_min).type("torch.FloatTensor")
        onnx_file = str(args.output_dir / 'model.onnx')
        torch.onnx.export(
            model,
            (sample_input,),
            onnx_file,
            input_names=["input_0"],
            **export_kwargs,
        )

    except Exception as ex:
        # record failed state.
        if storing:
            e['status'] = 'failed'
            e['error'] = str(ex)
            store.unlock_entity(e)
        else:
            print(ex)


if __name__ == '__main__':
    main()
archai/tasks/face_segmentation/train.py/0
{ "file_path": "archai/tasks/face_segmentation/train.py", "repo_id": "archai", "token_count": 2614 }
347
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import argparse
import json

from transformers import (
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    GPT2Config,
    GPT2LMHeadModel,
    TrainingArguments,
)

from archai.common.file_utils import check_available_checkpoint
from archai.datasets.nlp.hf_dataset_provider import HfDiskDatasetProvider
# from archai.datasets.nlp.hf_dataset_provider import HfHubDatasetProvider
# from archai.datasets.nlp.hf_dataset_provider_utils import tokenize_contiguous_dataset
from archai.trainers.nlp.hf_trainer import HfTrainer


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Trains a Pareto architecture from Transformer-Flex.")

    parser.add_argument("pareto_config_path", type=str, help="Path to the Pareto architecture configuration file.")

    parser.add_argument(
        "-o",
        "--output_dir",
        type=str,
        default="",
        help="Defines an output folder for the saved outputs.",
    )

    args = parser.parse_args()

    return args


if __name__ == "__main__":
    args = parse_args()

    tokenizer = AutoTokenizer.from_pretrained("gpt2", model_max_length=1024)
    tokenizer.pad_token = tokenizer.eos_token

    collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

    # Users can use the Hugging Face's Hub to download the dataset instead
    # of downloading it manually
    # dataset_provider = HfHubDatasetProvider(dataset="the_pile", subset="plain_text")
    # train_dataset = dataset_provider.get_train_dataset()
    # encoded_train_dataset = train_dataset.map(
    #     tokenize_contiguous_dataset,
    #     batched=True,
    #     fn_kwargs={"tokenizer": tokenizer, "model_max_length": 1024},
    #     remove_columns=train_dataset.column_names,
    # )

    # We pre-encoded the dataset to speed up the training
    dataset_provider = HfDiskDatasetProvider("data/the_pile_gpt2_encoded_1024")
    encoded_train_dataset = dataset_provider.get_train_dataset()
    encoded_val_dataset = dataset_provider.get_val_dataset()
    encoded_test_dataset = dataset_provider.get_test_dataset()

    pareto_config = {}
    with open(args.pareto_config_path, "r") as f:
        pareto_config = json.load(f)
    config = GPT2Config(n_positions=1024, bos_token_id=0, eos_token_id=0, **pareto_config)
    model = GPT2LMHeadModel(config=config)

    print(f"Total parameters: {sum(p.numel() for p in model.parameters())}")

    training_args = TrainingArguments(
        args.output_dir,
        optim="adamw_torch",
        evaluation_strategy="no",
        logging_steps=10,
        per_device_train_batch_size=64,
        learning_rate=6e-4,
        weight_decay=0.1,
        adam_beta1=0.9,
        adam_beta2=0.95,
        lr_scheduler_type="cosine",
        warmup_steps=150,
        max_steps=30000,
    )
    trainer = HfTrainer(
        model=model,
        args=training_args,
        data_collator=collator,
        train_dataset=encoded_train_dataset,
        eval_dataset=encoded_val_dataset,
    )

    resume_from_checkpoint = check_available_checkpoint(args.output_dir)
    trainer_output = trainer.train(resume_from_checkpoint=resume_from_checkpoint)
    trainer.save_metrics("train", trainer_output.metrics)

    for log_metric in trainer.state.log_history[::-1]:
        if "eval_loss" in log_metric:
            trainer.save_metrics("eval", log_metric)
            break

    test_metric = trainer.evaluate(encoded_test_dataset, metric_key_prefix="test")
    trainer.save_metrics("test", test_metric)
archai/tasks/text_generation/train.py/0
{ "file_path": "archai/tasks/text_generation/train.py", "repo_id": "archai", "token_count": 1473 }
348
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import torch
import os
import shutil

from archai.datasets.nlp.tokenizer_utils.gpt2_tokenizer import Gpt2Tokenizer
from archai.datasets.nlp.nvidia_data_loader_utils import LMOrderedIterator, LMMultiFileIterator


def test_lm_ordered_iterator():
    # Assert that iterator returns the correct variables
    input_ids = torch.zeros(256)
    iterator = iter(LMOrderedIterator(input_ids, 1, 8))
    input_ids, labels, seq_len, warmup = next(iterator)
    assert input_ids.shape == (1, 8)
    assert labels.shape == (1, 8)
    assert seq_len == 8
    assert warmup is True

    # Assert that iterator is able to return data with different
    # batch size and sequence length
    input_ids = torch.zeros(512)
    iterator = iter(LMOrderedIterator(input_ids, 2, 16))
    input_ids, labels, seq_len, warmup = next(iterator)
    assert input_ids.shape == (2, 16)
    assert labels.shape == (2, 16)
    assert seq_len == 16
    assert warmup is True


def test_lm_multi_file_iterator():
    input_files = [f"tmp_{i}.txt" for i in range(5)]
    for input_file in input_files:
        with open(input_file, "w") as f:
            [f.write("lm multi file iterator test file") for i in range(10)]

    vocab = Gpt2Tokenizer("tokenizer")
    vocab.train(input_files)

    # Assert that iterator returns the correct variables
    iterator = iter(LMMultiFileIterator(input_files, vocab, 1, 8, n_chunks=2))
    input_ids, labels, seq_len, warmup = next(iterator)
    assert input_ids.shape == (1, 8)
    assert labels.shape == (1, 8)
    assert seq_len == 8
    assert warmup is True

    for input_file in input_files:
        os.remove(input_file)
    shutil.rmtree("tokenizer")
archai/tests/datasets/nlp/test_nvidia_data_loader_utils.py/0
{ "file_path": "archai/tests/datasets/nlp/test_nvidia_data_loader_utils.py", "repo_id": "archai", "token_count": 652 }
349
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import random
from typing import Tuple

import pytest
import torch

from archai.discrete_search.api.search_objectives import SearchObjectives
from archai.discrete_search.evaluators.functional import EvaluationFunction
from archai.discrete_search.evaluators.onnx_model import AvgOnnxLatency
from archai.discrete_search.evaluators.pt_profiler import TorchFlops, TorchNumParameters
from archai.discrete_search.search_spaces.nlp.transformer_flex.search_space import (
    TransformerFlexSearchSpace,
)


@pytest.fixture
def search_space():
    return TransformerFlexSearchSpace("gpt2")


@pytest.fixture
def models(search_space):
    return [search_space.random_sample() for _ in range(3)]


@pytest.fixture
def sample_input() -> torch.Tensor:
    return torch.zeros(1, 1, 192, dtype=torch.long)


def test_eval_all_objs(models):
    search_objectives = SearchObjectives(cache_objective_evaluation=False)
    search_objectives.add_objective(
        "Number of parameters",
        TorchNumParameters(),
        higher_is_better=False,
        compute_intensive=False,
        constraint=(0.0, 5e5),
    )
    search_objectives.add_objective(
        "OnnxLatency",
        AvgOnnxLatency(input_shape=(1, 1, 192), num_trials=1, input_dtype="torch.LongTensor"),
        higher_is_better=False,
    )
    search_objectives.add_objective("Budget Value", EvaluationFunction(lambda m, b: b), higher_is_better=True)

    # Assert that objectives are evaluated and return a dictionary
    result = search_objectives.eval_all_objs(
        models, budgets={"Budget Value": list(range(len(models)))}, progress_bar=True
    )
    assert all(len(r) == len(models) for r in result.values())


def test_eval_subsets(sample_input, models):
    num_params_obj = TorchNumParameters()
    num_params = [num_params_obj.evaluate(m) for m in models]
    max_params = max(num_params)

    search_objectives = SearchObjectives(cache_objective_evaluation=False)
    search_objectives.add_objective(
        "Flops",
        TorchFlops(forward_args=sample_input),
        higher_is_better=False,
        compute_intensive=False,
        constraint=(0.0, float("inf")),
    )
    search_objectives.add_objective(
        "OnnxLatency",
        AvgOnnxLatency(input_shape=(1, 1, 192), num_trials=1, input_dtype="torch.LongTensor"),
        higher_is_better=False,
    )
    search_objectives.add_constraint("NumParameters", TorchNumParameters(), (max_params - 0.5, max_params + 0.5))
    search_objectives.add_objective("Budget Value", EvaluationFunction(lambda m, b: b), higher_is_better=True)

    # Assert that cheap objectives are evaluated and return a dictionary
    result = search_objectives.eval_cheap_objs(
        models, budgets={"Budget Value": list(range(len(models)))}, progress_bar=True
    )
    assert set(result.keys()) == {"Flops"}

    # Assert that constraints are valid
    c_values, c_indices = search_objectives.validate_constraints(models)
    assert len(c_values) == 2
    assert len(c_indices) == 1

    # Assert that expensive objectives are evaluated and return a dictionary
    result = search_objectives.eval_expensive_objs(
        models, budgets={"Budget Value": list(range(len(models)))}, progress_bar=True
    )
    assert set(result.keys()) == {"OnnxLatency", "Budget Value"}


def test_eval_cache(sample_input, models):
    search_objectives = SearchObjectives(cache_objective_evaluation=True)
    search_objectives.add_objective(
        "Flops",
        TorchFlops(forward_args=sample_input),
        higher_is_better=False,
        compute_intensive=False,
        constraint=(0.0, float("inf")),
    )
    search_objectives.add_objective(
        "OnnxLatency",
        AvgOnnxLatency(input_shape=(1, 1, 192), num_trials=1, input_dtype="torch.LongTensor"),
        higher_is_better=False,
    )
    search_objectives.add_constraint("NumberOfParameters", TorchNumParameters(), (0, float("inf")))
    search_objectives.add_constraint("Random number", EvaluationFunction(lambda m, b: random.random()), (0.0, 1.0))

    # Assert that cheap objectives are evaluated and cached
    result = search_objectives.eval_cheap_objs(models, progress_bar=True)
    assert len(result) == 1
    assert search_objectives.lookup_cache("Flops", models[0].archid, None)
    assert search_objectives.is_model_valid(models[0])
    assert search_objectives.lookup_cache("NumberOfParameters", models[0].archid, None)
    assert search_objectives.lookup_cache("Random number", models[0].archid, None)

    # Assert that cached value is correct and constraints are valid
    cached_val = search_objectives.lookup_cache("Random number", models[0].archid, None)
    cons_vals, cons_filtered = search_objectives.validate_constraints(models, False)
    assert cons_vals["Random number"][0] == cached_val
    assert len(cons_filtered) == len(models)
archai/tests/discrete_search/api/test_search_objectives.py/0
{ "file_path": "archai/tests/discrete_search/api/test_search_objectives.py", "repo_id": "archai", "token_count": 1803 }
350
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import os

import pytest
from transformers import GPT2LMHeadModel

from archai.discrete_search.api.search_space import (
    BayesOptSearchSpace,
    EvolutionarySearchSpace,
)
from archai.discrete_search.search_spaces.nlp.transformer_flex.search_space import (
    TransformerFlexSearchSpace,
)


@pytest.fixture
def config():
    return {
        "arch_type": "gpt2",
        "min_layers": 2,
        "max_layers": 3,
        "d_inner_options": [256, 512, 1024],
        "d_model_options": [256, 512, 1024],
        "n_head_options": [2, 4, 8],
        "share_d_inner": True,
        "mutation_prob": 1.0,
        "vocab_size": 50257,
        "max_sequence_length": 1024,
        "att_dropout_rate": 0.1,
        "random_seed": 42,
    }


def test_transformer_flex_search_space_init(config):
    search_space = TransformerFlexSearchSpace(**config)

    # Assert that the search space is set correctly
    assert search_space.arch_type == config["arch_type"]
    assert search_space.min_layers == config["min_layers"]
    assert search_space.max_layers == config["max_layers"]
    assert search_space.options["d_inner"]["values"] == config["d_inner_options"]
    assert search_space.options["d_model"]["values"] == config["d_model_options"]
    assert search_space.options["n_head"]["values"] == config["n_head_options"]
    assert search_space.options["d_inner"]["share"] == config["share_d_inner"]
    assert search_space.mutation_prob == config["mutation_prob"]
    assert search_space.vocab_size == config["vocab_size"]
    assert search_space.max_sequence_length == config["max_sequence_length"]
    assert search_space.att_dropout_rate == config["att_dropout_rate"]

    # Assert that the search space is a subclass of BayesOptSearchSpace
    # and EvolutionarySearchSpace
    assert isinstance(search_space, EvolutionarySearchSpace)
    assert isinstance(search_space, BayesOptSearchSpace)


def test_transformer_flex_search_space_load_model_from_config(config):
    # Assert that the model is loaded correctly
    search_space = TransformerFlexSearchSpace(**config)
    model_config = {"d_model": 256, "n_head": 2, "d_inner": 256, "n_layer": 2}
    model = search_space._load_model_from_config(model_config)
    assert isinstance(model, GPT2LMHeadModel)


def test_transformer_flex_search_space_get_archid(config):
    # Assert that the archid is generated correctly
    search_space = TransformerFlexSearchSpace(**config)
    model_config = {"d_model": 256, "n_head": 2, "d_inner": 256, "n_layer": 2}
    archid = search_space.get_archid(model_config)
    assert archid == "gpt2_9d72dac1ada7e094f5a7fd67dc688e33348d4907"


def test_transformer_flex_search_space_random_sample(config):
    # Assert that a model is sampled correctly
    search_space = TransformerFlexSearchSpace(**config)
    arch_model = search_space.random_sample()
    assert arch_model.archid == "gpt2_df9751a4db6ffaa963687eeae3f04d8c764f5f9c"
    assert isinstance(arch_model.arch, GPT2LMHeadModel)


def test_transformer_flex_search_space_save_arch(config):
    # Assert that a model is saved correctly
    search_space = TransformerFlexSearchSpace(**config)
    arch_model = search_space.random_sample()
    search_space.save_arch(arch_model, "test_arch.json")
    assert os.path.exists("test_arch.json")


def test_transformer_flex_search_space_load_arch(config):
    # Assert that a model is loaded correctly
    search_space = TransformerFlexSearchSpace(**config)
    arch_model = search_space.load_arch("test_arch.json")
    os.remove("test_arch.json")
    assert arch_model.archid == "gpt2_df9751a4db6ffaa963687eeae3f04d8c764f5f9c"


def test_transformer_flex_search_space_mutate(config):
    # Assert that a model is mutated correctly
    search_space = TransformerFlexSearchSpace(**config)
    arch_model = search_space.random_sample()
    mutated_arch_model = search_space.mutate(arch_model)
    assert mutated_arch_model.archid != arch_model.archid


def test_transformer_flex_search_space_crossover(config):
    search_space = TransformerFlexSearchSpace(**config)
    arch_model1 = search_space.random_sample()
    arch_model2 = search_space.random_sample()

    # Assert that a model is crossovered correctly
    crossovered_arch_model = search_space.crossover([arch_model1, arch_model2])
    assert crossovered_arch_model.archid != arch_model1.archid
    assert crossovered_arch_model.archid != arch_model2.archid


def test_transformer_flex_search_space_encode(config):
    # Assert that a model is encoded correctly
    search_space = TransformerFlexSearchSpace(**config)
    arch_model = search_space.random_sample()
    gene = search_space.encode(arch_model)
    assert gene == [2, 256, 1024, 4]
archai/tests/discrete_search/search_spaces/nlp/transformer_flex/test_transformer_flex_search_space.py/0
{ "file_path": "archai/tests/discrete_search/search_spaces/nlp/transformer_flex/test_transformer_flex_search_space.py", "repo_id": "archai", "token_count": 1759 }
351
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import os

import pytest
import torch
from transformers import GPT2Config, GPT2LMHeadModel

from archai.common.file_utils import create_file_name_identifier
from archai.onnx.export import export_to_onnx
from archai.onnx.optimization import optimize_onnx
from archai.quantization.ptq import (
    dynamic_quantization_onnx,
    dynamic_quantization_torch,
)


@pytest.fixture
def onnx_model_path():
    model = GPT2LMHeadModel(config=GPT2Config(vocab_size=1, n_layer=1))
    onnx_model_path = "temp_model.onnx"
    onnx_config = export_to_onnx(model, onnx_model_path)
    ort_model_path = optimize_onnx(onnx_model_path, onnx_config)

    yield ort_model_path

    os.remove(onnx_model_path)
    os.remove(ort_model_path)


@pytest.fixture
def model():
    class DummyModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.fc1 = torch.nn.Linear(10, 20)
            self.fc2 = torch.nn.Linear(20, 30)
            self.word_emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=20)
            self.transformer = torch.nn.ModuleDict(
                {
                    "wpe": torch.nn.Embedding(num_embeddings=10, embedding_dim=20),
                    "wte": torch.nn.Embedding(num_embeddings=10, embedding_dim=20),
                }
            )

        def forward(self, x):
            return x

    return DummyModel()


def test_dynamic_quantization_onnx(onnx_model_path):
    # Assert that the quantized model exists
    qnt_model_path = dynamic_quantization_onnx(onnx_model_path)
    assert qnt_model_path == create_file_name_identifier(onnx_model_path, "-int8")
    assert os.path.exists(qnt_model_path)

    os.remove(qnt_model_path)


def test_dynamic_quantization_torch(model):
    # Assert that the quantized model has the expected properties
    model_qnt = dynamic_quantization_torch(model)
    assert isinstance(model_qnt, torch.nn.Module)
    assert isinstance(model_qnt.fc1, torch.nn.quantized.Linear)
    assert isinstance(model_qnt.fc2, torch.nn.quantized.Linear)
    assert isinstance(model_qnt.word_emb, torch.nn.quantized.Embedding)
    assert isinstance(model_qnt.transformer["wpe"], torch.nn.quantized.Embedding)
    assert isinstance(model_qnt.transformer["wte"], torch.nn.quantized.Embedding)
archai/tests/quantization/test_ptq.py/0
{ "file_path": "archai/tests/quantization/test_ptq.py", "repo_id": "archai", "token_count": 1014 }
352
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import torch
from torch.optim import Optimizer

from archai.trainers.coin_betting_optimizer import CocobBackprop, CocobOns


def test_cocob_backprop():
    model = torch.nn.Linear(5, 5)
    loss_fn = torch.nn.MSELoss()
    optimizer = CocobBackprop(model.parameters(), alpha=100.0, eps=1e-8)

    # Asserts that the optimizer works with a dummy forward and backward passes
    optimizer.zero_grad()
    outputs = model(torch.randn(10, 5))
    loss = loss_fn(outputs, torch.randn(10, 5))
    loss.backward()
    assert loss.shape == torch.Size([])


def test_cocob_ons():
    model = torch.nn.Linear(5, 5)
    loss_fn = torch.nn.MSELoss()
    optimizer = CocobOns(model.parameters(), eps=1e-8)

    # Asserts that the optimizer works with a dummy forward and backward passes
    optimizer.zero_grad()
    outputs = model(torch.randn(10, 5))
    loss = loss_fn(outputs, torch.randn(10, 5))
    loss.backward()
    assert loss.shape == torch.Size([])
archai/tests/trainers/test_coin_betting_optimizer.py/0
{ "file_path": "archai/tests/trainers/test_coin_betting_optimizer.py", "repo_id": "archai", "token_count": 388 }
353
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

import pprint
import unittest

from msrest import Deserializer
from msrest.universal_http import HTTPClientResponse


class _TestResponse(HTTPClientResponse):
    def __init__(self, text):
        super(_TestResponse, self).__init__(request=None, internal_response=None)
        self._text = text

    def text(self, encoding=None):
        return self._text


class TestDeserialization(unittest.TestCase):

    # https://github.com/microsoft/azure-devops-python-api/issues/268
    def test_deserialization_issue_268_71(self):
        from azure.devops.v7_1.task_agent import models
        self._test_deserialization(models.__dict__.items(), _268_type, _268_json)

    # https://github.com/microsoft/azure-devops-python-api/issues/268
    def test_deserialization_issue_268_70(self):
        from azure.devops.v7_0.task_agent import models
        self._test_deserialization(models.__dict__.items(), _268_type, _268_json)

    @staticmethod
    def _test_deserialization(models, data_type, json):
        client_models = {k: v for k, v in models if isinstance(v, type)}
        deserializer = Deserializer(client_models)
        response = _TestResponse(json)
        task_agent_response = deserializer(data_type, response)
        pprint.pprint(task_agent_response.__dict__)


if __name__ == '__main__':
    unittest.main()


_268_type = 'TaskAgentReference'
_268_json = '{"id":0,"name":null,"version":null,"osDescription":"Foo","provisioningState":null}'
azure-devops-python-api/azure-devops/azure/devops/issue_tests/test_issue_268.py/0
{ "file_path": "azure-devops-python-api/azure-devops/azure/devops/issue_tests/test_issue_268.py", "repo_id": "azure-devops-python-api", "token_count": 597 }
354
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------

from msrest.serialization import Model


class Issue(Model):
    """
    :param category:
    :type category: str
    :param data:
    :type data: dict
    :param message:
    :type message: str
    :param type:
    :type type: object
    """

    _attribute_map = {
        'category': {'key': 'category', 'type': 'str'},
        'data': {'key': 'data', 'type': '{str}'},
        'message': {'key': 'message', 'type': 'str'},
        'type': {'key': 'type', 'type': 'object'}
    }

    def __init__(self, category=None, data=None, message=None, type=None):
        super(Issue, self).__init__()
        self.category = category
        self.data = data
        self.message = message
        self.type = type


class JobOption(Model):
    """
    Represents an option that may affect the way an agent runs the job.

    :param data:
    :type data: dict
    :param id: Gets the id of the option.
    :type id: str
    """

    _attribute_map = {
        'data': {'key': 'data', 'type': '{str}'},
        'id': {'key': 'id', 'type': 'str'}
    }

    def __init__(self, data=None, id=None):
        super(JobOption, self).__init__()
        self.data = data
        self.id = id


class MaskHint(Model):
    """
    :param type:
    :type type: object
    :param value:
    :type value: str
    """

    _attribute_map = {
        'type': {'key': 'type', 'type': 'object'},
        'value': {'key': 'value', 'type': 'str'}
    }

    def __init__(self, type=None, value=None):
        super(MaskHint, self).__init__()
        self.type = type
        self.value = value


class PlanEnvironment(Model):
    """
    :param mask:
    :type mask: list of :class:`MaskHint <azure.devops.v7_0.task.models.MaskHint>`
    :param options:
    :type options: dict
    :param variables:
    :type variables: dict
    """

    _attribute_map = {
        'mask': {'key': 'mask', 'type': '[MaskHint]'},
        'options': {'key': 'options', 'type': '{JobOption}'},
        'variables': {'key': 'variables', 'type': '{str}'}
    }

    def __init__(self, mask=None, options=None, variables=None):
        super(PlanEnvironment, self).__init__()
        self.mask = mask
        self.options = options
        self.variables = variables


class ProjectReference(Model):
    """
    :param id:
    :type id: str
    :param name:
    :type name: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'}
    }

    def __init__(self, id=None, name=None):
        super(ProjectReference, self).__init__()
        self.id = id
        self.name = name


class ReferenceLinks(Model):
    """
    The class to represent a collection of REST reference links.

    :param links: The readonly view of the links. Because Reference links are readonly, we only want to expose them as read only.
    :type links: dict
    """

    _attribute_map = {
        'links': {'key': 'links', 'type': '{object}'}
    }

    def __init__(self, links=None):
        super(ReferenceLinks, self).__init__()
        self.links = links


class TaskAgentJob(Model):
    """
    :param container:
    :type container: str
    :param id:
    :type id: str
    :param name:
    :type name: str
    :param sidecar_containers:
    :type sidecar_containers: dict
    :param steps:
    :type steps: list of :class:`TaskAgentJobStep <azure.devops.v7_0.task.models.TaskAgentJobStep>`
    :param variables:
    :type variables: list of :class:`TaskAgentJobVariable <azure.devops.v7_0.task.models.TaskAgentJobVariable>`
    """

    _attribute_map = {
        'container': {'key': 'container', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'sidecar_containers': {'key': 'sidecarContainers', 'type': '{str}'},
        'steps': {'key': 'steps', 'type': '[TaskAgentJobStep]'},
        'variables': {'key': 'variables', 'type': '[TaskAgentJobVariable]'}
    }

    def __init__(self, container=None, id=None, name=None, sidecar_containers=None, steps=None, variables=None):
        super(TaskAgentJob, self).__init__()
        self.container = container
        self.id = id
        self.name = name
        self.sidecar_containers = sidecar_containers
        self.steps = steps
        self.variables = variables


class TaskAgentJobStep(Model):
    """
    :param condition:
    :type condition: str
    :param continue_on_error:
    :type continue_on_error: bool
    :param enabled:
    :type enabled: bool
    :param env:
    :type env: dict
    :param id:
    :type id: str
    :param inputs:
    :type inputs: dict
    :param name:
    :type name: str
    :param retry_count_on_task_failure:
    :type retry_count_on_task_failure: int
    :param task:
    :type task: :class:`TaskAgentJobTask <azure.devops.v7_0.task.models.TaskAgentJobTask>`
    :param timeout_in_minutes:
    :type timeout_in_minutes: int
    :param type:
    :type type: object
    """

    _attribute_map = {
        'condition': {'key': 'condition', 'type': 'str'},
        'continue_on_error': {'key': 'continueOnError', 'type': 'bool'},
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'env': {'key': 'env', 'type': '{str}'},
        'id': {'key': 'id', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '{str}'},
        'name': {'key': 'name', 'type': 'str'},
        'retry_count_on_task_failure': {'key': 'retryCountOnTaskFailure', 'type': 'int'},
        'task': {'key': 'task', 'type': 'TaskAgentJobTask'},
        'timeout_in_minutes': {'key': 'timeoutInMinutes', 'type': 'int'},
        'type': {'key': 'type', 'type': 'object'}
    }

    def __init__(self, condition=None, continue_on_error=None, enabled=None, env=None, id=None, inputs=None, name=None, retry_count_on_task_failure=None, task=None, timeout_in_minutes=None, type=None):
        super(TaskAgentJobStep, self).__init__()
        self.condition = condition
        self.continue_on_error = continue_on_error
        self.enabled = enabled
        self.env = env
        self.id = id
        self.inputs = inputs
        self.name = name
        self.retry_count_on_task_failure = retry_count_on_task_failure
        self.task = task
        self.timeout_in_minutes = timeout_in_minutes
        self.type = type


class TaskAgentJobTask(Model):
    """
    :param id:
    :type id: str
    :param name:
    :type name: str
    :param version:
    :type version: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'version': {'key': 'version', 'type': 'str'}
    }

    def __init__(self, id=None, name=None, version=None):
        super(TaskAgentJobTask, self).__init__()
        self.id = id
        self.name = name
        self.version = version


class TaskAgentJobVariable(Model):
    """
    :param name:
    :type name: str
    :param secret:
    :type secret: bool
    :param value:
    :type value: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'secret': {'key': 'secret', 'type': 'bool'},
        'value': {'key': 'value', 'type': 'str'}
    }

    def __init__(self, name=None, secret=None, value=None):
        super(TaskAgentJobVariable, self).__init__()
        self.name = name
        self.secret = secret
        self.value = value


class TaskAttachment(Model):
    """
    :param _links:
    :type _links: :class:`ReferenceLinks <azure.devops.v7_0.task.models.ReferenceLinks>`
    :param created_on:
    :type created_on: datetime
    :param last_changed_by:
    :type last_changed_by: str
    :param last_changed_on:
    :type last_changed_on: datetime
    :param name:
    :type name: str
    :param record_id:
    :type record_id: str
    :param timeline_id:
    :type timeline_id: str
    :param type:
    :type type: str
    """

    _attribute_map = {
        '_links': {'key': '_links', 'type': 'ReferenceLinks'},
        'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
        'last_changed_by': {'key': 'lastChangedBy', 'type': 'str'},
        'last_changed_on': {'key': 'lastChangedOn', 'type': 'iso-8601'},
        'name': {'key': 'name', 'type': 'str'},
        'record_id': {'key': 'recordId', 'type': 'str'},
        'timeline_id': {'key': 'timelineId', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'}
    }

    def __init__(self, _links=None, created_on=None, last_changed_by=None, last_changed_on=None, name=None, record_id=None, timeline_id=None, type=None):
        super(TaskAttachment, self).__init__()
        self._links = _links
        self.created_on = created_on
        self.last_changed_by = last_changed_by
        self.last_changed_on = last_changed_on
        self.name = name
        self.record_id = record_id
        self.timeline_id = timeline_id
        self.type = type


class TaskLogReference(Model):
    """
    :param id:
    :type id: int
    :param location:
    :type location: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'int'},
        'location': {'key': 'location', 'type': 'str'}
    }

    def __init__(self, id=None, location=None):
        super(TaskLogReference, self).__init__()
        self.id = id
        self.location = location


class TaskOrchestrationItem(Model):
    """
    :param item_type:
    :type item_type: object
    """

    _attribute_map = {
        'item_type': {'key': 'itemType', 'type': 'object'}
    }

    def __init__(self, item_type=None):
        super(TaskOrchestrationItem, self).__init__()
        self.item_type = item_type


class TaskOrchestrationOwner(Model):
    """
    :param _links:
    :type _links: :class:`ReferenceLinks <azure.devops.v7_0.task.models.ReferenceLinks>`
    :param id:
    :type id: int
    :param name:
    :type name: str
    """

    _attribute_map = {
        '_links': {'key': '_links', 'type': 'ReferenceLinks'},
        'id': {'key': 'id', 'type': 'int'},
        'name': {'key': 'name', 'type': 'str'}
    }

    def __init__(self, _links=None, id=None, name=None):
        super(TaskOrchestrationOwner, self).__init__()
        self._links = _links
        self.id = id
        self.name = name


class TaskOrchestrationPlanGroupsQueueMetrics(Model):
    """
    :param count:
    :type count: int
    :param status:
    :type status: object
    """

    _attribute_map = {
        'count': {'key': 'count', 'type': 'int'},
        'status': {'key': 'status', 'type': 'object'}
    }

    def __init__(self, count=None, status=None):
        super(TaskOrchestrationPlanGroupsQueueMetrics, self).__init__()
        self.count = count
        self.status = status


class TaskOrchestrationPlanReference(Model):
    """
    :param artifact_location:
    :type artifact_location: str
    :param artifact_uri:
    :type artifact_uri: str
    :param definition:
    :type definition: :class:`TaskOrchestrationOwner <azure.devops.v7_0.task.models.TaskOrchestrationOwner>`
    :param owner:
    :type owner: :class:`TaskOrchestrationOwner <azure.devops.v7_0.task.models.TaskOrchestrationOwner>`
    :param plan_group:
    :type plan_group: str
    :param plan_id:
    :type plan_id: str
    :param plan_type:
    :type plan_type: str
    :param scope_identifier:
    :type scope_identifier: str
    :param version:
    :type version: int
    """

    _attribute_map = {
        'artifact_location': {'key': 'artifactLocation', 'type': 'str'},
        'artifact_uri': {'key': 'artifactUri', 'type': 'str'},
        'definition': {'key': 'definition', 'type': 'TaskOrchestrationOwner'},
        'owner': {'key': 'owner', 'type': 'TaskOrchestrationOwner'},
        'plan_group': {'key': 'planGroup', 'type': 'str'},
        'plan_id': {'key': 'planId', 'type': 'str'},
        'plan_type': {'key': 'planType', 'type': 'str'},
        'scope_identifier': {'key': 'scopeIdentifier', 'type': 'str'},
        'version': {'key': 'version', 'type': 'int'}
    }

    def __init__(self, artifact_location=None, artifact_uri=None, definition=None, owner=None, plan_group=None, plan_id=None, plan_type=None, scope_identifier=None, version=None):
        super(TaskOrchestrationPlanReference, self).__init__()
        self.artifact_location = artifact_location
        self.artifact_uri = artifact_uri
        self.definition = definition
        self.owner = owner
        self.plan_group = plan_group
        self.plan_id = plan_id
        self.plan_type = plan_type
        self.scope_identifier = scope_identifier
        self.version = version


class TaskOrchestrationQueuedPlan(Model):
    """
    :param assign_time:
    :type assign_time: datetime
    :param definition:
    :type definition: :class:`TaskOrchestrationOwner <azure.devops.v7_0.task.models.TaskOrchestrationOwner>`
    :param owner:
    :type owner: :class:`TaskOrchestrationOwner <azure.devops.v7_0.task.models.TaskOrchestrationOwner>`
    :param plan_group:
    :type plan_group: str
    :param plan_id:
    :type plan_id: str
    :param pool_id:
    :type pool_id: int
    :param queue_position:
    :type queue_position: int
    :param queue_time:
    :type queue_time: datetime
    :param scope_identifier:
    :type scope_identifier: str
    """

    _attribute_map = {
        'assign_time': {'key': 'assignTime', 'type': 'iso-8601'},
        'definition': {'key': 'definition', 'type': 'TaskOrchestrationOwner'},
        'owner': {'key': 'owner', 'type': 'TaskOrchestrationOwner'},
        'plan_group': {'key': 'planGroup', 'type': 'str'},
        'plan_id': {'key': 'planId', 'type': 'str'},
        'pool_id': {'key': 'poolId', 'type': 'int'},
        'queue_position': {'key': 'queuePosition', 'type': 'int'},
        'queue_time': {'key': 'queueTime', 'type': 'iso-8601'},
        'scope_identifier': {'key': 'scopeIdentifier', 'type': 'str'}
    }

    def __init__(self, assign_time=None, definition=None, owner=None, plan_group=None, plan_id=None, pool_id=None, queue_position=None, queue_time=None, scope_identifier=None):
        super(TaskOrchestrationQueuedPlan, self).__init__()
        self.assign_time = assign_time
        self.definition = definition
        self.owner = owner
        self.plan_group = plan_group
        self.plan_id = plan_id
        self.pool_id = pool_id
        self.queue_position = queue_position
        self.queue_time = queue_time
        self.scope_identifier = scope_identifier


class TaskOrchestrationQueuedPlanGroup(Model):
    """
    :param definition:
    :type definition: :class:`TaskOrchestrationOwner <azure.devops.v7_0.task.models.TaskOrchestrationOwner>`
    :param owner:
    :type owner: :class:`TaskOrchestrationOwner <azure.devops.v7_0.task.models.TaskOrchestrationOwner>`
    :param plan_group:
    :type plan_group: str
    :param plans:
    :type plans: list of :class:`TaskOrchestrationQueuedPlan <azure.devops.v7_0.task.models.TaskOrchestrationQueuedPlan>`
    :param project:
    :type project: :class:`ProjectReference <azure.devops.v7_0.task.models.ProjectReference>`
    :param queue_position:
    :type queue_position: int
    """

    _attribute_map = {
        'definition': {'key': 'definition', 'type': 'TaskOrchestrationOwner'},
        'owner': {'key': 'owner', 'type': 'TaskOrchestrationOwner'},
        'plan_group': {'key': 'planGroup', 'type': 'str'},
        'plans': {'key': 'plans', 'type': '[TaskOrchestrationQueuedPlan]'},
        'project': {'key': 'project', 'type': 'ProjectReference'},
        'queue_position': {'key': 'queuePosition', 'type': 'int'}
    }

    def __init__(self, definition=None, owner=None, plan_group=None, plans=None, project=None, queue_position=None):
        super(TaskOrchestrationQueuedPlanGroup, self).__init__()
        self.definition = definition
        self.owner = owner
        self.plan_group = plan_group
        self.plans = plans
        self.project = project
        self.queue_position = queue_position


class TaskReference(Model):
    """
    :param id:
    :type id: str
    :param inputs:
    :type inputs: dict
    :param name:
    :type name: str
    :param version:
    :type version: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '{str}'},
        'name': {'key': 'name', 'type': 'str'},
        'version': {'key': 'version', 'type': 'str'}
    }

    def __init__(self, id=None, inputs=None, name=None, version=None):
        super(TaskReference, self).__init__()
        self.id = id
        self.inputs = inputs
        self.name = name
        self.version = version


class TimelineAttempt(Model):
    """
    :param attempt: Gets or sets the attempt of the record.
    :type attempt: int
    :param identifier: Gets or sets the unique identifier for the record.
    :type identifier: str
    :param record_id: Gets or sets the record identifier located within the specified timeline.
    :type record_id: str
    :param timeline_id: Gets or sets the timeline identifier which owns the record representing this attempt.
    :type timeline_id: str
    """

    _attribute_map = {
        'attempt': {'key': 'attempt', 'type': 'int'},
        'identifier': {'key': 'identifier', 'type': 'str'},
        'record_id': {'key': 'recordId', 'type': 'str'},
        'timeline_id': {'key': 'timelineId', 'type': 'str'}
    }

    def __init__(self, attempt=None, identifier=None, record_id=None, timeline_id=None):
        super(TimelineAttempt, self).__init__()
        self.attempt = attempt
        self.identifier = identifier
        self.record_id = record_id
        self.timeline_id = timeline_id


class TimelineRecord(Model):
    """
    :param agent_specification:
    :type agent_specification: :class:`object <azure.devops.v7_0.task.models.object>`
    :param attempt:
    :type attempt: int
    :param current_operation:
    :type current_operation: str
    :param details:
    :type details: :class:`TimelineReference <azure.devops.v7_0.task.models.TimelineReference>`
    :param error_count:
    :type error_count: int
    :param finish_time:
    :type finish_time: datetime
    :param change_id:
    :type change_id: int
    :param id:
    :type id: str
    :param identifier:
    :type identifier: str
    :param issues:
    :type issues: list of :class:`Issue <azure.devops.v7_0.task.models.Issue>`
    :param last_modified:
    :type last_modified: datetime
    :param location:
    :type location: str
    :param log:
    :type log: :class:`TaskLogReference <azure.devops.v7_0.task.models.TaskLogReference>`
    :param name:
    :type name: str
    :param order:
    :type order: int
    :param parent_id:
    :type parent_id: str
    :param percent_complete:
    :type percent_complete: int
    :param previous_attempts:
    :type previous_attempts: list of :class:`TimelineAttempt <azure.devops.v7_0.task.models.TimelineAttempt>`
    :param queue_id:
    :type queue_id: int
    :param ref_name:
    :type ref_name: str
    :param result:
    :type result: object
    :param result_code:
    :type result_code: str
    :param start_time:
    :type start_time: datetime
    :param state:
    :type state: object
    :param task:
    :type task: :class:`TaskReference <azure.devops.v7_0.task.models.TaskReference>`
    :param type:
    :type type: str
    :param variables:
    :type variables: dict
    :param warning_count:
    :type warning_count: int
    :param worker_name:
    :type worker_name: str
    """

    _attribute_map = {
        'agent_specification': {'key': 'agentSpecification', 'type': 'object'},
        'attempt': {'key': 'attempt', 'type': 'int'},
        'current_operation': {'key': 'currentOperation', 'type': 'str'},
        'details': {'key': 'details', 'type': 'TimelineReference'},
        'error_count': {'key': 'errorCount', 'type': 'int'},
        'finish_time': {'key': 'finishTime', 'type': 'iso-8601'},
        'change_id': {'key': 'changeId', 'type': 'int'},
        'id': {'key': 'id', 'type': 'str'},
        'identifier': {'key': 'identifier', 'type': 'str'},
        'issues': {'key': 'issues', 'type': '[Issue]'},
        'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
        'location': {'key': 'location', 'type': 'str'},
        'log': {'key': 'log', 'type': 'TaskLogReference'},
        'name': {'key': 'name', 'type': 'str'},
        'order': {'key': 'order', 'type': 'int'},
        'parent_id': {'key': 'parentId', 'type': 'str'},
        'percent_complete': {'key': 'percentComplete', 'type': 'int'},
        'previous_attempts': {'key': 'previousAttempts', 'type': '[TimelineAttempt]'},
        'queue_id': {'key': 'queueId', 'type': 'int'},
        'ref_name': {'key': 'refName', 'type': 'str'},
        'result': {'key': 'result', 'type': 'object'},
        'result_code': {'key': 'resultCode', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'state': {'key': 'state', 'type': 'object'},
        'task': {'key': 'task', 'type': 'TaskReference'},
        'type': {'key': 'type', 'type': 'str'},
        'variables': {'key': 'variables', 'type': '{VariableValue}'},
        'warning_count': {'key': 'warningCount', 'type': 'int'},
        'worker_name': {'key': 'workerName', 'type': 'str'}
    }

    def __init__(self, agent_specification=None, attempt=None, current_operation=None, details=None, error_count=None, finish_time=None, change_id=None, id=None, identifier=None, issues=None, last_modified=None, location=None, log=None, name=None, order=None, parent_id=None, percent_complete=None, previous_attempts=None, queue_id=None, ref_name=None, result=None, result_code=None, start_time=None, state=None, task=None, type=None, variables=None, warning_count=None, worker_name=None):
        super(TimelineRecord, self).__init__()
        self.agent_specification = agent_specification
        self.attempt = attempt
        self.current_operation = current_operation
        self.details = details
        self.error_count = error_count
        self.finish_time = finish_time
        self.change_id = change_id
        self.id = id
        self.identifier = identifier
        self.issues = issues
        self.last_modified = last_modified
        self.location = location
        self.log = log
        self.name = name
        self.order = order
        self.parent_id = parent_id
        self.percent_complete = percent_complete
        self.previous_attempts = previous_attempts
        self.queue_id = queue_id
        self.ref_name = ref_name
        self.result = result
        self.result_code = result_code
        self.start_time = start_time
        self.state = state
        self.task = task
        self.type = type
        self.variables = variables
        self.warning_count = warning_count
        self.worker_name = worker_name


class TimelineRecordFeedLinesWrapper(Model):
    """
    :param count:
    :type count: int
    :param end_line:
    :type end_line: long
    :param start_line:
    :type start_line: long
    :param step_id:
    :type step_id: str
    :param value:
    :type value: list of str
    """

    _attribute_map = {
        'count': {'key': 'count', 'type': 'int'},
        'end_line': {'key': 'endLine', 'type': 'long'},
        'start_line': {'key': 'startLine', 'type': 'long'},
        'step_id': {'key': 'stepId', 'type': 'str'},
        'value': {'key': 'value', 'type': '[str]'}
    }

    def __init__(self, count=None, end_line=None, start_line=None, step_id=None, value=None):
        super(TimelineRecordFeedLinesWrapper, self).__init__()
        self.count = count
        self.end_line = end_line
        self.start_line = start_line
        self.step_id = step_id
        self.value = value


class TimelineReference(Model):
    """
    :param change_id:
    :type change_id: int
    :param id:
    :type id: str
    :param location:
    :type location: str
    """

    _attribute_map = {
        'change_id': {'key': 'changeId', 'type': 'int'},
        'id': {'key': 'id', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'}
    }

    def __init__(self, change_id=None, id=None, location=None):
        super(TimelineReference, self).__init__()
        self.change_id = change_id
        self.id = id
        self.location = location


class VariableValue(Model):
    """
    :param is_read_only:
    :type is_read_only: bool
    :param is_secret:
    :type is_secret: bool
    :param value:
    :type value: str
    """

    _attribute_map = {
        'is_read_only': {'key': 'isReadOnly', 'type': 'bool'},
        'is_secret': {'key': 'isSecret', 'type': 'bool'},
        'value': {'key': 'value', 'type': 'str'}
    }

    def __init__(self, is_read_only=None, is_secret=None, value=None):
        super(VariableValue, self).__init__()
        self.is_read_only = is_read_only
        self.is_secret = is_secret
        self.value = value


class TaskLog(TaskLogReference):
    """
    :param id:
    :type id: int
    :param location:
    :type location: str
    :param created_on:
    :type created_on: datetime
    :param index_location:
    :type index_location: str
    :param last_changed_on:
    :type last_changed_on: datetime
    :param line_count:
    :type line_count: long
    :param path:
    :type path: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'int'},
        'location': {'key': 'location', 'type': 'str'},
        'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
        'index_location': {'key': 'indexLocation', 'type': 'str'},
        'last_changed_on': {'key': 'lastChangedOn', 'type': 'iso-8601'},
        'line_count': {'key': 'lineCount', 'type': 'long'},
        'path': {'key': 'path', 'type': 'str'}
    }

    def __init__(self, id=None, location=None, created_on=None, index_location=None, last_changed_on=None, line_count=None, path=None):
        super(TaskLog, self).__init__(id=id, location=location)
        self.created_on = created_on
        self.index_location = index_location
        self.last_changed_on = last_changed_on
        self.line_count = line_count
        self.path = path


class TaskOrchestrationContainer(TaskOrchestrationItem):
    """
    :param item_type:
    :type item_type: object
    :param continue_on_error:
    :type continue_on_error: bool
    :param data:
    :type data: dict
    :param children:
    :type children: list of :class:`TaskOrchestrationItem <azure.devops.v7_0.task.models.TaskOrchestrationItem>`
    :param max_concurrency:
    :type max_concurrency: int
    :param parallel:
    :type parallel: bool
    :param rollback:
    :type rollback: :class:`TaskOrchestrationContainer <azure.devops.v7_0.task.models.TaskOrchestrationContainer>`
    """

    _attribute_map = {
        'item_type': {'key': 'itemType', 'type': 'object'},
        'continue_on_error': {'key': 'continueOnError', 'type': 'bool'},
        'data': {'key': 'data', 'type': '{str}'},
        'children': {'key': 'children', 'type': '[TaskOrchestrationItem]'},
        'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
        'parallel': {'key': 'parallel', 'type': 'bool'},
        'rollback': {'key': 'rollback', 'type': 'TaskOrchestrationContainer'}
    }

    def __init__(self, item_type=None, continue_on_error=None, data=None, children=None, max_concurrency=None, parallel=None, rollback=None):
        super(TaskOrchestrationContainer, self).__init__(item_type=item_type)
        self.continue_on_error = continue_on_error
        self.data = data
        self.children = children
        self.max_concurrency = max_concurrency
        self.parallel = parallel
        self.rollback = rollback


class TaskOrchestrationPlan(TaskOrchestrationPlanReference):
    """
    :param artifact_location:
    :type artifact_location: str
    :param artifact_uri:
    :type artifact_uri: str
    :param definition:
    :type definition: :class:`TaskOrchestrationOwner <azure.devops.v7_0.task.models.TaskOrchestrationOwner>`
    :param owner:
    :type owner: :class:`TaskOrchestrationOwner <azure.devops.v7_0.task.models.TaskOrchestrationOwner>`
    :param plan_group:
    :type plan_group: str
    :param plan_id:
    :type plan_id: str
    :param plan_type:
    :type plan_type: str
    :param scope_identifier:
    :type scope_identifier: str
    :param version:
    :type version: int
    :param environment:
    :type environment: :class:`PlanEnvironment <azure.devops.v7_0.task.models.PlanEnvironment>`
    :param expanded_yaml:
    :type expanded_yaml: :class:`TaskLogReference <azure.devops.v7_0.task.models.TaskLogReference>`
    :param finish_time:
    :type finish_time: datetime
    :param implementation:
    :type implementation: :class:`TaskOrchestrationContainer <azure.devops.v7_0.task.models.TaskOrchestrationContainer>`
    :param initialization_log:
    :type initialization_log: :class:`TaskLogReference <azure.devops.v7_0.task.models.TaskLogReference>`
    :param requested_by_id:
    :type requested_by_id: str
    :param requested_for_id:
    :type requested_for_id: str
    :param result:
    :type result: object
    :param result_code:
    :type result_code: str
    :param start_time:
    :type start_time: datetime
    :param state:
    :type state: object
    :param timeline:
    :type timeline: :class:`TimelineReference <azure.devops.v7_0.task.models.TimelineReference>`
    """

    _attribute_map = {
        'artifact_location': {'key': 'artifactLocation', 'type': 'str'},
        'artifact_uri': {'key': 'artifactUri', 'type': 'str'},
        'definition': {'key': 'definition', 'type': 'TaskOrchestrationOwner'},
        'owner': {'key': 'owner', 'type': 'TaskOrchestrationOwner'},
        'plan_group': {'key': 'planGroup', 'type': 'str'},
        'plan_id': {'key': 'planId', 'type': 'str'},
        'plan_type': {'key': 'planType', 'type': 'str'},
        'scope_identifier': {'key': 'scopeIdentifier', 'type': 'str'},
        'version': {'key': 'version', 'type': 'int'},
        'environment': {'key': 'environment', 'type': 'PlanEnvironment'},
        'expanded_yaml': {'key': 'expandedYaml', 'type': 'TaskLogReference'},
        'finish_time': {'key': 'finishTime', 'type': 'iso-8601'},
        'implementation': {'key': 'implementation', 'type': 'TaskOrchestrationContainer'},
        'initialization_log': {'key': 'initializationLog', 'type': 'TaskLogReference'},
        'requested_by_id': {'key': 'requestedById', 'type': 'str'},
        'requested_for_id': {'key': 'requestedForId', 'type': 'str'},
        'result': {'key': 'result', 'type': 'object'},
        'result_code': {'key': 'resultCode', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'state': {'key': 'state', 'type': 'object'},
        'timeline': {'key': 'timeline', 'type': 'TimelineReference'}
    }

    def __init__(self, artifact_location=None, artifact_uri=None, definition=None, owner=None, plan_group=None, plan_id=None, plan_type=None, scope_identifier=None, version=None, environment=None, expanded_yaml=None, finish_time=None, implementation=None, initialization_log=None, requested_by_id=None, requested_for_id=None, result=None, result_code=None, start_time=None, state=None, timeline=None):
        super(TaskOrchestrationPlan, self).__init__(artifact_location=artifact_location, artifact_uri=artifact_uri, definition=definition, owner=owner, plan_group=plan_group, plan_id=plan_id, plan_type=plan_type, scope_identifier=scope_identifier, version=version)
        self.environment = environment
        self.expanded_yaml = expanded_yaml
        self.finish_time = finish_time
        self.implementation = implementation
        self.initialization_log = initialization_log
        self.requested_by_id = requested_by_id
        self.requested_for_id = requested_for_id
        self.result = result
        self.result_code = result_code
        self.start_time = start_time
        self.state = state
        self.timeline = timeline


class Timeline(TimelineReference):
    """
    :param change_id:
    :type change_id: int
    :param id:
    :type id: str
    :param location:
    :type location: str
    :param last_changed_by:
    :type last_changed_by: str
    :param last_changed_on:
    :type last_changed_on: datetime
    :param records:
    :type records: list of :class:`TimelineRecord <azure.devops.v7_0.task.models.TimelineRecord>`
    """

    _attribute_map = {
        'change_id': {'key': 'changeId', 'type': 'int'},
        'id': {'key': 'id', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'last_changed_by': {'key': 'lastChangedBy', 'type': 'str'},
        'last_changed_on': {'key': 'lastChangedOn', 'type': 'iso-8601'},
        'records': {'key': 'records', 'type': '[TimelineRecord]'}
    }

    def __init__(self, change_id=None, id=None, location=None, last_changed_by=None, last_changed_on=None, records=None):
        super(Timeline, self).__init__(change_id=change_id, id=id, location=location)
        self.last_changed_by = last_changed_by
        self.last_changed_on = last_changed_on
        self.records = records


__all__ = [
    'Issue',
    'JobOption',
    'MaskHint',
    'PlanEnvironment',
    'ProjectReference',
    'ReferenceLinks',
    'TaskAgentJob',
    'TaskAgentJobStep',
    'TaskAgentJobTask',
    'TaskAgentJobVariable',
    'TaskAttachment',
    'TaskLogReference',
    'TaskOrchestrationItem',
    'TaskOrchestrationOwner',
    'TaskOrchestrationPlanGroupsQueueMetrics',
    'TaskOrchestrationPlanReference',
    'TaskOrchestrationQueuedPlan',
    'TaskOrchestrationQueuedPlanGroup',
    'TaskReference',
    'TimelineAttempt',
    'TimelineRecord',
    'TimelineRecordFeedLinesWrapper',
    'TimelineReference',
    'VariableValue',
    'TaskLog',
    'TaskOrchestrationContainer',
    'TaskOrchestrationPlan',
    'Timeline',
]
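
A minimal usage sketch (an editorial addition, not part of the generated file): msrest models like those above are typically round-tripped through msrest's Serializer and Deserializer. The two-entry model registry and the field values below are illustrative assumptions.

from msrest import Serializer, Deserializer

# Register the model classes, as the generated clients do for their modules.
client_models = {'Issue': Issue, 'TaskLogReference': TaskLogReference}
serializer = Serializer(client_models)
deserializer = Deserializer(client_models)

# Serialize a model to a plain dict using the _attribute_map above...
issue = Issue(category="General", message="Something went wrong", type="error")
body = serializer.body(issue, 'Issue')

# ...and turn a dict (for example, a parsed HTTP response body) back into a model.
round_tripped = deserializer('Issue', body)
assert round_tripped.message == "Something went wrong"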
azure-devops-python-api/azure-devops/azure/devops/v7_0/task/models.py/0
{ "file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_0/task/models.py", "repo_id": "azure-devops-python-api", "token_count": 14342 }
355
include *.txt
include *.py
include *.md
recursive-include azure *.py
exclude MANIFEST.in
exclude environment.yml
recursive-exclude azure *.typed
recursive-exclude eng *
recursive-exclude tests *
azure-quantum-python/azure-quantum/MANIFEST.in/0
{ "file_path": "azure-quantum-python/azure-quantum/MANIFEST.in", "repo_id": "azure-quantum-python", "token_count": 65 }
356
##
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
##
import abc

from azure.quantum.job.session import SessionHost

from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    import cirq
    from azure.quantum import Job as AzureJob
    from azure.quantum import Workspace
    from azure.quantum.cirq.job import Job as CirqJob


class Target(abc.ABC, SessionHost):
    """Abstract base class for Cirq targets"""

    @abc.abstractstaticmethod
    def _translate_cirq_circuit(circuit):
        """Translate Cirq circuit to native provider format."""
        pass

    @classmethod
    def _translate_circuit(cls, circuit: Any):
        """Translate circuit into native provider format"""
        try:
            return cls._translate_cirq_circuit(circuit)
        except Exception as e:
            raise ValueError(
                f"Cannot translate circuit of type {circuit.__class__}: {e}")

    @abc.abstractstaticmethod
    def _to_cirq_result(result: Any) -> "cirq.Result":
        """Convert native hardware result to cirq.Result"""
        pass

    @abc.abstractmethod
    def _to_cirq_job(self, azure_job: "AzureJob", *args, **kwargs):
        """Convert Azure job to Cirq job"""
        pass

    @abc.abstractmethod
    def submit(
        self,
        program: "cirq.Circuit",
        name: str = "cirq-job",
        repetitions: int = 500,
        **kwargs
    ) -> "CirqJob":
        """Submit a Cirq quantum circuit

        :param program: Quantum program
        :type program: cirq.Circuit
        :param name: Job name
        :type name: str
        :param repetitions: Number of shots, defaults to provider default value
        :type repetitions: int
        :return: Azure Quantum job
        :rtype: Job
        """
        pass

    @abc.abstractmethod
    def _get_azure_workspace(self) -> "Workspace":
        raise NotImplementedError

    @abc.abstractmethod
    def _get_azure_target_id(self) -> str:
        raise NotImplementedError

    @abc.abstractmethod
    def _get_azure_provider_id(self) -> str:
        raise NotImplementedError
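
A hypothetical sketch (editorial addition) of how a concrete target plugs into this interface: only the translation hook is shown, and using cirq's JSON serialization as the "native" wire format is an assumption, not a real provider's format. Because _translate_circuit is a classmethod, it can be exercised without implementing the remaining abstract methods.

import cirq

class JsonTarget(Target):
    """Toy target that translates circuits to cirq's JSON serialization."""

    @staticmethod
    def _translate_cirq_circuit(circuit):
        return cirq.to_json(circuit)

q0, q1 = cirq.LineQubit.range(2)
bell = cirq.Circuit([cirq.H(q0), cirq.CNOT(q0, q1)])

# _translate_circuit wraps any translation failure in a ValueError.
payload = JsonTarget._translate_circuit(bell)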
azure-quantum-python/azure-quantum/azure/quantum/cirq/targets/target.py/0
{ "file_path": "azure-quantum-python/azure-quantum/azure/quantum/cirq/targets/target.py", "repo_id": "azure-quantum-python", "token_count": 885 }
357
##
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
##
from typing import TYPE_CHECKING, Dict
from azure.quantum.version import __version__
from azure.quantum.target.rigetti import RigettiTarget
from abc import abstractmethod
from .backend import AzureQirBackend

from qiskit.providers.models import BackendConfiguration
from qiskit.providers import Options, Provider

QIR_BASIS_GATES = [
    "measure",
    "m",
    "cx",
    "cz",
    "h",
    "reset",
    "rx",
    "ry",
    "rz",
    "s",
    "sdg",
    "t",
    "tdg",
    "x",
    "y",
    "z",
    "id",
]

if TYPE_CHECKING:
    from azure.quantum.qiskit import AzureQuantumProvider

import logging

logger = logging.getLogger(__name__)

__all__ = ["RigettiSimulatorBackend", "RigettiQPUBackend"]

_DEFAULT_SHOTS_COUNT = 500


class RigettiBackend(AzureQirBackend):
    """Base class for interfacing with a Rigetti backend in Azure Quantum"""

    _SHOTS_PARAM_NAME = "count"

    @abstractmethod
    def __init__(
        self, configuration: BackendConfiguration, provider: Provider = None, **fields
    ):
        super().__init__(configuration, provider, **fields)

    @classmethod
    def _default_options(cls):
        other_options = {
            cls._SHOTS_PARAM_NAME: _DEFAULT_SHOTS_COUNT,
        }
        return Options(targetCapability="BasicExecution", **other_options)

    def _azure_config(self) -> Dict[str, str]:
        config = super()._azure_config()
        config.update(
            {
                "provider_id": "rigetti",
            }
        )
        return config


class RigettiSimulatorBackend(RigettiBackend):
    backend_names = RigettiTarget.simulators()

    def __init__(self, name: str, provider: "AzureQuantumProvider", **kwargs):
        """Base class for interfacing with a Rigetti Simulator backend"""
        default_config = BackendConfiguration.from_dict(
            {
                "backend_name": name,
                "backend_version": __version__,
                "simulator": True,
                "local": False,
                "coupling_map": None,
                "description": "Rigetti simulator on Azure Quantum",
                "basis_gates": QIR_BASIS_GATES,
                "memory": False,
                "n_qubits": RigettiTarget.num_qubits(name),
                "conditional": False,
                "max_shots": 10000,
                "max_experiments": 1,
                "open_pulse": False,
                "gates": [{"name": "TODO", "parameters": [], "qasm_def": "TODO"}],
                "azure": self._azure_config(),
                "is_default": True,
            }
        )
        logger.info("Initializing RigettiSimulatorBackend")
        configuration: BackendConfiguration = kwargs.pop(
            "configuration", default_config
        )
        super().__init__(configuration=configuration, provider=provider, **kwargs)


class RigettiQPUBackend(RigettiBackend):
    backend_names = RigettiTarget.qpus()

    def __init__(self, name: str, provider: "AzureQuantumProvider", **kwargs):
        """Base class for interfacing with a Rigetti QPU backend"""
        default_config = BackendConfiguration.from_dict(
            {
                "backend_name": name,
                "backend_version": __version__,
                "simulator": False,
                "local": False,
                "coupling_map": None,
                "description": "Rigetti QPU on Azure Quantum",
                "basis_gates": QIR_BASIS_GATES,
                "memory": False,
                "n_qubits": RigettiTarget.num_qubits(name),
                "conditional": False,
                "max_shots": 10000,
                "max_experiments": 1,
                "open_pulse": False,
                "gates": [{"name": "TODO", "parameters": [], "qasm_def": "TODO"}],
                "azure": self._azure_config(),
                "is_default": True,
            }
        )
        logger.info("Initializing RigettiQPUBackend")
        configuration: BackendConfiguration = kwargs.pop(
            "configuration", default_config
        )
        super().__init__(configuration=configuration, provider=provider, **kwargs)
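
A hypothetical usage sketch (editorial addition): submitting a Qiskit circuit to one of these backends through the provider. It assumes a configured Azure Quantum workspace with the Rigetti provider enabled; the resource id, location, and target name below are placeholders/assumptions.

from qiskit import QuantumCircuit
from azure.quantum import Workspace
from azure.quantum.qiskit import AzureQuantumProvider

workspace = Workspace(resource_id="...", location="...")  # placeholders
provider = AzureQuantumProvider(workspace)

circuit = QuantumCircuit(1, 1)
circuit.h(0)
circuit.measure(0, 0)

# Shots map onto the "count" option (_SHOTS_PARAM_NAME above);
# _DEFAULT_SHOTS_COUNT (500) applies when unspecified.
backend = provider.get_backend("rigetti.sim.qvm")  # target name is an assumption
job = backend.run(circuit, shots=100)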
azure-quantum-python/azure-quantum/azure/quantum/qiskit/backends/rigetti.py/0
{ "file_path": "azure-quantum-python/azure-quantum/azure/quantum/qiskit/backends/rigetti.py", "repo_id": "azure-quantum-python", "token_count": 1983 }
358
##
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
##
from dataclasses import dataclass, field
from re import sub
from typing import Any, Dict, List, Optional, Tuple, Type, Callable

from ..argument_types import EmptyArray, Pauli, Range, Result

__all__ = ["InputParams"]


class EntryPointArguments:
    """
    Wrapper class to set QIR entry point arguments.

    This class is used to set QIR entry point arguments inside the
    InputParamsItem class. It overrides the __setitem__ method to
    automatically detect the entry point's argument from the passed value.
    """

    # Maps supported Python types to QIR entry point type names and a callable
    # that extracts the serialized value from the original value. (list (=
    # Array) is handled as a special case.)
    type_map: Dict[type, Tuple[str, Callable]] = {
        int: ("Int", lambda v: v),
        float: ("Double", lambda v: v),
        bool: ("Boolean", lambda v: v),
        str: ("String", lambda v: v),
        Pauli: ("Pauli", lambda v: v.value),
        Result: ("Result", lambda v: v.value),
        Range: ("Range", lambda v: v.value)
    }

    def __init__(self):
        self.entries = []

    def __setitem__(self, name: str, value: Any):
        """
        Creates entry point argument entry and automatically determines the
        type from value.
        """
        if type(value) == list:
            if len(value) == 0:
                raise ValueError("Use EmptyArray(type) to assign an empty "
                                 "array")
            else:
                first = value[0]
                first_value, first_type = self._extract_value_and_type(first)
                values = [first_value]
                for next in value[1:]:
                    next_value, next_type = self._extract_value_and_type(next)
                    if next_type != first_type:
                        raise TypeError("All elements in a list must have "
                                        "the same type")
                    values.append(next_value)
                self.entries.append(
                    {"name": name, "value": values, "type": "Array",
                     "elementType": first_type})
        elif type(value) == EmptyArray:
            element_type = self._extract_type(value.element_type)
            self.entries.append(
                {"name": name, "value": [], "type": "Array",
                 "elementType": element_type})
        else:
            entry_value, entry_type = self._extract_value_and_type(value)
            self.entries.append(
                {"name": name, "value": entry_value, "type": entry_type})

    def _extract_type(self, type: Type) -> str:
        """
        Convert Python type to QIR entry point argument type name.
        """
        if type in self.type_map:
            return self.type_map[type][0]
        elif type == list:
            raise TypeError("Nested lists are not supported")
        else:
            type_name = type.__name__
            raise TypeError(f"Unsupported type {type_name}")

    def _extract_value_and_type(self, value: Any) -> Tuple[Any, str]:
        """
        Convert Python value to QIR entry point argument type name and
        serialized value.
        """
        if type(value) in self.type_map:
            entry_type, entry_value_func = self.type_map[type(value)]
            return entry_value_func(value), entry_type
        elif type(value) == list:
            raise TypeError("Nested lists are not supported")
        else:
            type_name = type(value).__name__
            raise TypeError(f"Unsupported type {type_name} for {value}")


@dataclass
class AutoValidatingParams:
    """
    A helper class for target parameters.

    It has a function as_dict that automatically extracts a dictionary from
    the class' fields. They are added to the result dictionary if their value
    is not None, the key is automatically transformed from Python snake case
    to camel case, and if validate is True and if the field has a validation
    function, the field is validated beforehand.
    """
    def as_dict(self, validate=True):
        result = {}

        for name, field in self.__dataclass_fields__.items():
            field_value = self.__getattribute__(name)
            if field_value is not None:
                # validate field?
                if validate and "validate" in field.metadata:
                    func = field.metadata["validate"]
                    # check for indirect call (like in @staticmethod)
                    if hasattr(func, "__func__"):
                        func = func.__func__
                    func(name, field_value)

                # translate field name to camel case
                s = sub(r"(_|-)+", " ", name).title().replace(" ", "")
                attribute = ''.join([s[0].lower(), s[1:]])
                result[attribute] = field_value

        if validate:
            self.post_validation(result)

        return result

    def post_validation(self, result):
        """
        A function that is called after all individual fields have been
        validated, but before the result is returned.

        Here result is the current dictionary.
        """
        pass


def validating_field(validation_func, default=None):
    """
    A helper method to declare field for an AutoValidatingParams data class.
    """
    return field(default=default, metadata={"validate": validation_func})


class InputParamsItem:
    """
    Base class for input parameters.

    This class serves both as the base class for InputParams as well as for
    the items in the InputParams.
    """
    def __init__(self):
        # all input param items may have an entry point name and a list of
        # arguments
        self.entry_point: Optional[str] = None
        self.arguments = EntryPointArguments()

    def as_dict(self, validate=True) -> Dict[str, Any]:
        """
        Returns input params as a dictionary.
        """
        result = {}

        if self.entry_point is not None:
            result['entryPoint'] = self.entry_point

        if len(self.arguments.entries) > 0:
            result['arguments'] = self.arguments.entries

        return result


class InputParams(InputParamsItem):
    """
    Class to define input parameters.

    This class allows defining input parameters for non-batching and
    batching jobs.  The instance represents a batching job, if and only if
    num_items is set to some positive number less or equal to MAX_NUM_ITEMS.

    Both this class and the items in this class are based on InputParamsItem
    as a template, which can be overriden for specializations created by a
    target.

    This class should never be constructed directly but only through the
    InputParams.make_params method.
    """

    MAX_NUM_ITEMS: int = 1000

    def __init__(
            self,
            num_items: Optional[int] = None,
            item_type: Type[InputParamsItem] = InputParamsItem):
        """
        Constructs an InputParams instance.

        The item_type argument should be set by targets that override
        InputParams and have a specialized InputParamsItem class.
        """
        item_type.__init__(self)

        # fileURIs
        self.file_uris = {}

        if num_items is not None:
            self.has_items = True
            if num_items <= 0 or num_items > self.MAX_NUM_ITEMS:
                raise ValueError(
                    "num_items must be a positive value less or equal to "
                    f"{self.MAX_NUM_ITEMS}")
            self._items = [item_type() for _ in range(num_items)]
        else:
            self.has_items = False

        self.item_type = item_type

    @property
    def items(self) -> List:
        if self.has_items:
            return self._items
        else:
            raise Exception("Cannot access items in a non-batching job, call "
                            "make_params with num_items parameter")

    def as_dict(self, validate=True) -> Dict[str, Any]:
        """
        Constructs a dictionary from the input params.

        For batching jobs, top-level entries are merged into item entries.
        Item entries have priority in case they are specified.
        """
        # initialize result and set type hint
        result: Dict[str, Any] = self.item_type.as_dict(self, validate)

        if self.has_items:
            result["items"] = [item.as_dict(validate)
                               for item in self._items]
            # In case of batching, no need to stop if failing an item
            result["resumeAfterFailedItem"] = True

        # add fileUris if existing
        if len(self.file_uris) > 0:
            result["fileUris"] = self.file_uris

        return result
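
An illustrative sketch (editorial addition) of how the classes above combine for a batching job; the entry point and argument names are made up for the example.

params = InputParams(num_items=2)
params.entry_point = "main"  # shared by all items unless overridden per item

# __setitem__ infers QIR types: 0.5 -> Double, a Pauli list -> Array of Pauli.
params.items[0].arguments["angle"] = 0.5
params.items[1].arguments["paulis"] = [Pauli.X, Pauli.Z]

payload = params.as_dict()
# payload contains "entryPoint", the per-item "items" entries, and
# "resumeAfterFailedItem": True because this is a batching job.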
azure-quantum-python/azure-quantum/azure/quantum/target/params.py/0
{ "file_path": "azure-quantum-python/azure-quantum/azure/quantum/target/params.py", "repo_id": "azure-quantum-python", "token_count": 3817 }
359
# Azure Quantum Python API examples

* [Resource estimator examples](https://github.com/microsoft/qdk-python/tree/main/azure-quantum/examples/resource_estimation)
azure-quantum-python/azure-quantum/examples/README.md/0
{ "file_path": "azure-quantum-python/azure-quantum/examples/README.md", "repo_id": "azure-quantum-python", "token_count": 47 }
360
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

<#
    .SYNOPSIS
        Install-Artifacts: set up a Python environment using Anaconda
#>

$PackageDir = Split-Path -parent $PSScriptRoot;
$PackageName = $PackageDir | Split-Path -Leaf;
$RootDir = Split-Path -parent $PackageDir;

Import-Module (Join-Path $RootDir "build" "conda-utils.psm1");
Import-Module (Join-Path $RootDir "build" "package-utils.psm1");

# Enable conda hook
Enable-Conda

NewCondaEnvForPackage -PackageName $PackageName

if (-not $Env:PYTHON_OUTDIR) {
    "" | Write-Host
    "== Environment variable PYTHON_OUTDIR is not set. " | Write-Host
    "== We will install $PackageName from source." | Write-Host
    "" | Write-Host

    Install-PackageInEnv -PackageName "$PackageName[all]" -FromSource $True

    "" | Write-Host
    "== $PackageName installed from source. ==" | Write-Host
    "" | Write-Host
}
elseif (-not (Test-Path $Env:PYTHON_OUTDIR)) {
    "" | Write-Warning
    "== The environment variable PYTHON_OUTDIR is set, but pointing to an invalid location ($Env:PYTHON_OUTDIR)" | Write-Warning
    "== To use build artifacts, download the artifacts locally and point the variable to this folder." | Write-Warning
    "" | Write-Warning
    Exit 1
}
# this condition is used by the E2E Live test pipeline
elseif ($Env:PICK_QDK_VERSION -eq "auto") {
    "== Installing latest published $PackageName package from PyPI..." | Write-Host
    Install-PackageInEnv -PackageName $PackageName -FromSource $False
}
else {
    "== Preparing environment to use artifacts with version '$Env:PYTHON_VERSION' " | Write-Host
    "== from '$Env:PYTHON_OUTDIR'" | Write-Host

    if ($Env:PYTHON_VERSION) {
        $NameAndVersion = "$PackageName[all]==$($Env:PYTHON_VERSION)"
    } else {
        $NameAndVersion = "$PackageName[all]"
    }

    Install-PackageInEnv -PackageName $NameAndVersion -FromSource $False -BuildArtifactPath $Env:PYTHON_OUTDIR

    "" | Write-Host
    "== $PackageName installed from build artifacts. ==" | Write-Host
    "" | Write-Host
}
azure-quantum-python/azure-quantum/tests.live/Install-Artifacts.ps1/0
{ "file_path": "azure-quantum-python/azure-quantum/tests.live/Install-Artifacts.ps1", "repo_id": "azure-quantum-python", "token_count": 741 }
361
##
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
##
import pytest
import numpy as np

from azure.quantum._client.models import CostEstimate, UsageEvent
from azure.quantum.target import IonQ

from common import QuantumTestBase, DEFAULT_TIMEOUT_SECS


class TestIonQ(QuantumTestBase):
    def _3_qubit_ghz(self):
        return {
            "qubits": 3,
            "circuit": [
                {
                    "gate": "h",
                    "target": 0
                },
                {
                    "gate": "cnot",
                    "control": 0,
                    "target": 1
                },
                {
                    "gate": "cnot",
                    "control": 0,
                    "target": 2
                },
            ]
        }

    @pytest.mark.ionq
    def test_estimate_cost_ionq(self):
        workspace = self.create_workspace()
        circuit = self._3_qubit_ghz()

        target = IonQ(workspace=workspace, name="ionq.simulator")
        cost = target.estimate_cost(circuit, shots=100e3)
        self.assertEqual(cost.estimated_total, 0.0)

        target = IonQ(workspace=workspace, name="ionq.qpu")
        cost = target.estimate_cost(circuit, shots=100e3)
        self.assertEqual(np.round(cost.estimated_total), 63.0)

    @pytest.mark.ionq
    @pytest.mark.live_test
    def test_job_submit_ionq(self):
        self._test_job_submit_ionq(shots=None)

    @pytest.mark.ionq
    @pytest.mark.live_test
    def test_job_submit_ionq_100_shots(self):
        self._test_job_submit_ionq(shots=100)

    @pytest.mark.ionq
    @pytest.mark.live_test
    def test_job_submit_ionq_100_shots_with_deprecated_num_shots(self):
        # Call submit with the deprecated 'num_shots' argument; it should emit
        # a deprecation warning.
        with pytest.warns(
            DeprecationWarning,
            match="The 'num_shots' parameter will be deprecated. Please, use 'shots' parameter instead."
        ):
            self._test_job_submit_ionq(shots=100, shots_as_deprecated_num_shots=True)

    @pytest.mark.ionq
    @pytest.mark.live_test
    def test_job_submit_ionq_with_shots_and_num_shots(self):
        workspace = self.create_workspace()
        circuit = self._3_qubit_ghz()
        target = IonQ(workspace=workspace)

        shots = 100

        with pytest.warns(
            DeprecationWarning,
            match="Both 'shots' and 'num_shots' parameters were specified. Defaulting to 'shots' parameter. "
                  "Please, use 'shots' since 'num_shots' will be deprecated."
        ):
            job = target.submit(
                circuit=circuit,
                shots=shots,
                num_shots=10,
            )

        job.wait_until_completed(timeout_secs=DEFAULT_TIMEOUT_SECS)
        assert job.details.input_params["shots"] == shots

    @pytest.mark.ionq
    @pytest.mark.live_test
    def test_job_submit_ionq_with_shots_from_input_params(self):
        workspace = self.create_workspace()
        circuit = self._3_qubit_ghz()
        target = IonQ(workspace=workspace)

        shots = 100

        job = target.submit(
            circuit=circuit,
            input_params={"shots": shots},
        )

        job.wait_until_completed(timeout_secs=DEFAULT_TIMEOUT_SECS)
        assert job.details.input_params["shots"] == shots

    @pytest.mark.ionq
    @pytest.mark.live_test
    def test_job_submit_ionq_with_conflicting_shots_from_input_params(self):
        workspace = self.create_workspace()
        circuit = self._3_qubit_ghz()
        target = IonQ(workspace=workspace)

        shots = 100

        with pytest.warns(
            match="Parameter 'shots' conflicts with the 'shots' field of the 'input_params' parameter. "
                  "Please, provide only one option for setting shots. Defaulting to 'shots' parameter.",
        ):
            job = target.submit(
                circuit=circuit,
                shots=shots,
                input_params={"shots": 20},
            )

        job.wait_until_completed(timeout_secs=DEFAULT_TIMEOUT_SECS)
        assert job.details.input_params["shots"] == shots

    @pytest.mark.ionq
    @pytest.mark.live_test
    def test_job_submit_ionq_cost_estimate(self):
        job = self._test_job_submit_ionq(shots=None)
        self.assertIsNotNone(job.details)
        cost_estimate: CostEstimate = job.details.cost_estimate
        self.assertIsNotNone(cost_estimate)
        self.assertEqual(cost_estimate.currency_code, "USD")
        events: list[UsageEvent] = cost_estimate.events
        self.assertGreater(len(events), 0)
        self.assertGreaterEqual(cost_estimate.estimated_total, 0)

    def _test_job_submit_ionq(
        self,
        shots: int = None,
        shots_as_deprecated_num_shots: bool = False,
        circuit=None
    ):
        workspace = self.create_workspace()
        if circuit is None:
            circuit = self._3_qubit_ghz()
        target = IonQ(workspace=workspace)
        self.assertEqual("ionq.simulator", target.name)
        self.assertEqual("ionq.circuit.v1", target.input_data_format)
        self.assertEqual("ionq.quantum-results.v1", target.output_data_format)
        self.assertEqual("IonQ", target.provider_id)
        self.assertEqual("application/json", target.content_type)
        self.assertEqual("", target.encoding)

        additional_kwargs = {}
        if shots is not None:
            if shots_as_deprecated_num_shots:
                additional_kwargs["num_shots"] = shots
            else:
                additional_kwargs["shots"] = shots

        job = target.submit(
            circuit=circuit,
            name="ionq-3ghz-job",
            **additional_kwargs,
        )

        job.wait_until_completed(timeout_secs=DEFAULT_TIMEOUT_SECS)

        # Check if job succeeded
        self.assertEqual(True, job.has_completed())
        self.assertEqual(job.details.status, "Succeeded")
        self.resume_recording()

        job.refresh()

        job = workspace.get_job(job.id)
        self.assertEqual(True, job.has_completed())

        if job.has_completed():
            results = job.get_results(timeout_secs=DEFAULT_TIMEOUT_SECS)
            self.assertIn("histogram", results)
            self.assertEqual(results["histogram"]["0"], 0.5)
            self.assertEqual(results["histogram"]["7"], 0.5)

        if shots is not None:
            self.assertEqual(job.details.input_params.get("shots"), shots)
        else:
            self.assertIsNone(job.details.input_params.get("shots"))

        return job

    @pytest.mark.ionq
    @pytest.mark.live_test
    def test_ionq_qpu_target(self):
        workspace = self.create_workspace()
        target = IonQ(workspace=workspace, name="ionq.qpu")
        self.assertEqual("ionq.qpu", target.name)
        self.assertEqual("ionq.circuit.v1", target.input_data_format)
        self.assertEqual("ionq.quantum-results.v1", target.output_data_format)
        self.assertEqual("IonQ", target.provider_id)
        self.assertEqual("application/json", target.content_type)
        self.assertEqual("", target.encoding)
{ "file_path": "azure-quantum-python/azure-quantum/tests/unit/test_ionq.py", "repo_id": "azure-quantum-python", "token_count": 3419 }
362
<jupyter_start><jupyter_text>👋🌍 Hello, world: Submit a Qiskit job to IonQ

In this notebook, we'll review the basics of Azure Quantum by submitting a simple *job*, or quantum program, to [IonQ](https://ionq.com/). We will use [Qiskit](https://qiskit.org/) to express the quantum job.

Submit a simple job to IonQ using Azure Quantum

Azure Quantum provides several ways to express quantum programs. In this example we are using Qiskit, but note that Q# and Cirq are also supported. All code in this example will be written in Python.

Let's begin. When you see a code block, hover over it and click the triangle play-button to execute it. To avoid any compilation issues, this should be done in order from top to bottom.

1. Connect to the Azure Quantum workspace

To connect to the Azure Quantum service, construct an instance of the `AzureQuantumProvider`. Note that it's imported from `azure.quantum.qiskit`.<jupyter_code>from azure.quantum import Workspace
from azure.quantum.qiskit import AzureQuantumProvider

workspace = Workspace(
    resource_id = "",
    location = "",
)

provider = AzureQuantumProvider(workspace)<jupyter_output><empty_output><jupyter_text>Let's see what providers and targets are enabled in this workspace with the following command:<jupyter_code>from qiskit import QuantumCircuit
from qiskit.visualization import plot_histogram

print("This workspace's targets:")
for backend in provider.backends():
    print("- " + backend.name())<jupyter_output><empty_output><jupyter_text>❕ Do you see `ionq.simulator` in your list of targets? If so, you're ready to keep going.

Don't see it? You may need to add IonQ to your workspace to run this sample. Navigate to the **Providers** page in the portal and click **+Add** to add the IonQ provider. Don't worry, there's a free credits plan available.

IonQ: The quantum provider

Azure Quantum partners with third-party companies to deliver solutions to quantum jobs. These company offerings are called *providers*. Each provider can offer multiple *targets* with different capabilities. See the table below for IonQ's targets.

| Target name | Target ID | Number of qubits | Description |
| --- | --- | --- | --- |
| Quantum simulator | `ionq.simulator` | 29 qubits | IonQ's cloud-based idealized simulator. Free of cost. |
| Aria 1 | `ionq.qpu.aria-1` | 23 qubits | IonQ's Aria 1 trapped-ion quantum computer. This is real quantum hardware, not a simulation. |
| Quantum computer | `ionq.qpu` | 11 qubits | IonQ's trapped-ion quantum computer. This is real quantum hardware, not a simulation. |

For this example, we will use `ionq.simulator`. To learn more about IonQ's targets, check out our [documentation](https://learn.microsoft.com/azure/quantum/provider-ionq).

2. Build the quantum program

Let's create a simple Qiskit circuit to run.<jupyter_code># Create a quantum circuit acting on a single qubit
circuit = QuantumCircuit(1,1)
circuit.name = "Single qubit random"
circuit.h(0)
circuit.measure(0, 0)

# Print out the circuit
circuit.draw()<jupyter_output><empty_output><jupyter_text>The circuit you built is a simple quantum random bit generator. With IonQ's idealized simulator, we will be able to calculate the probability of measuring a `1` or `0`.

3. Submit the quantum program to IonQ<jupyter_code># Create an object that represents IonQ's simulator target, "ionq.simulator".
#   Note that any target you have enabled in this workspace can
#   be used here. Azure Quantum makes it extremely easy to submit
#   the same quantum program to different providers.
ionq_simulator_backend = provider.get_backend("ionq.simulator")

# Using the IonQ simulator target, call "run" to submit the job. We'll
# use 100 shots (simulated runs).
job = ionq_simulator_backend.run(circuit, shots=100)
print("Job id:", job.id())<jupyter_output><empty_output><jupyter_text>The job ID can be used to retrieve the results later using the [get_job method](https://learn.microsoft.com/python/azure-quantum/azure.quantum.workspace?azure-quantum-workspace-get-job) or by viewing it under the **Job management** section of the portal.

4. Obtain the job results

This may take a minute or so ⏳. Your job will be packaged and sent to IonQ, where it will wait its turn to be run.<jupyter_code>result = job.result()

# The result object is native to the Qiskit package, so we can use Qiskit's tools to print the result as a histogram.
plot_histogram(result.get_counts(circuit), title="Result")<jupyter_output><empty_output><jupyter_text>**See the histogram above? Congratulations, you've submitted a job with Azure Quantum! 👏**

5. Estimate costs

To estimate the costs of running this program on a simulator or hardware, you can use the `backend.estimate_cost` method.<jupyter_code>backend = provider.get_backend("ionq.qpu")
cost = backend.estimate_cost(circuit, shots=100)
print(f"Estimated cost: {cost.estimated_total} {cost.currency_code}")<jupyter_output><empty_output>
azure-quantum-python/samples/hello-world/HW-ionq-qiskit.ipynb/0
{ "file_path": "azure-quantum-python/samples/hello-world/HW-ionq-qiskit.ipynb", "repo_id": "azure-quantum-python", "token_count": 1411 }
363
---
page_type: sample
description: "This sample shows you how to work with sessions in Azure Quantum"
languages:
- python
products:
- azure-quantum
---

# Introduction to Sessions

This sample shows you how to work with sessions in Azure Quantum by using a session to run multiple Qiskit jobs.

This sample is available as part of the Azure Quantum notebook samples gallery in the Azure Portal. For an example of how to run these notebooks in Azure, see [this getting started guide](https://learn.microsoft.com/azure/quantum/get-started-jupyter-notebook).

## Manifest

- [introduction-to-sessions.ipynb](https://github.com/microsoft/azure-quantum-python/blob/main/samples/sessions/introduction-to-sessions.ipynb): Python + Qiskit notebook sample showing how to work with sessions.
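
The notebook follows roughly this pattern (a minimal sketch; the `open_session` call and target name reflect the azure-quantum API as best understood here and should be checked against the notebook itself):

```python
from azure.quantum import Workspace
from azure.quantum.qiskit import AzureQuantumProvider
from qiskit import QuantumCircuit

provider = AzureQuantumProvider(Workspace(resource_id="...", location="..."))
backend = provider.get_backend("ionq.simulator")

circuit = QuantumCircuit(2, 2)
circuit.h(0)
circuit.cx(0, 1)
circuit.measure([0, 1], [0, 1])

# Jobs submitted inside the `with` block are grouped into one session.
with backend.open_session(name="Qiskit Session") as session:
    job1 = backend.run(circuit, shots=100)
    job2 = backend.run(circuit, shots=100)
```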
azure-quantum-python/samples/sessions/README.md/0
{ "file_path": "azure-quantum-python/samples/sessions/README.md", "repo_id": "azure-quantum-python", "token_count": 215 }
364
/*------------------------------------
  Copyright (c) Microsoft Corporation.
  Licensed under the MIT License.
  All rights reserved.
------------------------------------ */
import * as React from "react";
import { createRoot } from 'react-dom/client';
import { SpaceDiagram, TimeDiagram } from "quantum-visualization";
import { initializeIcons } from '@fluentui/react/lib/Icons';

initializeIcons();

const randomId = () => {
  return Math.random().toString(36).substring(7);
};

class SpaceDiagramComponent extends HTMLElement {
  connectedCallback() {
    const divId = "space-diagram-" + randomId();
    this.innerHTML = `<div id=${divId}> </div>`;
    const data = this.getAttribute("data");
    if (data) {
      const root = createRoot(document.getElementById(divId));
      root.render(<SpaceDiagram data={data} />);
    } else {
      console.error("Rendering error: Space Diagram requires data.");
    }
  }
}

class TimeDiagramComponent extends HTMLElement {
  connectedCallback() {
    const divId = "time-diagram-" + randomId();
    this.innerHTML = `<div id=${divId}> </div>`;
    const data = this.getAttribute("data");
    if (data) {
      const root = createRoot(document.getElementById(divId));
      root.render(<TimeDiagram data={data} />);
    } else {
      console.error("Rendering error: Time Diagram requires data.");
    }
  }
}

window.customElements.get("re-space-diagram") ||
  window.customElements.define("re-space-diagram", SpaceDiagramComponent);
window.customElements.get("re-time-diagram") ||
  window.customElements.define("re-time-diagram", TimeDiagramComponent);
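
// Illustrative usage sketch (editorial addition): once this bundle is loaded,
// the registered custom elements can be used straight from HTML. The JSON
// payload shape is whatever SpaceDiagram/TimeDiagram expect (a resource
// estimation job result), and the bundle file name below is an assumption.
//
//   <script src="quantum-visualization.bundle.js"></script>
//   <re-space-diagram data='{"physicalCounts": {"...": "..."}}'></re-space-diagram>
//   <re-time-diagram data='{"physicalCounts": {"...": "..."}}'></re-time-diagram>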
azure-quantum-python/visualization/js-lib/src/index.js/0
{ "file_path": "azure-quantum-python/visualization/js-lib/src/index.js", "repo_id": "azure-quantum-python", "token_count": 565 }
365
/*------------------------------------
Copyright (c) Microsoft Corporation.
Licensed under the MIT License.
All rights reserved.
------------------------------------ */
import React from "react";

import { IColumn, IGroup, ThemeProvider } from "@fluentui/react";

import { JobResults } from "../../models/JobResults";
import LineChart from "../d3-visualization-components/LineChart";
import { GetColumns } from "../table/Column";
import { IItem, IState, TableComponent } from "../table/Table";

import "./Diagram.css";

export interface TimeDiagramProps {
  data: string;
}

// Takes the runtime string from the data and formats it with the appropriate symbol for its unit of time.
// If no mapping is found, the runtime is returned with a space between the time and its label.
function FormatRuntime(rawRuntime: string): string {
  /* Define time abbreviation mapping */
  let timeMap: Map<string, string> = new Map([
    ["milliseconds", "ms"],
    ["millisecs", "ms"],
    ["seconds", "s"],
    ["secs", "s"],
    ["minutes", "min"],
    ["mins", "min"],
    ["hours", "h"],
    ["hrs", "h"],
    ["days", "d"],
    ["weeks", "wk"],
    ["wks", "wk"],
    ["months", "mo"],
    ["mos", "mo"],
    ["years", "yr"],
    ["yrs", "yr"],
    ["microseconds", "\u00B5s"],
    ["nanoseconds", "ns"],
    ["picoseconds", "ps"],
    ["microsecs", "\u00B5s"],
    ["nanosecs", "ns"],
    ["picosecs", "ps"]
  ]);

  const runTimeArray = rawRuntime.split(/(\d+)/).filter(Boolean);
  const runTimeUnit = timeMap.get(runTimeArray[1]);
  let runTimeFormatted = "";
  if (runTimeUnit) {
    runTimeFormatted = `${runTimeArray[0]} ${runTimeUnit}`;
  } else {
    runTimeFormatted = `${runTimeArray[0]} ${runTimeArray[1]}`;
  }
  return runTimeFormatted;
}

function TimeDiagram({ data }: TimeDiagramProps) {
  // Parse job results data.
  const jobResults = JSON.parse(data) as JobResults;

  /*------------------------------ Configure canvas sizing ------------------------------ */
  const diagramRef = React.useRef<any>();
  const [width, setWidth] = React.useState(0);
  const [height, setHeight] = React.useState(0);
  const handleWidth = () => {
    const width = diagramRef?.current?.offsetWidth;
    if (width) {
      setWidth(width);
    }
  };
  const handleSize = () => {
    handleWidth();
    const height = diagramRef?.current?.offsetHeight;
    if (height) {
      setHeight(height);
    }
  };
  React.useLayoutEffect(() => {
    handleSize();
    window.addEventListener("resize", handleWidth);
    // Remove the listener on cleanup so repeated renders don't leak handlers.
    return () => window.removeEventListener("resize", handleWidth);
  }, [diagramRef.current]);

  /*------------------------------ Define and parse table and chart data ------------------------------ */
  const algorithmRuntimeFormatted = FormatRuntime(
    jobResults.physicalCountsFormatted.runtime
  );
  const tFactoryRuntimeFormatted = FormatRuntime(
    jobResults.physicalCountsFormatted.tfactoryRuntime
  );
  const logicalCycleTimeFormatted = FormatRuntime(
    jobResults.physicalCountsFormatted.logicalCycleTime
  );
  const numTFactoryInvocations =
    jobResults.physicalCounts.breakdown.numTfactoryRuns;
  const numTfactories = jobResults.physicalCounts.breakdown.numTfactories;
  const numTStatesPerSingleTfactory = jobResults.tfactory.numTstates;
  const numTStatesAllTfactoriesOneInvocation =
    numTStatesPerSingleTfactory * numTfactories;
  const numTStatesPerInvocationString =
    "Output T states of single T factory (" +
    numTStatesPerSingleTfactory +
    ") * T factories (" +
    numTfactories +
    ") = " +
    numTStatesAllTfactoriesOneInvocation +
    " T states produced by a single invocation of all T factories.";

  const tableItems: IItem[] = [
    {
      name: "Algorithm runtime",
      value: algorithmRuntimeFormatted,
      description: "Total runtime of algorithm.",
    },
    {
      name: "T factory runtime",
      value: tFactoryRuntimeFormatted,
      description: "Runtime of a single T factory.",
    },
    {
      name: "T factory copies",
      value: numTfactories.toLocaleString(),
      description:
        "Number of T factories executed in parallel capable of producing the demanded T states during the algorithm's runtime.",
    },
    {
      name: "T factory invocations",
      value: numTFactoryInvocations.toLocaleString(),
      description: "Number of times all T factories are invoked concurrently.",
    },
    {
      name: "T states per single T factory run",
      value: numTStatesPerSingleTfactory.toLocaleString(),
      description: "Number of T states produced by a single T factory run.",
    },
    {
      name: "T states per invocation",
      value: numTStatesAllTfactoriesOneInvocation.toLocaleString(),
      description: numTStatesPerInvocationString,
    },
    {
      name: "Logical depth",
      value: jobResults.physicalCounts.breakdown.logicalDepth.toLocaleString(),
      description:
        "A single T factory may cause logical depth to increase from algorithmic logical depth if its execution time is slower than the algorithm's.",
    },
    {
      name: "Algorithmic logical depth",
      value:
        jobResults.physicalCounts.breakdown.algorithmicLogicalDepth.toLocaleString(),
      description: "Number of logical cycles for the algorithm.",
    },
    {
      name: "T gates",
      value: jobResults.logicalCounts.tCount.toLocaleString(),
      description: "Number of T gates in the input quantum program.",
    },
    {
      name: "R gates",
      value: jobResults.logicalCounts.rotationCount.toLocaleString(),
      description: "Number of rotation gates in the input quantum program.",
    },
    {
      name: "Logical depth rotation gates",
      value: jobResults.logicalCounts.rotationDepth.toLocaleString(),
      description: "Depth of rotation gates in the input quantum program.",
    },
    {
      name: "CCZ gates",
      value: jobResults.logicalCounts.cczCount.toLocaleString(),
      description: "Number of CCZ-gates in the input quantum program.",
    },
    {
      name: "CCiX gates",
      value: jobResults.logicalCounts.ccixCount.toLocaleString(),
      description: "Number of CCiX-gates in the input quantum program.",
    },
    {
      name: "Measurement operations",
      value: jobResults.logicalCounts.measurementCount.toLocaleString(),
      description:
        "Number of single qubit measurements in the input quantum program.",
    },
    {
      name: "Logical cycle time",
      value: logicalCycleTimeFormatted,
      description: "Duration of a logical cycle.",
    },
  ];

  const tableGroups: IGroup[] = [
    {
      key: "1",
      name: "Physical resource estimates",
      startIndex: 0,
      count: 1,
    },
    {
      key: "2",
      name: "T factory parameters",
      startIndex: 1,
      count: 1,
    },
    {
      key: "3",
      name: "Resource estimation breakdown",
      startIndex: 2,
      count: 6,
    },
    {
      key: "4",
      name: "Pre-layout logical resources",
      startIndex: 8,
      count: 6,
    },
    {
      key: "5",
      name: "Logical cycle time",
      startIndex: 14,
      count: 1,
    },
  ];

  /*------------------------------ Create table ------------------------------ */
  const tableProps: IState = {
    items: tableItems,
    groups: tableGroups,
    showItemIndexInView: false,
    isCompactMode: false,
  };
  const columns: IColumn[] = GetColumns();
  const Table = () => (
    <ThemeProvider>
      <TableComponent state={tableProps} columns={columns} />
    </ThemeProvider>
  );

  /*------------------------------ Create chart data dictionary ------------------------------ */
  const chartDictionary: { [key: string]: any } = {
    numberTFactoryInvocations: numTFactoryInvocations.toString(),
    numberTStates: numTStatesAllTfactoriesOneInvocation,
    algorithmRuntime: jobResults.physicalCounts.runtime,
    tFactoryRuntime: jobResults.tfactory.runtime,
    algorithmRuntimeFormatted: algorithmRuntimeFormatted,
    tFactoryRuntimeFormatted: tFactoryRuntimeFormatted,
  };

  return (
    <div className="grid-container">
      <div className="diagram" ref={diagramRef}>
        <LineChart
          chartData={chartDictionary}
          width={width}
          height={height}
        ></LineChart>
      </div>
      <div className="table">
        <Table />
      </div>
    </div>
  );
}

export default TimeDiagram;
azure-quantum-python/visualization/react-lib/src/components/resource-estimator/TimeDiagram.tsx/0
{ "file_path": "azure-quantum-python/visualization/react-lib/src/components/resource-estimator/TimeDiagram.tsx", "repo_id": "azure-quantum-python", "token_count": 2936 }
366
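The FormatRuntime helper in the TimeDiagram component above is just a unit-abbreviation lookup wrapped around a digit/non-digit split. A minimal Python sketch of the same logic (the map is an abbreviated, illustrative subset; the function name is not part of the component):

```python
import re

# Illustrative subset of the TimeDiagram unit map.
TIME_MAP = {"milliseconds": "ms", "seconds": "s", "nanoseconds": "ns"}

def format_runtime(raw: str) -> str:
    # Split into the leading number and the trailing unit label, e.g. "18milliseconds".
    parts = [p for p in re.split(r"(\d+)", raw) if p]
    value, unit = parts[0], parts[1]
    # Fall back to the raw label when no abbreviation is known.
    return f"{value} {TIME_MAP.get(unit, unit)}"

print(format_runtime("18milliseconds"))  # 18 ms
print(format_runtime("3parsecs"))        # 3 parsecs (no mapping found)
```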
Frequently Asked Questions ========================== What is a bistring, anyway? --------------------------- Simply put, a `bistring` is a pair of strings, an original string and a modified one, along with information about how they align with each other. The :class:`bistring.bistr` class has an API very similar to the built-in :class:`str`, but all its operations keep track of the original string and the alignment for you. >>> from bistring import bistr >>> s = bistr('HELLO WORLD') >>> print(s) ⮎'HELLO WORLD'⮌ >>> s = s.lower() >>> print(s) ('HELLO WORLD' ⇋ 'hello world') >>> print(s[6:]) ('WORLD' ⇋ 'world') Why am I getting more text than I expect when slicing? ------------------------------------------------------ When a bistring doesn't have precise enough alignment information to slice exactly, it will give you back the smallest string it knows for certain contains a match for the region you requested. In the worst case, that may be the entire string! This happens, for example, when you use the two-argument `bistr` constructor, which makes no effort to infer a granular alignment between the strings: >>> s = bistr('color', 'colour') >>> print(s[3:5]) ('color' ⇋ 'ou') Instead, you should start from your original string as a `bistr`, and then transform it how you want: >>> s = bistr('color') >>> s = s.sub(r'(?<=col)o(?=r)', 'ou') >>> print(s) ('color' ⇋ 'colour') >>> print(s[3:5]) ('o' ⇋ 'ou') Alternatively, you can piece many smaller bistrings together to achieve the alignment you want manually: >>> s = bistr('col') + bistr('o', 'ou') + bistr('r') >>> print(s) ('color' ⇋ 'colour') >>> print(s[3:5]) ('o' ⇋ 'ou') What if I don't know the alignment? ----------------------------------- If at all possible, you should use `bistring` all the way through your text processing code, which will ensure an accurate alignment is tracked for you. If you don't control that code, or there are other reasons it won't work with `bistring`, you can still have us guess an alignment for you in simple cases with :meth:`bistring.bistr.infer`. >>> s = bistr.infer('color', 'colour') >>> print(s[0:3]) ⮎'col'⮌ >>> print(s[3:5]) ('o' ⇋ 'ou') >>> print(s[5:6]) ⮎'r'⮌ `infer()` is an expensive operation (``O(N*M)`` in the length of the strings), so if you absolutely need it, try to use it only for short strings. How do I get the actual indices, rather than just substrings? ------------------------------------------------------------- Use :attr:`bistring.bistr.alignment`: >>> s = bistr('The quick, brown 🦊') >>> s = s.replace(',', '') >>> s = s.replace('🦊', 'fox') >>> print(s[16:19]) ('🦊' ⇋ 'fox') >>> s.alignment.original_bounds(16, 19) (17, 18) >>> s.alignment.modified_bounds(11, 16) (10, 15) >>> print(s[10:15]) ⮎'brown'⮌ See :class:`bistring.Alignment` for more details. How do I perform case-insensitive operations? --------------------------------------------- Use :meth:`bistring.bistr.casefold`. Do not use :meth:`~bistring.bistr.lower`, :meth:`~bistring.bistr.upper`, or any other method, as you will get wrong results for many non-English languages. To check case-insensitive equality, you don't even need `bistring`: >>> 'HELLO WORLD!'.casefold() == 'HeLlO wOrLd!'.casefold() True To search for a substring case-insensitively: >>> s = bistr('Bundesstraße').casefold() >>> s.find_bounds('STRASSE'.casefold()) (6, 13) >>> print(s[6:13]) ('straße' ⇋ 'strasse') Forget case insensitivity, how do I make sure that identical looking strings compare equal? 
-------------------------------------------------------------------------------------------

This is a hard problem with Unicode strings.
To start with, you should at least perform some kind of `Unicode normalization <https://unicode.org/reports/tr15/>`_.
That ensures that different ways of writing the semantically identical thing (e.g. with precomposed accented characters vs. combining accents) become actually identical:

>>> a = bistr('\u00EAtre') # 'être' with a single character for the ê
>>> b = bistr('e\u0302tre') # 'être' with an 'e' and a combining '^'
>>> a.normalize('NFC').modified == b.normalize('NFC').modified
True
>>> a.normalize('NFD').modified == b.normalize('NFD').modified
True

Normalization form NFC tries to keep precomposed characters together whenever possible, while NFD always decomposes them.
In general, NFC is more convenient for people to work with, but NFD can be useful for things like removing accents and other combining marks from text.

What about similar-looking strings that aren't necessarily identical?
----------------------------------------------------------------------

Unicode contains things like ligatures, alternative scripts, and other oddities that can result in similar-looking strings that are represented very differently.
Here is where the "compatibility" normalization forms, NFKC and NFKD, can help:

>>> s = bistr('𝕳𝖊𝖑𝖑𝖔 𝖜𝖔𝖗𝖑𝖉')
>>> s = s.normalize('NFKC')
>>> print(s)
('𝕳𝖊𝖑𝖑𝖔 𝖜𝖔𝖗𝖑𝖉' ⇋ 'Hello world')
>>> print(s[6:])
('𝖜𝖔𝖗𝖑𝖉' ⇋ 'world')

How do I ensure I get the same results on every machine?
--------------------------------------------------------

Always pass an explicit locale to any `bistr` method that takes one.
Many of Python's string APIs implicitly use the system's default locale, which may be quite different from the one you developed with.
While this may be the right behaviour if you're displaying strings to the current user, it's rarely the right behaviour if you're dealing with text that originated or will be displayed elsewhere, e.g. for cloud software.
`bistr` always accepts a locale parameter in these APIs, to ensure reproducible and sensible results:

>>> # s will be 'I' in most locales, but 'İ' in Turkish locales!
>>> s = bistr('i').upper()
>>> # An English locale guarantees a dotless capital I
>>> print(bistr('i').upper('en_US'))
('i' ⇋ 'I')
>>> # A Turkish locale gives a dotted capital İ
>>> print(bistr('i').upper('tr_TR'))
('i' ⇋ 'İ')

Tokenization
------------

How do I tokenize text in a reversible way?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

`bistring` provides some convenient tokenization APIs that track string indices.
To use Unicode word boundary rules, for example:

>>> from bistring import WordTokenizer
>>> tokenizer = WordTokenizer('en_US')
>>> tokens = tokenizer.tokenize('The quick, brown fox jumps over the lazy dog')
>>> print(tokens[1])
[4:9]=⮎'quick'⮌

How do I find the whole substring of text for some tokens?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

:meth:`bistring.Tokenization.substring` gives the substring itself.
:meth:`bistring.Tokenization.text_bounds` gives the bounds of that substring.

>>> print(tokens.substring(1, 3))
⮎'quick, brown'⮌
>>> tokens.text_bounds(1, 3)
(4, 16)

How do I find the tokens for a substring of text?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

:meth:`bistring.Tokenization.bounds_for_text`

>>> tokens.bounds_for_text(4, 16)
(1, 3)
>>> print(tokens.substring(1, 3))
⮎'quick, brown'⮌

How do I snap a substring of text to the nearest token boundaries?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ :meth:`bistring.Tokenization.snap_text_bounds` >>> print(tokens.text[6:14]) ⮎'ick, bro'⮌ >>> tokens.snap_text_bounds(6, 14) (4, 16) >>> print(tokens.text[4:16]) ⮎'quick, brown'⮌ What if I don't know the token positions? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If at all possible, you should use a :class:`bistring.Tokenizer` or some other method that tokenizes with position information. If you can't, you can use :meth:`bistring.Tokenization.infer` to guess the alignment for you: >>> from bistring import Tokenization >>> tokens = Tokenization.infer('hello, world!', ['hello', 'world']) >>> print(tokens[0]) [0:5]=⮎'hello'⮌ >>> print(tokens[1]) [7:12]=⮎'world'⮌
bistring/docs/FAQ.rst/0
{ "file_path": "bistring/docs/FAQ.rst", "repo_id": "bistring", "token_count": 2777 }
367
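One pattern the FAQ above mentions but doesn't demonstrate is using NFD to strip accents while keeping the alignment back to the original text. A minimal sketch (the character class below only covers the combining diacritical marks block, U+0300 through U+036F):

```python
from bistring import bistr

# Decompose accented characters, then drop the combining diacritical marks.
s = bistr('être à côté').normalize('NFD')
s = s.sub(r'[\u0300-\u036f]', '')
print(s.modified)       # 'etre a cote'
print(s[0:4].original)  # 'être' (the alignment still points at the accented original)
```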
Python ====== .. toctree:: bistr BistrBuilder Alignment Tokenization Tokenizer
bistring/docs/Python/index.rst/0
{ "file_path": "bistring/docs/Python/index.rst", "repo_id": "bistring", "token_count": 44 }
368
/*!
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT license.
 */

import Alignment, { BiIndex } from "./alignment";
import BiString, { AnyString } from "./bistring";
import { Replacer, normalizeReplacer, cloneRegExp, isStatefulRegExp } from "./regex";

/**
 * Bidirectionally transformed string builder.
 *
 * A `BiStringBuilder` builds a transformed version of a source string iteratively. Each builder has an immutable
 * original string, a current string, and the in-progress modified string, with alignments between each. For example:
 *
 * .. code-block:: text
 *
 *     original: |The| |quick,| |brown| |🦊| |jumps| |over| |the| |lazy| |🐶|
 *               | | | | | | | \ \ \ \ \ \ \ \ \ \ \
 *     current:  |The| |quick,| |brown| |fox| |jumps| |over| |the| |lazy| |dog|
 *               | | | / / /
 *     modified: |the| |quick| |brown| ...
 *
 * The modified string is built in pieces by calling :js:meth:`replace` to change `n` characters of the current string
 * into new ones in the modified string. Convenience methods like :js:meth:`skip`, :js:meth:`insert`, and
 * :js:meth:`discard` are implemented on top of this basic primitive.
 */
export default class BiStringBuilder {
    private _original: BiString;
    private _modified: string[];
    private _alignment: BiIndex[];
    private _oPos: number;
    private _mPos: number;

    /**
     * Construct a BiStringBuilder.
     *
     * @param original
     *      Either an original string or a BiString to start from.
     */
    constructor(original: AnyString) {
        this._original = BiString.from(original);
        this._modified = [];
        this._alignment = [[0, 0]];
        this._oPos = 0;
        this._mPos = 0;
    }

    /**
     * The original string being modified.
     */
    get original(): string {
        return this._original.original;
    }

    /**
     * The current string before modifications.
     */
    get current(): string {
        return this._original.modified;
    }

    /**
     * The modified string as built so far.
     */
    get modified(): string {
        return this._modified.join("");
    }

    /**
     * The alignment as built so far from `this.current` to `this.modified`.
     */
    get alignment(): Alignment {
        return new Alignment(this._alignment);
    }

    /**
     * The position of the builder in `this.current`.
     */
    get position(): number {
        return this._oPos;
    }

    /**
     * The number of characters of the current string left to process.
     */
    get remaining(): number {
        return this.current.length - this.position;
    }

    /**
     * Whether we've completely processed the string. In other words, whether the modified string aligns with the end
     * of the current string.
     */
    get isComplete(): boolean {
        return this.remaining === 0;
    }

    /**
     * Peek at the next few characters.
     *
     * @param n
     *      The number of characters to peek at.
     */
    peek(n: number): string {
        return this.current.slice(this._oPos, this._oPos + n);
    }

    private _advance(oCount: number, mCount: number) {
        this._oPos += oCount;
        this._mPos += mCount;
        if (oCount > 0 || mCount > 0) {
            this._alignment.push([this._oPos, this._mPos]);
        }
    }

    /**
     * Skip the next `n` characters, copying them unchanged.
     */
    skip(n: number) {
        if (n > 0) {
            this._modified.push(this.peek(n));
            for (let i = 0; i < n; ++i) {
                this._advance(1, 1);
            }
        }
    }

    /**
     * Skip the rest of the string, copying it unchanged.
     */
    skipRest() {
        this.skip(this.remaining);
    }

    /**
     * Insert a substring into the string.
     */
    insert(str: string) {
        this.replace(0, str);
    }

    /**
     * Discard a portion of the original string.
     */
    discard(n: number) {
        this.replace(n, "");
    }

    /**
     * Discard the rest of the original string.
     */
    discardRest() {
        this.discard(this.remaining);
    }

    /**
     * Replace the next `n` characters with a new string.
     */
    replace(n: number, str: AnyString) {
        if (typeof(str) === "string") {
            if (str.length > 0) {
                this._modified.push(str);
            }
            this._advance(n, str.length);
        } else {
            if (str.original !== this.peek(n)) {
                throw new Error("BiString doesn't match the current string");
            }
            this._modified.push(str.modified);
            const alignment = str.alignment.values;
            for (let i = 1; i < alignment.length; ++i) {
                const [o0, m0] = alignment[i - 1];
                const [o1, m1] = alignment[i];
                this._advance(o1 - o0, m1 - m0);
            }
        }
    }

    /**
     * Append a BiString. The original value of the BiString must match the current string being processed.
     */
    append(bs: BiString) {
        this.replace(bs.original.length, bs);
    }

    private _match(pattern: RegExp): RegExpExecArray | null {
        if (!isStatefulRegExp(pattern)) {
            pattern = cloneRegExp(pattern, "g");
        }
        pattern.lastIndex = this.position;
        return pattern.exec(this.current);
    }

    private * _matchAll(pattern: RegExp): IterableIterator<RegExpExecArray> {
        if (pattern.global) {
            pattern.lastIndex = this.position;
            let match;
            while ((match = pattern.exec(this.current))) {
                yield match;
            }
        } else {
            if (!pattern.sticky) {
                pattern = cloneRegExp(pattern, "g");
            }
            pattern.lastIndex = this.position;
            let match;
            if ((match = pattern.exec(this.current))) {
                yield match;
            }
        }
    }

    /**
     * Skip a substring matching a regex, copying it unchanged.
     *
     * @param pattern
     *      The pattern to match. Must have either the sticky flag, forcing it to match at the current position, or
     *      the global flag, finding the next match.
     * @returns
     *      Whether a match was found.
     */
    skipMatch(pattern: RegExp): boolean {
        if (this._match(pattern)) {
            this.skip(pattern.lastIndex - this.position);
            return true;
        } else {
            return false;
        }
    }

    /**
     * Discard a substring that matches a regex.
     *
     * @param pattern
     *      The pattern to match. Must have either the sticky flag, forcing it to match at the current position, or
     *      the global flag, finding the next match.
     * @returns
     *      Whether a match was found.
     */
    discardMatch(pattern: RegExp): boolean {
        const match = this._match(pattern);
        if (match) {
            this.skip(match.index - this.position);
            this.discard(match[0].length);
            return true;
        } else {
            return false;
        }
    }

    /**
     * Replace a substring that matches a regex.
     *
     * @param pattern
     *      The pattern to match. Must have either the sticky flag, forcing it to match at the current position, or
     *      the global flag, finding the next match.
     * @param replacement
     *      The replacement string or function, as in :js:meth:`String.prototype.replace`.
     * @returns
     *      Whether a match was found.
     */
    replaceMatch(pattern: RegExp, replacement: string | Replacer): boolean {
        const replacer = normalizeReplacer(replacement);
        const match = this._match(pattern);
        if (match) {
            this.skip(match.index - this.position);
            this.replace(match[0].length, replacer(match));
            return true;
        } else {
            return false;
        }
    }

    /**
     * Replace all occurrences of a regex, like :js:meth:`String.prototype.replace`.
     *
     * @param pattern
     *      The pattern to match. The global flag (/g) must be set to get multiple matches.
     * @param replacement
     *      The replacement string or function, as in :js:meth:`String.prototype.replace`.
     */
    replaceAll(pattern: RegExp, replacement: string | Replacer) {
        const replacer = normalizeReplacer(replacement);
        for (const match of this._matchAll(pattern)) {
            this.skip(match.index - this.position);
            this.replace(match[0].length, replacer(match));
        }
        this.skipRest();
    }

    /**
     * Build the :js:class:`BiString`.
*/ build(): BiString { if (!this.isComplete) { throw new Error(`The string is not completely built yet (${this.remaining} characters remaining)`); } const alignment = this._original.alignment.compose(this.alignment); return new BiString(this.original, this.modified, alignment); } /** * Reset this builder to apply another transformation. */ rewind() { this._original = this.build(); this._modified = []; this._alignment = [[0, 0]]; this._oPos = 0; this._mPos = 0; } }
bistring/js/src/builder.ts/0
{ "file_path": "bistring/js/src/builder.ts", "repo_id": "bistring", "token_count": 4043 }
369
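The Python package mirrors this builder API. A short sketch of the skip/discard/replace primitives described in the class comment above (this assumes the Python class is named `BistrBuilder`, as exported by the package's `_builder` module):

```python
from bistring import BistrBuilder

builder = BistrBuilder('The quick, brown 🦊')
builder.skip(len('The quick'))  # copy 'The quick' unchanged
builder.discard(1)              # drop the comma
builder.skip(len(' brown '))
builder.replace(1, 'fox')       # swap the emoji for its name
s = builder.build()
print(s.modified)               # 'The quick brown fox'
print(s[16:19].original)        # '🦊'
```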
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT license. from ._alignment import * from ._bistr import * from ._builder import * from ._token import *
bistring/python/bistring/__init__.py/0
{ "file_path": "bistring/python/bistring/__init__.py", "repo_id": "bistring", "token_count": 49 }
370
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT license. from bistring import Alignment, bistr import pytest import unicodedata def test_new(): pytest.raises(TypeError, bistr, 42) pytest.raises(TypeError, bistr, 'fourty-two', 42) pytest.raises(TypeError, bistr, 'fourty-two', '42', 42) pytest.raises(ValueError, bistr, 'fourty-two', '42', Alignment([ (0, 0), (9, 2), ])) pytest.raises(ValueError, bistr, 'fourty-two', '42', Alignment([ (0, 0), (10, 1), ])) bistr('42') bistr('fourty-two', '42') bistr('fourty-two', '42', Alignment([ (0, 0), (6, 1), (7, 1), (10, 2), ])) def test_infer(): bs = bistr.infer('test', 'test') assert bs == bistr('test', 'test', Alignment.identity(4)) bs = bistr.infer('color', 'colour') assert bs[3:5].original == 'o' assert bs.inverse() == bistr.infer('colour', 'color') bs = bistr.infer("--Hello, world!--", "hello world") assert bs[:5] == bistr("Hello", "hello", Alignment.identity(5)) assert bs[6:] == bistr("world") bs = bistr.infer( '🅃🄷🄴 🅀🅄🄸🄲🄺, 🄱🅁🄾🅆🄽 🦊 🄹🅄🄼🄿🅂 🄾🅅🄴🅁 🅃🄷🄴 🄻🄰🅉🅈 🐶', 'the quick brown fox jumps over the lazy dog', ) assert bs[0:3] == bistr('🅃🄷🄴', 'the', Alignment.identity(3)) assert bs[4:9] == bistr('🅀🅄🄸🄲🄺', 'quick', Alignment.identity(5)) assert bs[10:15] == bistr('🄱🅁🄾🅆🄽', 'brown', Alignment.identity(5)) assert bs[16:19].original == '🦊' assert bs[16:19].modified == 'fox' assert bs[20:25] == bistr('🄹🅄🄼🄿🅂', 'jumps', Alignment.identity(5)) assert bs[40:43].original == '🐶' assert bs[40:43].modified == 'dog' bs = bistr.infer( 'Ṫḧë qüïċḳ, ḅṛöẅṅ 🦊 jüṁṗṡ öṿëṛ ẗḧë ḷäżÿ 🐶', 'the quick brown fox jumps over the lazy dog', ) assert bs[0:3] == bistr('Ṫḧë', 'the', Alignment.identity(3)) assert bs[4:9] == bistr('qüïċḳ', 'quick', Alignment.identity(5)) assert bs[10:15] == bistr('ḅṛöẅṅ', 'brown', Alignment.identity(5)) assert bs[16:19].original == '🦊' assert bs[16:19].modified == 'fox' assert bs[20:25] == bistr('jüṁṗṡ', 'jumps', Alignment.identity(5)) assert bs[40:43].original == '🐶' assert bs[40:43].modified == 'dog' bs = bistr.infer('Z̴̡̪̫̖̥̔̿̃̈̏̎͠͝á̸̪̠̖̻̬̖̪̞͙͇̮̠͎̆͋́̐͌̒͆̓l̶͉̭̳̤̬̮̩͎̟̯̜͇̥̠̘͑͐̌͂̄́̀̂̌̈͛̊̄̚͜ģ̸̬̼̞̙͇͕͎̌̾̒̐̿̎̆̿̌̃̏̌́̾̈͘͜o̶̢̭͕͔̩͐ ̴̡̡̜̥̗͔̘̦͉̣̲͚͙̐̈́t̵͈̰̉̀͒̎̈̿̔̄̽͑͝͠ẹ̵̫̲̫̄͜͜x̵͕̳͈̝̤̭̼̼̻͓̿̌̽̂̆̀̀̍̒͐́̈̀̚͝t̸̡̨̥̺̣̟͎̝̬̘̪͔͆́̄̅̚', 'Zalgo text') for i, c in enumerate(bs): assert bs[i:i+1].original.startswith(c) def test_concat(): bs = bistr(' ', '') bs += 'Hello' bs += bistr(' ', ' ') bs += 'world!' bs += bistr(' ', '') assert bs.original == ' Hello world! ' assert bs.modified == 'Hello world!' 
bs = bs[4:7] assert bs.original == 'o w' assert bs.modified == 'o w' bs = bs[1:2] assert bs.original == ' ' assert bs.modified == ' ' def test_find_index(): bs = bistr('dysfunction') assert bs.find('dis') == -1 assert bs.find('fun') == 3 assert bs.find('n') == 5 assert bs.find('n', 6) == 10 assert bs.find_bounds('dis') == (-1, -1) assert bs.find_bounds('fun') == (3, 6) assert bs.find_bounds('n') == (5, 6) assert bs.find_bounds('n', 6) == (10, 11) pytest.raises(ValueError, bs.index, 'dis') pytest.raises(ValueError, bs.index_bounds, 'dis') assert bs.index('fun') == 3 assert bs.index_bounds('fun') == (3, 6) assert bs.index_bounds('n') == (5, 6) assert bs.index_bounds('n', 6) == (10, 11) def test_rfind_rindex(): bs = bistr('dysfunction') assert bs.rfind('dis') == -1 assert bs.rfind('fun') == 3 assert bs.rfind('n') == 10 assert bs.rfind('n', None, 9) == 5 assert bs.rfind_bounds('dis') == (-1, -1) assert bs.rfind_bounds('fun') == (3, 6) assert bs.rfind_bounds('n') == (10, 11) assert bs.rfind_bounds('n', None, 9) == (5, 6) pytest.raises(ValueError, bs.index, 'dis') pytest.raises(ValueError, bs.index_bounds, 'dis') assert bs.rindex('fun') == 3 assert bs.rindex_bounds('fun') == (3, 6) assert bs.rindex_bounds('n') == (10, 11) assert bs.rindex_bounds('n', None, 9) == (5, 6) def test_starts_ends_with(): bs = bistr('Beginning, middle, ending') assert bs.startswith('Begin') assert bs.endswith('ing') assert not bs.startswith('ending') assert not bs.endswith('Beginning') assert bs.startswith(('Begin', 'End')) assert bs.endswith(('beginning', 'ending')) def test_justify(): bs = bistr('Hello world!') assert bs.center(5) == bs assert bs.center(20) == bistr('', ' ') + bs + bistr('', ' ') assert bs.center(21) == bistr('', ' ') + bs + bistr('', ' ') assert bs.ljust(5) == bs assert bs.ljust(16) == bs + bistr('', ' ') assert bs.rjust(5) == bs assert bs.rjust(16) == bistr('', ' ') + bs def test_join(): assert bistr('').join([]) == bistr('') sep = bistr('|', '::') args = ['Hello', bistr('WORLD').lower()] assert sep.join(args) == args[0] + sep + args[1] def test_split(): bs = bistr('1,2,3') assert bs.split(',') == [bistr('1'), bistr('2'), bistr('3')] assert bs.split(',', 1) == [bistr('1'), bistr('2,3')] assert bistr('1,2,,3,').split(',') == [bistr('1'), bistr('2'), bistr(''), bistr('3'), bistr('')] assert bistr('').split(',') == [bistr('')] assert bistr('1<>2<>3').split('<>') == [bistr('1'), bistr('2'), bistr('3')] bs = bistr(' 1 2 3 ') assert bs.split() == [bistr('1'), bistr('2'), bistr('3')] assert bs.split(maxsplit=-1) == [bistr('1'), bistr('2'), bistr('3')] assert bs.split(maxsplit=2) == [bistr('1'), bistr('2'), bistr('3 ')] assert bs.split(maxsplit=1) == [bistr('1'), bistr('2 3 ')] assert bistr('').split() == [] def test_partition(): bs = bistr('left::middle::right') left, sep, right = bs.partition('::') assert left == bistr('left') assert sep == bistr('::') assert right == bistr('middle::right') left, sep, right = bs.partition(':::') assert left == bs assert sep == bistr('') assert right == bistr('') left, sep, right = bs.rpartition('::') assert left == bistr('left::middle') assert sep == bistr('::') assert right == bistr('right') left, sep, right = bs.rpartition(':::') assert left == bistr('') assert sep == bistr('') assert right == bs def test_expandtabs(): bs = bistr(' \tHello\t\tworld!\n\tGoodbye \tworld!') bs = bs.expandtabs() assert bs.modified == bs.original.expandtabs() assert bs[0:1] == bistr(' ') assert bs[1:8] == bistr('\t', ' ') assert bs[8:13] == bistr('Hello') assert bs[13:16] == bistr('\t', ' ') 
assert bs[16:24] == bistr('\t', ' ') assert bs[24:30] == bistr('world!') assert bs[30:31] == bistr('\n') def test_strip(): bs = bistr(' Hello world! ') assert bs.original == ' Hello world! ' assert bs.modified == ' Hello world! ' bs = bs.strip() assert bs.original == ' Hello world! ' assert bs.modified == 'Hello world!' bs = bistr(' ').strip() assert bs.modified == '' assert bs.original == ' ' def test_casefold(): # 'Híffi' # í has a combining acute accent, ffi is a ligature bs = bistr('Hi\u0301\uFB03').casefold() assert bs.original == 'Hi\u0301\uFB03' assert bs.modified == 'hi\u0301ffi' assert bs.modified == bs.original.casefold() assert bs[:3].original == 'Hi\u0301' assert bs[:3].modified == 'hi\u0301' assert bs[4:5].original == '\uFB03' assert bs[4:5].modified == 'f' # Odysseus bs = bistr('Ὀδυσσεύς').casefold() assert bs.original == 'Ὀδυσσεύς' assert bs.modified == 'ὀδυσσεύσ' def test_lower(): bs = bistr('DİYARBAKIR').lower('en_US') assert bs.original == 'DİYARBAKIR' assert bs.modified == 'di̇yarbakir' bs = bistr('DİYARBAKIR').lower('tr_TR') assert bs.original == 'DİYARBAKIR' assert bs.modified == 'diyarbakır' # Odysseus bs = bistr('ὈΔΥΣΣΕΎΣ').lower('el_GR') assert bs.original == 'ὈΔΥΣΣΕΎΣ' assert bs.modified == 'ὀδυσσεύς' # Examples from The Unicode Standard, Version 12.0, Chapter 3.13 bs = bistr('ᾼΣͅ').lower('el_GR') assert bs.original == 'ᾼΣͅ' assert bs.modified == 'ᾳςͅ' bs = bistr('ͅΣͅ').lower('el_GR') assert bs.original == 'ͅΣͅ' assert bs.modified == 'ͅσͅ' bs = bistr('ᾼΣᾼ').lower('el_GR') assert bs.original == 'ᾼΣᾼ' assert bs.modified == 'ᾳσᾳ' bs = bistr('Σ').lower('el_GR') assert bs.original == 'Σ' assert bs.modified == 'σ' def test_upper(): bs = bistr('straße').upper('de_DE') assert bs.original == 'straße' assert bs.modified == 'STRASSE' assert bs[4:6].original == 'ß' assert bs[4:6].modified == 'SS' bs = bistr('Diyarbakır').upper('tr_TR') assert bs.original == 'Diyarbakır' assert bs.modified == 'DİYARBAKIR' # Odysseus bs = bistr('Ὀδυσσεύς').upper('und') assert bs.original == 'Ὀδυσσεύς' assert bs.modified == 'ὈΔΥΣΣΕΎΣ' def test_title(): bs = bistr('istanbul').title('en_US') assert bs.original == 'istanbul' assert bs.modified == 'Istanbul' bs = bistr('istanbul').title('tr_TR') assert bs.original == 'istanbul' assert bs.modified == 'İstanbul' def test_capitalize(): bs = bistr('hello WORLD').capitalize('en_US') assert bs.original == 'hello WORLD' assert bs.modified == 'Hello world' assert bs.alignment == Alignment.identity(11) bs = bistr('τελικός').capitalize('el_GR') assert bs.original == 'τελικός' assert bs.modified == 'Τελικός' assert bs.alignment == Alignment.identity(7) bs = bistr('ἴΣ').capitalize('el_GR') assert bs.original == 'ἴΣ' assert bs.modified == 'Ἴς' assert bs.alignment == Alignment.identity(2) def test_swapcase(): bs = bistr('hello WORLD').swapcase('en_US') assert bs.original == 'hello WORLD' assert bs.modified == 'HELLO world' assert bs.alignment == Alignment.identity(11) # Ligatures/digraphs in title case don't have a swapped form bs = bistr('Ljepòta').swapcase('hr_HR') assert bs.original == 'Ljepòta' assert bs.modified == 'LjEPÒTA' assert bs.alignment == Alignment.identity(6) bs = bistr('Ljepòta').normalize('NFKC').swapcase('hr_HR') assert bs.original == 'Ljepòta' assert bs.modified == 'lJEPÒTA' assert bs[0:2] == bistr('Lj', 'lJ') def test_normalize(): # "Héllö" -- é is composed but ö has a combining diaeresis bs = bistr('H\u00E9llo\u0308').normalize('NFC') assert bs.original == 'H\u00E9llo\u0308' assert bs.modified == 'H\u00E9ll\u00F6' assert bs.modified == 
unicodedata.normalize('NFC', bs.original) assert bs[1:2] == bistr('\u00E9') assert bs[4:5] == bistr('o\u0308', '\u00F6') bs = bistr('H\u00E9llo\u0308').normalize('NFD') assert bs.original == 'H\u00E9llo\u0308' assert bs.modified == 'He\u0301llo\u0308' assert bs.modified == unicodedata.normalize('NFD', bs.original) assert bs[1:3] == bistr('\u00E9', 'e\u0301') assert bs[5:7] == bistr('o\u0308') def test_readme(): bs = bistr('𝕿𝖍𝖊 𝖖𝖚𝖎𝖈𝖐, 𝖇𝖗𝖔𝖜𝖓 🦊 𝖏𝖚𝖒𝖕𝖘 𝖔𝖛𝖊𝖗 𝖙𝖍𝖊 𝖑𝖆𝖟𝖞 🐶') bs = bs.normalize('NFKD') bs = bs.casefold() bs = bs.replace('🦊', 'fox') bs = bs.replace('🐶', 'dog') bs = bs.sub(r'[^\w\s]+', '') bs = bs[:19] assert bs.modified == 'the quick brown fox' assert bs.original == '𝕿𝖍𝖊 𝖖𝖚𝖎𝖈𝖐, 𝖇𝖗𝖔𝖜𝖓 🦊' def test_equality(): bs1 = bistr(' Hello world ').strip().casefold() bs2 = bistr(' Hello world ', 'hello world', Alignment([ (0, 0), (2, 0), (3, 1), (4, 2), (5, 3), (6, 4), (7, 5), (8, 6), (9, 7), (10, 8), (11, 9), (12, 10), (13, 11), (15, 11), ])) assert bs1 == bs2 def test_alternative_regex(): import regex bs = bistr('The quick, brown 🦊 jumps over the lazy 🐶') bs = bs.sub(regex.compile(r'\pS'), lambda m: unicodedata.name(m.group())) assert bs[17:25] == bistr('🦊', 'FOX FACE') assert bs[46:] == bistr('🐶', 'DOG FACE')
bistring/python/tests/test_bistr.py/0
{ "file_path": "bistring/python/tests/test_bistr.py", "repo_id": "bistring", "token_count": 6569 }
371
# HowTo: Block all Skill Claims

Write a class that conforms to the `ValidateClaims` interface and raises an exception if the claims are skill claims:

```python
from typing import Awaitable, Callable, Dict, List

from botframework.connector.auth import SkillValidation


class AllowedSkillsClaimsValidator:

    config_key = "ALLOWED_CALLERS"

    def __init__(self, config: DefaultConfig):
        if not config:
            raise TypeError(
                "AllowedSkillsClaimsValidator: config object cannot be None."
            )

        # ALLOWED_CALLERS is the setting in the config.py file that holds the list of
        # parent bot IDs that are allowed to access the skill. To allow a new parent
        # bot, add its Microsoft app ID to that list.
        caller_list = getattr(config, self.config_key)
        if caller_list is None:
            raise TypeError(f'"{self.config_key}" not found in configuration.')
        self._allowed_callers = caller_list

    @property
    def claims_validator(self) -> Callable[[List[Dict]], Awaitable]:
        async def allow_callers_claims_validator(claims: Dict[str, object]):
            if SkillValidation.is_skill_claim(claims):
                raise PermissionError("Invalid call from a skill.")

        return allow_callers_claims_validator
```

Update the `BotFrameworkAdapter` instantiation to pass the function defined above to the `AuthenticationConfiguration` constructor:

```python
AUTH_CONFIG = AuthenticationConfiguration(
    claims_validator=AllowedSkillsClaimsValidator(CONFIG).claims_validator
)

SETTINGS = BotFrameworkAdapterSettings(
    ...,
    auth_configuration=AUTH_CONFIG,
)

ADAPTER = BotFrameworkAdapter(
    ...,
    SETTINGS,
)
```

For SingleTenant bots, additional issuers must be added based on the tenant ID:

```python
AUTH_CONFIG = AuthenticationConfiguration(
    claims_validator=AllowedSkillsClaimsValidator(CONFIG).claims_validator,
    tenant_id=the_tenant_id
)
```
botbuilder-python/doc/SkillClaimsValidation.md/0
{ "file_path": "botbuilder-python/doc/SkillClaimsValidation.md", "repo_id": "botbuilder-python", "token_count": 770 }
372
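A quick way to exercise the validator above is to await it directly with a claims payload. A sketch (the claim values are made up; `SkillValidation.is_skill_claim` inspects the version, audience, and app-id claims, so a real skill token is needed for a true positive):

```python
import asyncio

validator = AllowedSkillsClaimsValidator(CONFIG).claims_validator

async def check(claims: dict):
    try:
        await validator(claims)
        print("claims accepted")
    except PermissionError as err:
        print(f"blocked: {err}")

# Hypothetical skill-style claims; real values come from the incoming JWT.
asyncio.run(check({"ver": "1.0", "aud": "bot-app-id", "appid": "skill-app-id"}))
```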
{ "luis_schema_version": "3.2.0", "versionId": "0.1", "name": "FlightBooking", "desc": "Luis Model for CoreBot", "culture": "en-us", "tokenizerVersion": "1.0.0", "intents": [ { "name": "BookFlight" }, { "name": "Cancel" }, { "name": "GetWeather" }, { "name": "None" } ], "entities": [], "composites": [ { "name": "From", "children": [ "Airport" ], "roles": [] }, { "name": "To", "children": [ "Airport" ], "roles": [] } ], "closedLists": [ { "name": "Airport", "subLists": [ { "canonicalForm": "Paris", "list": [ "paris", "cdg" ] }, { "canonicalForm": "London", "list": [ "london", "lhr" ] }, { "canonicalForm": "Berlin", "list": [ "berlin", "txl" ] }, { "canonicalForm": "New York", "list": [ "new york", "jfk" ] }, { "canonicalForm": "Seattle", "list": [ "seattle", "sea" ] } ], "roles": [] } ], "patternAnyEntities": [], "regex_entities": [], "prebuiltEntities": [ { "name": "datetimeV2", "roles": [] } ], "model_features": [], "regex_features": [], "patterns": [], "utterances": [ { "text": "book a flight", "intent": "BookFlight", "entities": [] }, { "text": "book a flight from new york", "intent": "BookFlight", "entities": [ { "entity": "From", "startPos": 19, "endPos": 26 } ] }, { "text": "book a flight from seattle", "intent": "BookFlight", "entities": [ { "entity": "From", "startPos": 19, "endPos": 25 } ] }, { "text": "book a hotel in new york", "intent": "None", "entities": [] }, { "text": "book a restaurant", "intent": "None", "entities": [] }, { "text": "book flight from london to paris on feb 14th", "intent": "BookFlight", "entities": [ { "entity": "From", "startPos": 17, "endPos": 22 }, { "entity": "To", "startPos": 27, "endPos": 31 } ] }, { "text": "book flight to berlin on feb 14th", "intent": "BookFlight", "entities": [ { "entity": "To", "startPos": 15, "endPos": 20 } ] }, { "text": "book me a flight from london to paris", "intent": "BookFlight", "entities": [ { "entity": "From", "startPos": 22, "endPos": 27 }, { "entity": "To", "startPos": 32, "endPos": 36 } ] }, { "text": "bye", "intent": "Cancel", "entities": [] }, { "text": "cancel booking", "intent": "Cancel", "entities": [] }, { "text": "exit", "intent": "Cancel", "entities": [] }, { "text": "find an airport near me", "intent": "None", "entities": [] }, { "text": "flight to paris", "intent": "BookFlight", "entities": [ { "entity": "To", "startPos": 10, "endPos": 14 } ] }, { "text": "flight to paris from london on feb 14th", "intent": "BookFlight", "entities": [ { "entity": "To", "startPos": 10, "endPos": 14 }, { "entity": "From", "startPos": 21, "endPos": 26 } ] }, { "text": "fly from berlin to paris on may 5th", "intent": "BookFlight", "entities": [ { "entity": "From", "startPos": 9, "endPos": 14 }, { "entity": "To", "startPos": 19, "endPos": 23 } ] }, { "text": "go to paris", "intent": "BookFlight", "entities": [ { "entity": "To", "startPos": 6, "endPos": 10 } ] }, { "text": "going from paris to berlin", "intent": "BookFlight", "entities": [ { "entity": "From", "startPos": 11, "endPos": 15 }, { "entity": "To", "startPos": 20, "endPos": 25 } ] }, { "text": "i'd like to rent a car", "intent": "None", "entities": [] }, { "text": "ignore", "intent": "Cancel", "entities": [] }, { "text": "travel from new york to paris", "intent": "BookFlight", "entities": [ { "entity": "From", "startPos": 12, "endPos": 19 }, { "entity": "To", "startPos": 24, "endPos": 28 } ] }, { "text": "travel to new york", "intent": "BookFlight", "entities": [ { "entity": "To", "startPos": 10, "endPos": 17 } ] }, { "text": "travel to paris", "intent": "BookFlight", 
"entities": [ { "entity": "To", "startPos": 10, "endPos": 14 } ] }, { "text": "what's the forecast for this friday?", "intent": "GetWeather", "entities": [] }, { "text": "what's the weather like for tomorrow", "intent": "GetWeather", "entities": [] }, { "text": "what's the weather like in new york", "intent": "GetWeather", "entities": [] }, { "text": "what's the weather like?", "intent": "GetWeather", "entities": [] }, { "text": "winter is coming", "intent": "None", "entities": [] } ], "settings": [] }
botbuilder-python/generators/app/templates/core/{{cookiecutter.bot_name}}/cognitiveModels/FlightBooking.json/0
{ "file_path": "botbuilder-python/generators/app/templates/core/{{cookiecutter.bot_name}}/cognitiveModels/FlightBooking.json", "repo_id": "botbuilder-python", "token_count": 3667 }
373
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from botbuilder.dialogs import ( ComponentDialog, WaterfallDialog, WaterfallStepContext, DialogTurnResult, ) from botbuilder.dialogs.prompts import TextPrompt, PromptOptions from botbuilder.core import MessageFactory, TurnContext from botbuilder.schema import InputHints from booking_details import BookingDetails from flight_booking_recognizer import FlightBookingRecognizer from helpers import LuisHelper, Intent from .booking_dialog import BookingDialog class MainDialog(ComponentDialog): def __init__( self, luis_recognizer: FlightBookingRecognizer, booking_dialog: BookingDialog ): super(MainDialog, self).__init__(MainDialog.__name__) self._luis_recognizer = luis_recognizer self._booking_dialog_id = booking_dialog.id self.add_dialog(TextPrompt(TextPrompt.__name__)) self.add_dialog(booking_dialog) self.add_dialog( WaterfallDialog( "WFDialog", [self.intro_step, self.act_step, self.final_step] ) ) self.initial_dialog_id = "WFDialog" async def intro_step(self, step_context: WaterfallStepContext) -> DialogTurnResult: if not self._luis_recognizer.is_configured: await step_context.context.send_activity( MessageFactory.text( "NOTE: LUIS is not configured. To enable all capabilities, add 'LuisAppId', 'LuisAPIKey' and " "'LuisAPIHostName' to the appsettings.json file.", input_hint=InputHints.ignoring_input, ) ) return await step_context.next(None) message_text = ( str(step_context.options) if step_context.options else "What can I help you with today?" ) prompt_message = MessageFactory.text( message_text, message_text, InputHints.expecting_input ) return await step_context.prompt( TextPrompt.__name__, PromptOptions(prompt=prompt_message) ) async def act_step(self, step_context: WaterfallStepContext) -> DialogTurnResult: if not self._luis_recognizer.is_configured: # LUIS is not configured, we just run the BookingDialog path with an empty BookingDetailsInstance. return await step_context.begin_dialog( self._booking_dialog_id, BookingDetails() ) # Call LUIS and gather any potential booking details. (Note the TurnContext has the response to the prompt.) intent, luis_result = await LuisHelper.execute_luis_query( self._luis_recognizer, step_context.context ) if intent == Intent.BOOK_FLIGHT.value and luis_result: # Show a warning for Origin and Destination if we can't resolve them. await MainDialog._show_warning_for_unsupported_cities( step_context.context, luis_result ) # Run the BookingDialog giving it whatever details we have from the LUIS call. return await step_context.begin_dialog(self._booking_dialog_id, luis_result) if intent == Intent.GET_WEATHER.value: get_weather_text = "TODO: get weather flow here" get_weather_message = MessageFactory.text( get_weather_text, get_weather_text, InputHints.ignoring_input ) await step_context.context.send_activity(get_weather_message) else: didnt_understand_text = ( "Sorry, I didn't get that. Please try asking in a different way" ) didnt_understand_message = MessageFactory.text( didnt_understand_text, didnt_understand_text, InputHints.ignoring_input ) await step_context.context.send_activity(didnt_understand_message) return await step_context.next(None) async def final_step(self, step_context: WaterfallStepContext) -> DialogTurnResult: # If the child dialog ("BookingDialog") was cancelled or the user failed to confirm, # the Result here will be null. 
if step_context.result is not None: result = step_context.result # Now we have all the booking details call the booking service. # If the call to the booking service was successful tell the user. # time_property = Timex(result.travel_date) # travel_date_msg = time_property.to_natural_language(datetime.now()) msg_txt = f"I have you booked to {result.destination} from {result.origin} on {result.travel_date}" message = MessageFactory.text(msg_txt, msg_txt, InputHints.ignoring_input) await step_context.context.send_activity(message) prompt_message = "What else can I do for you?" return await step_context.replace_dialog(self.id, prompt_message) @staticmethod async def _show_warning_for_unsupported_cities( context: TurnContext, luis_result: BookingDetails ) -> None: if luis_result.unsupported_airports: message_text = ( f"Sorry but the following airports are not supported:" f" {', '.join(luis_result.unsupported_airports)}" ) message = MessageFactory.text( message_text, message_text, InputHints.ignoring_input ) await context.send_activity(message)
botbuilder-python/generators/app/templates/core/{{cookiecutter.bot_name}}/dialogs/main_dialog.py/0
{ "file_path": "botbuilder-python/generators/app/templates/core/{{cookiecutter.bot_name}}/dialogs/main_dialog.py", "repo_id": "botbuilder-python", "token_count": 2306 }
374
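Wiring this dialog up follows the template's app.py. A sketch (import paths are assumed from the template layout, and `BookingDialog` is assumed to take no constructor arguments):

```python
from config import DefaultConfig  # the template's config module
from flight_booking_recognizer import FlightBookingRecognizer
from dialogs import BookingDialog, MainDialog

CONFIG = DefaultConfig()
RECOGNIZER = FlightBookingRecognizer(CONFIG)
BOOKING_DIALOG = BookingDialog()
DIALOG = MainDialog(RECOGNIZER, BOOKING_DIALOG)
```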
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import hashlib import hmac import json from io import IOBase from typing import List, Union import aiohttp from aiohttp.web_request import Request from slack.web.client import WebClient from slack.web.slack_response import SlackResponse from botbuilder.schema import Activity from botbuilder.adapters.slack.slack_client_options import SlackClientOptions from botbuilder.adapters.slack.slack_message import SlackMessage POST_MESSAGE_URL = "https://slack.com/api/chat.postMessage" POST_EPHEMERAL_MESSAGE_URL = "https://slack.com/api/chat.postEphemeral" class SlackClient(WebClient): """ Slack client that extends https://github.com/slackapi/python-slackclient. """ def __init__(self, options: SlackClientOptions): if not options or not options.slack_bot_token: raise Exception("SlackAdapterOptions and bot_token are required") if ( not options.slack_verification_token and not options.slack_client_signing_secret ): warning = ( "\n****************************************************************************************\n" "* WARNING: Your bot is operating without recommended security mechanisms in place. *\n" "* Initialize your adapter with a clientSigningSecret parameter to enable *\n" "* verification that all incoming webhooks originate with Slack: *\n" "* *\n" "* adapter = new SlackAdapter({clientSigningSecret: <my secret from slack>}); *\n" "* *\n" "****************************************************************************************\n" ">> Slack docs: https://api.slack.com/docs/verifying-requests-from-slack" ) raise Exception( warning + "Required: include a verificationToken or clientSigningSecret to verify incoming Events API webhooks" ) super().__init__(token=options.slack_bot_token, run_async=True) self.options = options self.identity = None async def login_with_slack(self): if self.options.slack_bot_token: self.identity = await self.test_auth() elif ( not self.options.slack_client_id or not self.options.slack_client_secret or not self.options.slack_redirect_uri or not self.options.slack_scopes ): raise Exception( "Missing Slack API credentials! Provide SlackClientId, SlackClientSecret, scopes and SlackRedirectUri " "as part of the SlackAdapter options." 
) def is_logged_in(self): return self.identity is not None async def test_auth(self) -> str: auth = await self.auth_test() return auth.data["user_id"] async def channels_list_ex(self, exclude_archived: bool = True) -> SlackResponse: args = {"exclude_archived": "1" if exclude_archived else "0"} return await self.channels_list(**args) async def users_counts(self) -> SlackResponse: return await self.api_call("users.counts") async def im_history_ex( self, channel: str, latest_timestamp: str = None, oldest_timestamp: str = None, count: int = None, unreads: bool = None, ) -> SlackResponse: args = {} if latest_timestamp: args["latest"] = latest_timestamp if oldest_timestamp: args["oldest"] = oldest_timestamp if count: args["count"] = str(count) if unreads: args["unreads"] = "1" if unreads else "0" return await self.im_history(channel=channel, **args) async def files_info_ex( self, file_id: str, page: int = None, count: int = None ) -> SlackResponse: args = {"count": str(count), "page": str(page)} return await self.files_info(file=file_id, **args) async def files_list_ex( self, user_id: str = None, date_from: str = None, date_to: str = None, count: int = None, page: int = None, types: List[str] = None, ) -> SlackResponse: args = {} if user_id: args["user"] = user_id if date_from: args["ts_from"] = date_from if date_to: args["ts_to"] = date_to if count: args["count"] = str(count) if page: args["page"] = str(page) if types: args["types"] = ",".join(types) return await self.files_list(**args) async def groups_history_ex( self, channel: str, latest: str = None, oldest: str = None, count: int = None ) -> SlackResponse: args = {} if latest: args["latest"] = latest if oldest: args["oldest"] = oldest if count: args["count"] = count return await self.groups_history(channel=channel, **args) async def groups_list_ex(self, exclude_archived: bool = True) -> SlackResponse: args = {"exclude_archived": "1" if exclude_archived else "0"} return await self.groups_list(**args) async def get_preferences(self) -> SlackResponse: return await self.api_call("users.prefs.get", http_verb="GET") async def stars_list_ex( self, user: str = None, count: int = None, page: int = None ) -> SlackResponse: args = {} if user: args["user"] = user if count: args["count"] = str(count) if page: args["page"] = str(page) return await self.stars_list(**args) async def groups_close(self, channel: str) -> SlackResponse: args = {"channel": channel} return await self.api_call("groups.close", params=args) async def chat_post_ephemeral_ex( self, channel: str, text: str, target_user: str, parse: str = None, link_names: bool = False, attachments: List[str] = None, # pylint: disable=unused-argument as_user: bool = False, ) -> SlackResponse: args = { "text": text, "link_names": "1" if link_names else "0", "as_user": "1" if as_user else "0", } if parse: args["parse"] = parse # TODO: attachments (see PostEphemeralMessageAsync) # See: https://api.slack.com/messaging/composing/layouts#attachments # See: https://github.com/Inumedia/SlackAPI/blob/master/SlackAPI/Attachment.cs return await self.chat_postEphemeral(channel=channel, user=target_user, **args) async def chat_post_message_ex( self, channel: str, text: str, bot_name: str = None, parse: str = None, link_names: bool = False, blocks: List[str] = None, # pylint: disable=unused-argument attachments: List[str] = None, # pylint: disable=unused-argument unfurl_links: bool = False, icon_url: str = None, icon_emoji: str = None, as_user: bool = False, ) -> SlackResponse: args = { "text": text, "link_names": 
"1" if link_names else "0", "as_user": "1" if as_user else "0", } if bot_name: args["username"] = bot_name if parse: args["parse"] = parse if unfurl_links: args["unfurl_links"] = "1" if unfurl_links else "0" if icon_url: args["icon_url"] = icon_url if icon_emoji: args["icon_emoji"] = icon_emoji # TODO: blocks and attachments (see PostMessageAsync) # the blocks and attachments are combined into a single dict # See: https://api.slack.com/messaging/composing/layouts#attachments # See: https://github.com/Inumedia/SlackAPI/blob/master/SlackAPI/Attachment.cs return await self.chat_postMessage(channel=channel, **args) async def search_all_ex( self, query: str, sorting: str = None, direction: str = None, enable_highlights: bool = False, count: int = None, page: int = None, ) -> SlackResponse: args = {"highlight": "1" if enable_highlights else "0"} if sorting: args["sort"] = sorting if direction: args["sort_dir"] = direction if count: args["count"] = str(count) if page: args["page"] = str(page) return await self.search_all(query=query, **args) async def search_files_ex( self, query: str, sorting: str = None, direction: str = None, enable_highlights: bool = False, count: int = None, page: int = None, ) -> SlackResponse: args = {"highlight": "1" if enable_highlights else "0"} if sorting: args["sort"] = sorting if direction: args["sort_dir"] = direction if count: args["count"] = str(count) if page: args["page"] = str(page) return await self.search_files(query=query, **args) async def search_messages_ex( self, query: str, sorting: str = None, direction: str = None, enable_highlights: bool = False, count: int = None, page: int = None, ) -> SlackResponse: args = {"highlight": "1" if enable_highlights else "0"} if sorting: args["sort"] = sorting if direction: args["sort_dir"] = direction if count: args["count"] = str(count) if page: args["page"] = str(page) return await self.search_messages(query=query, **args) async def chat_update_ex( self, timestamp: str, channel: str, text: str, bot_name: str = None, parse: str = None, link_names: bool = False, attachments: List[str] = None, # pylint: disable=unused-argument as_user: bool = False, ): args = { "text": text, "link_names": "1" if link_names else "0", "as_user": "1" if as_user else "0", } if bot_name: args["username"] = bot_name if parse: args["parse"] = parse # TODO: attachments (see PostEphemeralMessageAsync) # See: https://api.slack.com/messaging/composing/layouts#attachments # See: https://github.com/Inumedia/SlackAPI/blob/master/SlackAPI/Attachment.cs return await self.chat_update(channel=channel, ts=timestamp) async def files_upload_ex( self, file: Union[str, IOBase] = None, content: str = None, channels: List[str] = None, title: str = None, initial_comment: str = None, file_type: str = None, ): args = {} if channels: args["channels"] = ",".join(channels) if title: args["title"] = title if initial_comment: args["initial_comment"] = initial_comment if file_type: args["filetype"] = file_type return await self.files_upload(file=file, content=content, **args) async def get_bot_user_identity( self, activity: Activity # pylint: disable=unused-argument ) -> str: return self.identity def verify_signature(self, req: Request, body: str) -> bool: timestamp = req.headers["X-Slack-Request-Timestamp"] message = ":".join(["v0", timestamp, body]) computed_signature = "V0=" + hmac.new( bytes(self.options.slack_client_signing_secret, "utf-8"), msg=bytes(message, "utf-8"), digestmod=hashlib.sha256, ).hexdigest().upper().replace("-", "") received_signature = 
req.headers["X-Slack-Signature"].upper() return computed_signature == received_signature async def post_message(self, message: SlackMessage) -> SlackResponse: if not message: return None request_content = { "token": self.options.slack_bot_token, "channel": message.channel, "text": message.text, } if message.thread_ts: request_content["thread_ts"] = message.thread_ts if message.blocks: request_content["blocks"] = json.dumps(message.blocks) session = aiohttp.ClientSession( timeout=aiohttp.ClientTimeout(total=30), ) http_verb = "POST" api_url = POST_EPHEMERAL_MESSAGE_URL if message.ephemeral else POST_MESSAGE_URL req_args = {"data": request_content} async with session.request(http_verb, api_url, **req_args) as res: response_content = {} try: response_content = await res.json() except aiohttp.ContentTypeError: pass response_data = { "data": response_content, "headers": res.headers, "status_code": res.status, } data = { "client": self, "http_verb": http_verb, "api_url": api_url, "req_args": req_args, } response = SlackResponse(**{**data, **response_data}).validate() await session.close() return response
botbuilder-python/libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_client.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_client.py", "repo_id": "botbuilder-python", "token_count": 6422 }
375
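End-to-end use of this client boils down to constructing options, logging in, and posting. A sketch (the `SlackClientOptions` attribute names are assumed from the checks in the constructor above, and the `SlackMessage` fields from `post_message`; check both classes for their real constructors):

```python
import asyncio
from botbuilder.adapters.slack import SlackClient, SlackClientOptions, SlackMessage

async def main():
    options = SlackClientOptions()               # construction details assumed
    options.slack_bot_token = "xoxb-..."
    options.slack_client_signing_secret = "..."

    client = SlackClient(options)
    await client.login_with_slack()              # resolves the bot's user identity

    msg = SlackMessage()                         # msrest-style model; fields assumed
    msg.channel = "C0123456"
    msg.text = "Hello from the bot"
    await client.post_message(msg)

asyncio.run(main())
```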
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.

from pathlib import PurePosixPath
from typing import Tuple
from urllib.parse import ParseResult, parse_qs, unquote, urlparse, urlunparse
from uuid import UUID


class LuisApplication:
    """
    Data describing a LUIS application.
    """

    def __init__(self, application_id: str, endpoint_key: str, endpoint: str):
        """Initializes a new instance of the :class:`LuisApplication` class.

        :param application_id: LUIS application ID.
        :type application_id: str
        :param endpoint_key: LUIS subscription or endpoint key.
        :type endpoint_key: str
        :param endpoint: LUIS endpoint to use, like https://westus.api.cognitive.microsoft.com.
        :type endpoint: str
        :raises ValueError: If `application_id` is not a valid LUIS application ID.
        :raises ValueError: If `endpoint_key` is not a valid LUIS subscription key.
        :raises ValueError: If `endpoint` is not a valid LUIS endpoint.
        """
        _, valid = LuisApplication._try_parse_uuid4(application_id)
        if not valid:
            raise ValueError(f'"{application_id}" is not a valid LUIS application id.')

        _, valid = LuisApplication._try_parse_uuid4(endpoint_key)
        if not valid:
            raise ValueError(f'"{endpoint_key}" is not a valid LUIS subscription key.')

        if not endpoint or endpoint.isspace():
            endpoint = "https://westus.api.cognitive.microsoft.com"

        _, valid = LuisApplication._try_parse_url(endpoint)
        if not valid:
            raise ValueError(f'"{endpoint}" is not a valid LUIS endpoint.')

        self.application_id = application_id
        self.endpoint_key = endpoint_key
        self.endpoint = endpoint

    @classmethod
    def from_application_endpoint(cls, application_endpoint: str):
        """Initializes a new instance of the :class:`LuisApplication` class.

        :param application_endpoint: LUIS application endpoint.
        :type application_endpoint: str
        :return: A new :class:`LuisApplication` instance.
        :rtype: LuisApplication
        """
        (application_id, endpoint_key, endpoint) = LuisApplication._parse(
            application_endpoint
        )
        return cls(application_id, endpoint_key, endpoint)

    @staticmethod
    def _parse(application_endpoint: str) -> Tuple[str, str, str]:
        url, valid = LuisApplication._try_parse_url(application_endpoint)
        if not valid:
            raise ValueError(
                f"{application_endpoint} is not a valid LUIS application endpoint."
            )

        segments = PurePosixPath(unquote(url.path)).parts
        application_id = segments[-1] if segments else None
        qs_parsed_result = parse_qs(url.query)
        endpoint_key = qs_parsed_result.get("subscription-key", [None])[0]

        parts_for_base_url = url.scheme, url.netloc, "", None, None, None
        endpoint = urlunparse(parts_for_base_url)
        return (application_id, endpoint_key, endpoint)

    @staticmethod
    def _try_parse_uuid4(uuid_string: str) -> Tuple[UUID, bool]:
        try:
            uuid = UUID(uuid_string, version=4)
        except (TypeError, ValueError):
            return None, False

        return uuid, True

    @staticmethod
    def _try_parse_url(url: str) -> Tuple[ParseResult, bool]:
        try:
            result = urlparse(url)
            return result, True
        except ValueError:
            return None, False
botbuilder-python/libraries/botbuilder-ai/botbuilder/ai/luis/luis_application.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-ai/botbuilder/ai/luis/luis_application.py", "repo_id": "botbuilder-python", "token_count": 1360 }
376
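Given the `_parse` helper above, a full endpoint URL round-trips into its three parts. A sketch (the IDs are made up but well-formed UUIDs):

```python
app = LuisApplication.from_application_endpoint(
    "https://westus.api.cognitive.microsoft.com/luis/v2.0/apps/"
    "b31aeaf3-3511-495b-a07f-571fc873214b"
    "?subscription-key=048ec46dc58e495482b0c447cfdbd291&q="
)
print(app.application_id)  # b31aeaf3-3511-495b-a07f-571fc873214b
print(app.endpoint)        # https://westus.api.cognitive.microsoft.com
```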
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from msrest.serialization import Model class FeedbackRecord(Model): """Active learning feedback record.""" _attribute_map = { "user_id": {"key": "userId", "type": "str"}, "user_question": {"key": "userQuestion", "type": "str"}, "qna_id": {"key": "qnaId", "type": "int"}, } def __init__(self, **kwargs): super().__init__(**kwargs) self.user_id = kwargs.get("user_id", None) self.user_question = kwargs.get("user_question", None) self.qna_id = kwargs.get("qna_id", None)
botbuilder-python/libraries/botbuilder-ai/botbuilder/ai/qna/models/feedback_record.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-ai/botbuilder/ai/qna/models/feedback_record.py", "repo_id": "botbuilder-python", "token_count": 258 }
377
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from .models import Metadata, QnARequestContext from .models.ranker_types import RankerTypes from .models.join_operator import JoinOperator class QnAMakerOptions: """ Defines options used to configure a `QnAMaker` instance. remarks: -------- All parameters are optional. """ def __init__( self, score_threshold: float = 0.0, timeout: int = 0, top: int = 0, strict_filters: [Metadata] = None, context: [QnARequestContext] = None, qna_id: int = None, is_test: bool = False, ranker_type: str = RankerTypes.DEFAULT, strict_filters_join_operator: str = JoinOperator.AND, ): """ Parameters: ----------- score_threshold (float): The minimum score threshold, used to filter returned results. Values range from score of 0.0 to 1.0. timeout (int): The time in milliseconds to wait before the request times out. top (int): The number of ranked results to return. strict_filters ([Metadata]): Filters to use on queries to a QnA knowledge base, based on a QnA pair's metadata. context ([QnARequestContext]): The context of the previous turn. qna_id (int): Id of the current question asked (if available). is_test (bool): A value indicating whether to call test or prod environment of a knowledge base. ranker_type (str): The QnA ranker type to use. strict_filters_join_operator (str): A value indicating how strictly you want to apply strict_filters on QnA pairs' metadata. For example, when combining several metadata filters, you can determine if you are concerned with all filters matching or just at least one filter matching. """ self.score_threshold = score_threshold self.timeout = timeout self.top = top self.strict_filters = strict_filters or [] self.context = context self.qna_id = qna_id self.is_test = is_test self.ranker_type = ranker_type self.strict_filters_join_operator = strict_filters_join_operator
botbuilder-python/libraries/botbuilder-ai/botbuilder/ai/qna/qnamaker_options.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-ai/botbuilder/ai/qna/qnamaker_options.py", "repo_id": "botbuilder-python", "token_count": 968 }
378
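A hedged construction example for the options class above; the metadata name/value pair is made up for illustration:

from botbuilder.ai.qna import QnAMakerOptions
from botbuilder.ai.qna.models import Metadata

options = QnAMakerOptions(
    score_threshold=0.5,  # drop answers scoring below 0.5
    top=3,                # return at most three ranked answers
    strict_filters=[Metadata(name="topic", value="stoves")],  # hypothetical filter
)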
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from typing import List from botbuilder.core import BotAdapter, TurnContext from botbuilder.schema import Activity, ConversationReference, ResourceResponse class NullAdapter(BotAdapter): """ This is a BotAdapter that does nothing on the Send operation, equivalent to piping to /dev/null. """ # pylint: disable=unused-argument async def send_activities( self, context: TurnContext, activities: List[Activity] ) -> List[ResourceResponse]: return [ResourceResponse()] async def update_activity(self, context: TurnContext, activity: Activity): raise NotImplementedError() async def delete_activity( self, context: TurnContext, reference: ConversationReference ): raise NotImplementedError()
botbuilder-python/libraries/botbuilder-ai/tests/luis/null_adapter.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-ai/tests/luis/null_adapter.py", "repo_id": "botbuilder-python", "token_count": 260 }
379
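A sketch of driving bot logic through the adapter above, assuming the NullAdapter class is in scope (it lives in this test module, not a package export). The inbound activity is fabricated, and every send is discarded:

import asyncio

from botbuilder.core import TurnContext
from botbuilder.schema import Activity, ActivityTypes


async def main():
    adapter = NullAdapter()
    context = TurnContext(adapter, Activity(type=ActivityTypes.message, text="hi"))
    # The adapter returns a bare ResourceResponse; nothing is actually delivered.
    await context.send_activity("this goes nowhere")


asyncio.run(main())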
{ "entities": { "Airline": [ [ "Delta" ] ], "datetime": [ { "timex": [ "T15" ], "type": "time" } ], "dimension": [ { "number": 3, "units": "Picometer" } ] }, "intents": { "Roles": { "score": 0.446264923 } }, "sentiment": { "label": "neutral", "score": 0.5 }, "text": "fly on delta at 3pm", "v3": { "options": { "includeAllIntents": false, "includeAPIResults": true, "includeInstanceData": false, "log": true, "preferExternalEntities": true, "slot": "production" }, "response": { "prediction": { "entities": { "Airline": [ [ "Delta" ] ], "datetimeV2": [ { "type": "time", "values": [ { "timex": "T15", "value": "15:00:00" } ] } ], "dimension": [ { "number": 3, "unit": "Picometer" } ] }, "intents": { "Roles": { "score": 0.446264923 } }, "normalizedQuery": "fly on delta at 3pm", "sentiment": { "label": "neutral", "score": 0.5 }, "topIntent": "Roles" }, "query": "fly on delta at 3pm" } } }
botbuilder-python/libraries/botbuilder-ai/tests/luis/test_data/Minimal_v3.json/0
{ "file_path": "botbuilder-python/libraries/botbuilder-ai/tests/luis/test_data/Minimal_v3.json", "repo_id": "botbuilder-python", "token_count": 1119 }
380
{ "query": "4", "topScoringIntent": { "intent": "None", "score": 0.8575135 }, "entities": [ { "entity": "4", "type": "builtin.datetime.time", "startIndex": 0, "endIndex": 0, "resolution": { "comment": "ampm", "time": "T04" } } ] }
botbuilder-python/libraries/botbuilder-ai/tests/luis/test_data/V1DatetimeResolution.json/0
{ "file_path": "botbuilder-python/libraries/botbuilder-ai/tests/luis/test_data/V1DatetimeResolution.json", "repo_id": "botbuilder-python", "token_count": 170 }
381
{ "activeLearningEnabled": false, "answers": [ { "questions": [ "how do I clean the stove?" ], "answer": "BaseCamp: You can use a damp rag to clean around the Power Pack", "score": 100, "id": 5, "source": "Editorial", "metadata": [] } ] }
botbuilder-python/libraries/botbuilder-ai/tests/qna/test_data/ReturnsAnswer.json/0
{ "file_path": "botbuilder-python/libraries/botbuilder-ai/tests/qna/test_data/ReturnsAnswer.json", "repo_id": "botbuilder-python", "token_count": 202 }
382
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. """Flask Application Insights package.""" from .flask_telemetry_middleware import BotTelemetryMiddleware __all__ = ["BotTelemetryMiddleware"]
botbuilder-python/libraries/botbuilder-applicationinsights/botbuilder/applicationinsights/flask/__init__.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-applicationinsights/botbuilder/applicationinsights/flask/__init__.py", "repo_id": "botbuilder-python", "token_count": 62 }
383
"""Implements a CosmosDB based storage provider using partitioning for a bot. """ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from typing import Dict, List from threading import Lock import json from azure.cosmos import documents, http_constants from jsonpickle.pickler import Pickler from jsonpickle.unpickler import Unpickler import azure.cosmos.cosmos_client as cosmos_client # pylint: disable=no-name-in-module,import-error import azure.cosmos.errors as cosmos_errors # pylint: disable=no-name-in-module,import-error from botbuilder.core.storage import Storage from botbuilder.azure import CosmosDbKeyEscape class CosmosDbPartitionedConfig: """The class for partitioned CosmosDB configuration for the Azure Bot Framework.""" def __init__( self, cosmos_db_endpoint: str = None, auth_key: str = None, database_id: str = None, container_id: str = None, cosmos_client_options: dict = None, container_throughput: int = 400, key_suffix: str = "", compatibility_mode: bool = False, **kwargs, ): """Create the Config object. :param cosmos_db_endpoint: The CosmosDB endpoint. :param auth_key: The authentication key for Cosmos DB. :param database_id: The database identifier for Cosmos DB instance. :param container_id: The container identifier. :param cosmos_client_options: The options for the CosmosClient. Currently only supports connection_policy and consistency_level :param container_throughput: The throughput set when creating the Container. Defaults to 400. :param key_suffix: The suffix to be added to every key. The keySuffix must contain only valid ComosDb key characters. (e.g. not: '\\', '?', '/', '#', '*') :param compatibility_mode: True if keys should be truncated in order to support previous CosmosDb max key length of 255. :return CosmosDbPartitionedConfig: """ self.__config_file = kwargs.get("filename") if self.__config_file: kwargs = json.load(open(self.__config_file)) self.cosmos_db_endpoint = cosmos_db_endpoint or kwargs.get("cosmos_db_endpoint") self.auth_key = auth_key or kwargs.get("auth_key") self.database_id = database_id or kwargs.get("database_id") self.container_id = container_id or kwargs.get("container_id") self.cosmos_client_options = cosmos_client_options or kwargs.get( "cosmos_client_options", {} ) self.container_throughput = container_throughput or kwargs.get( "container_throughput" ) self.key_suffix = key_suffix or kwargs.get("key_suffix") self.compatibility_mode = compatibility_mode or kwargs.get("compatibility_mode") class CosmosDbPartitionedStorage(Storage): """A CosmosDB based storage provider using partitioning for a bot.""" def __init__(self, config: CosmosDbPartitionedConfig): """Create the storage object. :param config: """ super(CosmosDbPartitionedStorage, self).__init__() self.config = config self.client = None self.database = None self.container = None self.compatability_mode_partition_key = False # Lock used for synchronizing container creation self.__lock = Lock() if config.key_suffix is None: config.key_suffix = "" if not config.key_suffix.__eq__(""): if config.compatibility_mode: raise Exception( "compatibilityMode cannot be true while using a keySuffix." ) suffix_escaped = CosmosDbKeyEscape.sanitize_key(config.key_suffix) if not suffix_escaped.__eq__(config.key_suffix): raise Exception( f"Cannot use invalid Row Key characters: {config.key_suffix} in keySuffix." ) async def read(self, keys: List[str]) -> Dict[str, object]: """Read storeitems from storage. 
:param keys: :return dict: """ if not keys: raise Exception("Keys are required when reading") await self.initialize() store_items = {} for key in keys: try: escaped_key = CosmosDbKeyEscape.sanitize_key( key, self.config.key_suffix, self.config.compatibility_mode ) read_item_response = self.client.ReadItem( self.__item_link(escaped_key), self.__get_partition_key(escaped_key) ) document_store_item = read_item_response if document_store_item: store_items[document_store_item["realId"]] = self.__create_si( document_store_item ) # When an item is not found a CosmosException is thrown, but we want to # return an empty collection so in this instance we catch and do not rethrow. # Throw for any other exception. except cosmos_errors.HTTPFailure as err: if ( err.status_code == cosmos_errors.http_constants.StatusCodes.NOT_FOUND ): continue raise err except Exception as err: raise err return store_items async def write(self, changes: Dict[str, object]): """Save storeitems to storage. :param changes: :return: """ if changes is None: raise Exception("Changes are required when writing") if not changes: return await self.initialize() for key, change in changes.items(): e_tag = None if isinstance(change, dict): e_tag = change.get("e_tag", None) elif hasattr(change, "e_tag"): e_tag = change.e_tag doc = { "id": CosmosDbKeyEscape.sanitize_key( key, self.config.key_suffix, self.config.compatibility_mode ), "realId": key, "document": self.__create_dict(change), } if e_tag == "": raise Exception("cosmosdb_storage.write(): etag missing") access_condition = { "accessCondition": {"type": "IfMatch", "condition": e_tag} } options = ( access_condition if e_tag != "*" and e_tag and e_tag != "" else None ) try: self.client.UpsertItem( database_or_Container_link=self.__container_link, document=doc, options=options, ) except cosmos_errors.HTTPFailure as err: raise err except Exception as err: raise err async def delete(self, keys: List[str]): """Remove storeitems from storage. 
        :param keys: List of keys to remove from storage.
        :return:
        """
        await self.initialize()

        for key in keys:
            escaped_key = CosmosDbKeyEscape.sanitize_key(
                key, self.config.key_suffix, self.config.compatibility_mode
            )

            try:
                self.client.DeleteItem(
                    document_link=self.__item_link(escaped_key),
                    options=self.__get_partition_key(escaped_key),
                )
            except cosmos_errors.HTTPFailure as err:
                if (
                    err.status_code
                    == cosmos_errors.http_constants.StatusCodes.NOT_FOUND
                ):
                    continue

                raise err
            except Exception as err:
                raise err

    async def initialize(self):
        if not self.container:

            if not self.client:
                self.client = cosmos_client.CosmosClient(
                    self.config.cosmos_db_endpoint,
                    {"masterKey": self.config.auth_key},
                    self.config.cosmos_client_options.get("connection_policy", None),
                    self.config.cosmos_client_options.get("consistency_level", None),
                )

            if not self.database:
                with self.__lock:
                    try:
                        if not self.database:
                            self.database = self.client.CreateDatabase(
                                {"id": self.config.database_id}
                            )
                    except cosmos_errors.HTTPFailure:
                        self.database = self.client.ReadDatabase(
                            "dbs/" + self.config.database_id
                        )

            self.__get_or_create_container()

    def __get_or_create_container(self):
        with self.__lock:
            container_def = {
                "id": self.config.container_id,
                "partitionKey": {
                    "paths": ["/id"],
                    "kind": documents.PartitionKind.Hash,
                },
            }
            try:
                if not self.container:
                    self.container = self.client.CreateContainer(
                        "dbs/" + self.database["id"],
                        container_def,
                        {"offerThroughput": self.config.container_throughput},
                    )
            except cosmos_errors.HTTPFailure as err:
                if err.status_code == http_constants.StatusCodes.CONFLICT:
                    self.container = self.client.ReadContainer(
                        "dbs/" + self.database["id"] + "/colls/" + container_def["id"]
                    )
                    if "partitionKey" not in self.container:
                        self.compatability_mode_partition_key = True
                    else:
                        paths = self.container["partitionKey"]["paths"]
                        if "/partitionKey" in paths:
                            self.compatability_mode_partition_key = True
                        elif "/id" not in paths:
                            raise Exception(
                                f"Custom Partition Key Paths are not supported. {self.config.container_id} "
                                f"has a custom Partition Key Path of {paths[0]}."
                            )
                else:
                    raise err

    def __get_partition_key(self, key: str) -> str:
        return None if self.compatability_mode_partition_key else {"partitionKey": key}

    @staticmethod
    def __create_si(result) -> object:
        """Create an object from a result out of CosmosDB.

        :param result:
        :return object:
        """
        # get the document item from the result and turn into a dict
        doc = result.get("document")

        # read the e_tag from Cosmos
        if result.get("_etag"):
            doc["e_tag"] = result["_etag"]

        result_obj = Unpickler().restore(doc)

        # create and return the object
        return result_obj

    @staticmethod
    def __create_dict(store_item: object) -> Dict:
        """Return the dict of an object.

        This eliminates non_magic attributes and the e_tag.

        :param store_item:
        :return dict:
        """
        # read the content
        json_dict = Pickler().flatten(store_item)
        if "e_tag" in json_dict:
            del json_dict["e_tag"]

        # loop through attributes and write and return a dict
        return json_dict

    def __item_link(self, identifier) -> str:
        """Return the item link of an item in the container.

        :param identifier:
        :return str:
        """
        return self.__container_link + "/docs/" + identifier

    @property
    def __container_link(self) -> str:
        """Return the container link in the database.

        :return str:
        """
        return self.__database_link + "/colls/" + self.config.container_id

    @property
    def __database_link(self) -> str:
        """Return the database link.

        :return str:
        """
        return "dbs/" + self.config.database_id
botbuilder-python/libraries/botbuilder-azure/botbuilder/azure/cosmosdb_partitioned_storage.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-azure/botbuilder/azure/cosmosdb_partitioned_storage.py", "repo_id": "botbuilder-python", "token_count": 5963 }
384
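A hypothetical wiring of the storage class above; the endpoint and key below are placeholders (the endpoint shown follows the local Cosmos DB emulator convention):

import asyncio

from botbuilder.azure import CosmosDbPartitionedConfig, CosmosDbPartitionedStorage

config = CosmosDbPartitionedConfig(
    cosmos_db_endpoint="https://localhost:8081",  # placeholder / emulator endpoint
    auth_key="<auth-key>",                        # placeholder key
    database_id="bot-db",
    container_id="bot-storage",
)
storage = CosmosDbPartitionedStorage(config)


async def demo():
    # "e_tag": "*" requests an unconditional upsert.
    await storage.write({"user/123": {"counter": 1, "e_tag": "*"}})
    items = await storage.read(["user/123"])
    print(items["user/123"])


# asyncio.run(demo())  # requires a reachable Cosmos DB instance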
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from typing import Awaitable, Callable, List, Union from .bot_state import BotState from .bot_state_set import BotStateSet from .middleware_set import Middleware from .turn_context import TurnContext class AutoSaveStateMiddleware(Middleware): def __init__(self, bot_states: Union[List[BotState], BotStateSet] = None): if bot_states is None: bot_states = [] if isinstance(bot_states, BotStateSet): self.bot_state_set: BotStateSet = bot_states else: self.bot_state_set: BotStateSet = BotStateSet(bot_states) def add(self, bot_state: BotState) -> "AutoSaveStateMiddleware": if bot_state is None: raise TypeError("Expected BotState") self.bot_state_set.add(bot_state) return self async def on_turn( self, context: TurnContext, logic: Callable[[TurnContext], Awaitable] ): await logic() await self.bot_state_set.save_all_changes(context, False)
botbuilder-python/libraries/botbuilder-core/botbuilder/core/auto_save_state_middleware.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-core/botbuilder/core/auto_save_state_middleware.py", "repo_id": "botbuilder-python", "token_count": 411 }
385
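A sketch of registering the middleware above so state changes persist after every turn; TestAdapter stands in for a production adapter:

from botbuilder.core import ConversationState, MemoryStorage, UserState
from botbuilder.core.adapters import TestAdapter

memory = MemoryStorage()
user_state = UserState(memory)
conversation_state = ConversationState(memory)

adapter = TestAdapter(lambda context: context.send_activity("echo"))
adapter.use(AutoSaveStateMiddleware([user_state, conversation_state]))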
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from uuid import uuid4 from typing import Any, List from jsonpickle import Pickler from botbuilder.core import BotState, ConversationState, TurnContext, UserState from botbuilder.schema import Activity, ActivityTypes, ConversationReference from botframework.connector.auth import MicrosoftAppCredentials from .inspection_session import InspectionSession from .inspection_sessions_by_status import ( InspectionSessionsByStatus, DEFAULT_INSPECTION_SESSIONS_BY_STATUS, ) from .inspection_state import InspectionState from .interception_middleware import InterceptionMiddleware from .trace_activity import from_state, make_command_activity class InspectionMiddleware(InterceptionMiddleware): _COMMAND = "/INSPECT" def __init__( # pylint: disable=super-init-not-called self, inspection_state: InspectionState, user_state: UserState = None, conversation_state: ConversationState = None, credentials: MicrosoftAppCredentials = None, ): self.inspection_state = inspection_state self.inspection_state_accessor = inspection_state.create_property( "InspectionSessionByStatus" ) self.user_state = user_state self.conversation_state = conversation_state self.credentials = MicrosoftAppCredentials( credentials.microsoft_app_id if credentials else "", credentials.microsoft_app_password if credentials else "", ) async def process_command(self, context: TurnContext) -> Any: if context.activity.type == ActivityTypes.message and context.activity.text: original_text = context.activity.text TurnContext.remove_recipient_mention(context.activity) command = context.activity.text.strip().split(" ") if len(command) > 1 and command[0] == InspectionMiddleware._COMMAND: if len(command) == 2 and command[1] == "open": await self._process_open_command(context) return True if len(command) == 3 and command[1] == "attach": await self.process_attach_command(context, command[2]) return True context.activity.text = original_text return False async def _inbound(self, context: TurnContext, trace_activity: Activity) -> Any: if await self.process_command(context): return False, False session = await self._find_session(context) if session: if await self._invoke_send(context, session, trace_activity): return True, True return True, False async def _outbound( self, context: TurnContext, trace_activities: List[Activity] ) -> Any: session = await self._find_session(context) if session: for trace_activity in trace_activities: if not await self._invoke_send(context, session, trace_activity): break async def _trace_state(self, context: TurnContext) -> Any: session = await self._find_session(context) if session: if self.user_state: await self.user_state.load(context, False) if self.conversation_state: await self.conversation_state.load(context, False) bot_state = {} if self.user_state: bot_state["user_state"] = InspectionMiddleware._get_serialized_context( self.user_state, context ) if self.conversation_state: bot_state[ "conversation_state" ] = InspectionMiddleware._get_serialized_context( self.conversation_state, context ) await self._invoke_send(context, session, from_state(bot_state)) async def _process_open_command(self, context: TurnContext) -> Any: sessions = await self.inspection_state_accessor.get( context, DEFAULT_INSPECTION_SESSIONS_BY_STATUS ) session_id = self._open_command( sessions, TurnContext.get_conversation_reference(context.activity) ) await context.send_activity( make_command_activity( f"{InspectionMiddleware._COMMAND} attach {session_id}" ) ) 
        await self.inspection_state.save_changes(context, False)

    async def process_attach_command(
        self, context: TurnContext, session_id: str
    ) -> None:
        sessions = await self.inspection_state_accessor.get(
            context, DEFAULT_INSPECTION_SESSIONS_BY_STATUS
        )

        if self._attach_command(context.activity.conversation.id, sessions, session_id):
            await context.send_activity(
                "Attached to session, all traffic is being replicated for inspection."
            )
        else:
            await context.send_activity(
                f"Open session with id {session_id} does not exist."
            )

        await self.inspection_state.save_changes(context, False)

    def _open_command(
        self,
        sessions: InspectionSessionsByStatus,
        conversation_reference: ConversationReference,
    ) -> str:
        session_id = str(uuid4())
        sessions.opened_sessions[session_id] = conversation_reference
        return session_id

    def _attach_command(
        self,
        conversation_id: str,
        sessions: InspectionSessionsByStatus,
        session_id: str,
    ) -> bool:
        inspection_session_state = sessions.opened_sessions.get(session_id)
        if inspection_session_state:
            sessions.attached_sessions[conversation_id] = inspection_session_state
            del sessions.opened_sessions[session_id]
            return True

        return False

    @staticmethod
    def _get_serialized_context(state: BotState, context: TurnContext):
        ctx = state.get(context)
        return Pickler(unpicklable=False).flatten(ctx)

    async def _find_session(self, context: TurnContext) -> Any:
        sessions = await self.inspection_state_accessor.get(
            context, DEFAULT_INSPECTION_SESSIONS_BY_STATUS
        )

        conversation_reference = sessions.attached_sessions.get(
            context.activity.conversation.id
        )
        if conversation_reference:
            return InspectionSession(conversation_reference, self.credentials)

        return None

    async def _invoke_send(
        self, context: TurnContext, session: InspectionSession, activity: Activity
    ) -> bool:
        if await session.send(activity):
            return True

        await self._clean_up_session(context)
        return False

    async def _clean_up_session(self, context: TurnContext) -> None:
        sessions = await self.inspection_state_accessor.get(
            context, DEFAULT_INSPECTION_SESSIONS_BY_STATUS
        )

        del sessions.attached_sessions[context.activity.conversation.id]
        await self.inspection_state.save_changes(context, False)
botbuilder-python/libraries/botbuilder-core/botbuilder/core/inspection/inspection_middleware.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-core/botbuilder/core/inspection/inspection_middleware.py", "repo_id": "botbuilder-python", "token_count": 2950 }
386
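A minimal sketch of wiring the middleware above (used by tooling such as the Bot Framework Emulator's /INSPECT flow); MemoryStorage stands in for durable storage:

from botbuilder.core import ConversationState, MemoryStorage, UserState
from botbuilder.core.inspection import InspectionMiddleware, InspectionState

storage = MemoryStorage()
inspection = InspectionMiddleware(
    InspectionState(storage),
    user_state=UserState(storage),
    conversation_state=ConversationState(storage),
)
# adapter.use(inspection)  # register on whatever adapter the bot uses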
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from .extended_user_token_provider import ExtendedUserTokenProvider from .user_token_provider import UserTokenProvider from .connector_client_builder import ConnectorClientBuilder __all__ = [ "ConnectorClientBuilder", "ExtendedUserTokenProvider", "UserTokenProvider", ]
botbuilder-python/libraries/botbuilder-core/botbuilder/core/oauth/__init__.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-core/botbuilder/core/oauth/__init__.py", "repo_id": "botbuilder-python", "token_count": 104 }
387
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. class BotFrameworkSkill: """ Registration for a BotFrameworkHttpProtocol based Skill endpoint. """ # pylint: disable=invalid-name def __init__(self, id: str = None, app_id: str = None, skill_endpoint: str = None): self.id = id self.app_id = app_id self.skill_endpoint = skill_endpoint
botbuilder-python/libraries/botbuilder-core/botbuilder/core/skills/bot_framework_skill.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-core/botbuilder/core/skills/bot_framework_skill.py", "repo_id": "botbuilder-python", "token_count": 154 }
388
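A hypothetical registration using the class above; the id, app id, and endpoint values are placeholders:

from botbuilder.core.skills import BotFrameworkSkill

skill = BotFrameworkSkill(
    id="EchoSkill",
    app_id="00000000-0000-0000-0000-000000000000",
    skill_endpoint="https://echo-skill.example.org/api/messages",
)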
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- from .teams_activity_handler import TeamsActivityHandler from .teams_info import TeamsInfo from .teams_activity_extensions import ( teams_get_channel_id, teams_get_team_info, teams_notify_user, ) from .teams_sso_token_exchange_middleware import TeamsSSOTokenExchangeMiddleware __all__ = [ "TeamsActivityHandler", "TeamsInfo", "TeamsSSOTokenExchangeMiddleware", "teams_get_channel_id", "teams_get_team_info", "teams_notify_user", ]
botbuilder-python/libraries/botbuilder-core/botbuilder/core/teams/__init__.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-core/botbuilder/core/teams/__init__.py", "repo_id": "botbuilder-python", "token_count": 232 }
389
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

from typing import List

from botbuilder.core import CardFactory
from botbuilder.schema import (
    ActionTypes,
    AnimationCard,
    Attachment,
    AudioCard,
    CardAction,
    CardImage,
    HeroCard,
    MediaUrl,
    OAuthCard,
    SigninCard,
    ThumbnailCard,
    ReceiptCard,
    VideoCard,
)


def assert_attachment(attachment: Attachment, content_type: str):
    assert attachment is not None, "attachment not created"
    assert attachment.content_type == content_type, "attachment has wrong content_type"
    assert attachment.content is not None, "attachment missing context"


def assert_actions(actions: List[CardAction], count: int, titles: List[str] = None):
    assert isinstance(actions, list), "actions is not a list"
    assert len(actions) == count, "wrong number of actions returned"

    for idx, action in enumerate(actions):
        # Added type checking
        assert isinstance(
            action, CardAction
        ), f"action[{idx}] is not a CardAction object"

        assert action.title is not None, f"title[{idx}] missing"
        if titles is not None:
            assert action.title == titles[idx], f"title[{idx}] invalid"
        assert action.type is not None, f"type[{idx}] missing"
        assert action.value is not None, f"value[{idx}] missing"


def assert_images(images: List[CardImage], count: int, links: List[str] = None):
    assert isinstance(images, list), "images is not a list"
    assert len(images) == count, "wrong number of images returned"

    for idx, image in enumerate(images):
        # Added type checking
        assert isinstance(image, CardImage), f"image[{idx}] is not a CardImage object"

        assert image.url is not None, f"image url[{idx}] missing"
        if links is not None:
            assert image.url == links[idx], f"image url[{idx}] invalid"


def assert_media(media: List[MediaUrl], count: int, links: List[str] = None):
    assert isinstance(media, list), "media is not a list"
    assert len(media) == count, "wrong number of media returned"

    for idx, m_value in enumerate(media):
        # Added type checking
        assert isinstance(m_value, MediaUrl), f"media[{idx}] is not a MediaUrl object"

        assert m_value.url is not None, f"media url[{idx}] missing"
        if links is not None:
            assert m_value.url == links[idx], f"media url[{idx}] invalid"


class TestCardFactory:
    def test_should_create_adaptive_card_attachment(self):
        attachment = CardFactory.adaptive_card({"type": "AdaptiveCard"})
        assert_attachment(attachment, CardFactory.content_types.adaptive_card)
        assert attachment.content["type"] is not None

    def test_should_raise_error_for_adaptive_card_if_card_is_not_dict(self):
        try:
            CardFactory.adaptive_card(None)
        except TypeError:
            pass
        else:
            assert False, "should have raised TypeError"

    def test_should_create_animation_card_attachment(self):
        media = [MediaUrl(url="https://example.org/media")]
        card = AnimationCard(title="test", media=media)
        attachment = CardFactory.animation_card(card)

        assert_attachment(attachment, CardFactory.content_types.animation_card)
        assert attachment.content.title == "test", "wrong title"
        assert_media(attachment.content.media, 1, ["https://example.org/media"])

    def test_should_raise_error_for_animation_card_if_card_is_not_animation_card(self):
        try:
            CardFactory.animation_card(None)
        except TypeError:
            pass
        else:
            assert False, "should have raised TypeError"

    def test_should_create_audio_card_attachment(self):
        media = [MediaUrl(url="https://example.org/media")]
        card = AudioCard(title="test", media=media)
        attachment = CardFactory.audio_card(card)

        assert_attachment(attachment, CardFactory.content_types.audio_card)
        assert attachment.content.title == "test", "wrong title."
        assert_media(attachment.content.media, 1, ["https://example.org/media"])

    def test_should_raise_error_for_audio_card_if_card_is_not_audio_card(self):
        try:
            CardFactory.audio_card(None)
        except TypeError:
            pass
        else:
            assert False, "should have raised TypeError"

    def test_should_create_video_card_attachment(self):
        media = [MediaUrl(url="https://example.org/media")]
        card = VideoCard(title="test", media=media)
        attachment = CardFactory.video_card(card)

        assert_attachment(attachment, CardFactory.content_types.video_card)
        assert attachment.content.title == "test", "wrong title."
        assert_media(attachment.content.media, 1, ["https://example.org/media"])

    def test_should_raise_error_for_video_card_if_card_is_not_video_card(self):
        try:
            CardFactory.video_card(None)
        except TypeError:
            pass
        else:
            assert False, "should have raised TypeError"

    def test_should_create_hero_card_attachment(self):
        card = HeroCard(title="test")
        attachment = CardFactory.hero_card(card)

        assert_attachment(attachment, CardFactory.content_types.hero_card)
        assert attachment.content.title == "test", "wrong title."

    def test_should_raise_error_for_hero_card_if_card_is_not_hero_card(self):
        try:
            CardFactory.hero_card(None)
        except TypeError:
            pass
        else:
            assert False, "should have raised TypeError"

    def test_should_create_thumbnail_card_attachment(self):
        card = ThumbnailCard(title="test")
        attachment = CardFactory.thumbnail_card(card)

        assert_attachment(attachment, CardFactory.content_types.thumbnail_card)
        assert attachment.content.title == "test", "wrong title."

    def test_should_raise_error_for_thumbnail_card_if_card_is_not_thumbnail_card(self):
        try:
            CardFactory.thumbnail_card(None)
        except TypeError:
            pass
        else:
            assert False, "should have raised TypeError"

    def test_should_create_receipt_card_attachment(self):
        card = ReceiptCard(title="test")
        attachment = CardFactory.receipt_card(card)

        assert_attachment(attachment, CardFactory.content_types.receipt_card)
        assert attachment.content.title == "test", "wrong title."

    def test_should_raise_error_for_receipt_card_if_card_is_not_receipt_card(self):
        try:
            CardFactory.receipt_card(None)
        except TypeError:
            pass
        else:
            assert False, "should have raised TypeError"

    def test_should_create_signin_card_attachment(self):
        button = CardAction(
            type=ActionTypes.signin, title="test", value="https://example.org/signin"
        )
        card = SigninCard(title="test", buttons=[button])
        attachment = CardFactory.signin_card(card)

        assert_attachment(attachment, CardFactory.content_types.signin_card)
        assert_actions(attachment.content.buttons, 1, ["test"])
        assert attachment.content.buttons[0].type == "signin", "wrong action type."
        assert (
            attachment.content.buttons[0].value == "https://example.org/signin"
        ), "wrong action value."

    def test_should_raise_error_for_signin_card_if_card_is_not_signin_card(self):
        try:
            CardFactory.signin_card(None)
        except TypeError:
            pass
        else:
            assert False, "should have raised TypeError"

    def test_should_create_oauth_card_attachment(self):
        button = CardAction(
            type=ActionTypes.signin, title="test", value="https://example.org/signin"
        )
        card = OAuthCard(text="sign in", connection_name="test.com", buttons=[button])
        attachment = CardFactory.oauth_card(card)

        assert_attachment(attachment, CardFactory.content_types.oauth_card)
        assert_actions(attachment.content.buttons, 1, ["test"])
        assert attachment.content.text == "sign in", "wrong text"
        assert attachment.content.connection_name == "test.com", "wrong connection_name"

    def test_should_raise_error_for_oauth_card_if_card_is_not_oauth_card(self):
        try:
            CardFactory.oauth_card(None)
        except TypeError:
            pass
        else:
            assert False, "should have raised TypeError"
botbuilder-python/libraries/botbuilder-core/tests/test_card_factory.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-core/tests/test_card_factory.py", "repo_id": "botbuilder-python", "token_count": 3429 }
390
from botbuilder.schema import ( Activity, ActivityTypes, ConversationAccount, ChannelAccount, ) from botbuilder.core import TurnContext from botbuilder.core.adapters import TestAdapter class TestUtilities: @staticmethod def create_empty_context(): adapter = TestAdapter() activity = Activity( type=ActivityTypes.message, channel_id="EmptyContext", conversation=ConversationAccount(id="test"), from_property=ChannelAccount(id="empty@empty.context.org"), ) context = TurnContext(adapter, activity) return context
botbuilder-python/libraries/botbuilder-core/tests/test_utilities.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-core/tests/test_utilities.py", "repo_id": "botbuilder-python", "token_count": 236 }
391
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from enum import Enum class ListStyle(str, Enum): none = 0 auto = 1 in_line = 2 list_style = 3 suggested_action = 4 hero_card = 5
botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/list_style.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/list_style.py", "repo_id": "botbuilder-python", "token_count": 89 }
392
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import inspect
from hashlib import sha256
from typing import Dict

from botbuilder.core import (
    NullTelemetryClient,
    BotTelemetryClient,
    TurnContext,
    BotAssert,
    StatePropertyAccessor,
)
from .dialog import Dialog
from .dialog_state import DialogState


class DialogSet:
    def __init__(self, dialog_state: StatePropertyAccessor = None):
        # pylint: disable=import-outside-toplevel
        if dialog_state is None:
            frame = inspect.currentframe().f_back
            try:
                # try to access the caller's "self"
                try:
                    self_obj = frame.f_locals["self"]
                except KeyError:
                    raise TypeError("DialogSet(): dialog_state cannot be None.")
                # Only ComponentDialog can initialize with None dialog_state
                from .component_dialog import ComponentDialog
                from .dialog_manager import DialogManager
                from .dialog_container import DialogContainer

                if not isinstance(
                    self_obj, (ComponentDialog, DialogContainer, DialogManager)
                ):
                    raise TypeError("DialogSet(): dialog_state cannot be None.")
            finally:
                # make sure to clean up the frame at the end to avoid ref cycles
                del frame

        self._dialog_state = dialog_state
        self.__telemetry_client = NullTelemetryClient()

        self._dialogs: Dict[str, Dialog] = {}
        self._version: str = None

    @property
    def telemetry_client(self) -> BotTelemetryClient:
        """
        Gets the telemetry client for logging events.
        """
        return self.__telemetry_client

    @telemetry_client.setter
    def telemetry_client(self, value: BotTelemetryClient) -> None:
        """
        Sets the telemetry client for all dialogs in this set.
        """
        if value is None:
            self.__telemetry_client = NullTelemetryClient()
        else:
            self.__telemetry_client = value

        for dialog in self._dialogs.values():
            dialog.telemetry_client = self.__telemetry_client

    def get_version(self) -> str:
        """
        Gets a unique string which represents the combined versions of all dialogs in this
        dialog set.

        :return: The version; it will change when any of the child dialogs' versions change.
        """
        if not self._version:
            version = ""
            for _, dialog in self._dialogs.items():
                aux_version = dialog.get_version()
                if aux_version:
                    version += aux_version

            # Hash the concatenated child versions into a stable string digest.
            self._version = sha256(version.encode("utf-8")).hexdigest()

        return self._version

    def add(self, dialog: Dialog):
        """
        Adds a new dialog to the set and returns the added dialog.

        :param dialog: The dialog to add.
        """
        if dialog is None or not isinstance(dialog, Dialog):
            raise TypeError(
                "DialogSet.add(): dialog cannot be None and must be a Dialog or derived class."
            )

        if dialog.id in self._dialogs:
            raise TypeError(
                "DialogSet.add(): A dialog with an id of '%s' already added."
                % dialog.id
            )

        # dialog.telemetry_client = this._telemetry_client;
        self._dialogs[dialog.id] = dialog

        return self

    async def create_context(self, turn_context: TurnContext) -> "DialogContext":
        # This import prevents circular dependency issues
        # pylint: disable=import-outside-toplevel
        from .dialog_context import DialogContext

        # pylint: disable=unnecessary-lambda
        BotAssert.context_not_none(turn_context)

        if not self._dialog_state:
            raise RuntimeError(
                "DialogSet.CreateContextAsync(): DialogSet created with a null IStatePropertyAccessor."
            )

        state: DialogState = await self._dialog_state.get(
            turn_context, lambda: DialogState()
        )

        return DialogContext(self, turn_context, state)

    async def find(self, dialog_id: str) -> Dialog:
        """
        Finds a dialog that was previously added to the set using add(dialog)

        :param dialog_id: ID of the dialog/prompt to look up.
        :return: The dialog if found, otherwise None.
        """
        if not dialog_id:
            raise TypeError("DialogSet.find(): dialog_id cannot be None.")

        if dialog_id in self._dialogs:
            return self._dialogs[dialog_id]

        return None

    def find_dialog(self, dialog_id: str) -> Dialog:
        """
        Finds a dialog that was previously added to the set using add(dialog)

        :param dialog_id: ID of the dialog/prompt to look up.
        :return: The dialog if found, otherwise None.
        """
        if not dialog_id:
            raise TypeError("DialogSet.find_dialog(): dialog_id cannot be None.")

        if dialog_id in self._dialogs:
            return self._dialogs[dialog_id]

        return None

    def __str__(self):
        if not self._dialogs:
            return "dialog set empty!"

        return " ".join(map(str, self._dialogs.keys()))
botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_set.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_set.py", "repo_id": "botbuilder-python", "token_count": 2265 }
393
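A sketch showing the state-accessor requirement of the class above; TextPrompt is just a convenient built-in dialog to register:

from botbuilder.core import ConversationState, MemoryStorage
from botbuilder.dialogs import DialogSet
from botbuilder.dialogs.prompts import TextPrompt

conversation_state = ConversationState(MemoryStorage())
dialog_state = conversation_state.create_property("DialogState")

dialogs = DialogSet(dialog_state)
dialogs.add(TextPrompt("name_prompt"))
assert dialogs.find_dialog("name_prompt") is not None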
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import copy from typing import Union, Callable class ObjectPath: """ Helper methods for working with json objects. """ @staticmethod def assign(start_object, overlay_object, default: Union[Callable, object] = None): """ Creates a new object by overlaying values in start_object with non-null values from overlay_object. :param start_object: dict or typed object, the target object to set values on :param overlay_object: dict or typed object, the item to overlay values form :param default: Provides a default object if both source and overlay are None :return: A copy of start_object, with values from overlay_object """ if start_object and overlay_object: merged = copy.deepcopy(start_object) def merge(target: dict, source: dict): key_set = set(target).union(set(source)) for key in key_set: target_value = target.get(key) source_value = source.get(key) # skip empty overlay items if source_value: if isinstance(source_value, dict): # merge dictionaries if not target_value: target[key] = copy.deepcopy(source_value) else: merge(target_value, source_value) elif not hasattr(source_value, "__dict__"): # simple type. just copy it. target[key] = copy.copy(source_value) elif not target_value: # the target doesn't have the value, but # the overlay does. just copy it. target[key] = copy.deepcopy(source_value) else: # recursive class copy merge(target_value.__dict__, source_value.__dict__) target_dict = merged if isinstance(merged, dict) else merged.__dict__ overlay_dict = ( overlay_object if isinstance(overlay_object, dict) else overlay_object.__dict__ ) merge(target_dict, overlay_dict) return merged if overlay_object: return copy.deepcopy(overlay_object) if start_object: return start_object if default: return default() if callable(default) else copy.deepcopy(default) return None @staticmethod def set_path_value(obj, path: str, value: object): """ Given an object evaluate a path to set the value. """ segments = ObjectPath.try_resolve_path(obj, path) if not segments: return current = obj for i in range(len(segments) - 1): segment = segments[i] if ObjectPath.is_int(segment): index = int(segment) next_obj = current[index] if not next_obj and len(current) <= index: # Expand list to index current += [None] * ((index + 1) - len(current)) next_obj = current[index] else: next_obj = ObjectPath.__get_object_property(current, segment) if not next_obj: # Create object or list based on next segment next_segment = segments[i + 1] if not ObjectPath.is_int(next_segment): ObjectPath.__set_object_segment(current, segment, {}) else: ObjectPath.__set_object_segment(current, segment, []) next_obj = ObjectPath.__get_object_property(current, segment) current = next_obj last_segment = segments[-1] ObjectPath.__set_object_segment(current, last_segment, value) @staticmethod def get_path_value( obj, path: str, default: Union[Callable, object] = None ) -> object: """ Get the value for a path relative to an object. """ value = ObjectPath.try_get_path_value(obj, path) if value: return value if default is None: raise KeyError(f"Key {path} not found") return default() if callable(default) else copy.deepcopy(default) @staticmethod def has_value(obj, path: str) -> bool: """ Does an object have a subpath. """ return ObjectPath.try_get_path_value(obj, path) is not None @staticmethod def remove_path_value(obj, path: str): """ Remove path from object. 
""" segments = ObjectPath.try_resolve_path(obj, path) if not segments: return current = obj for i in range(len(segments) - 1): segment = segments[i] current = ObjectPath.__resolve_segment(current, segment) if not current: return if current: last_segment = segments[-1] if ObjectPath.is_int(last_segment): current[int(last_segment)] = None else: current.pop(last_segment) @staticmethod def try_get_path_value(obj, path: str) -> object: """ Get the value for a path relative to an object. """ if not obj: return None if path is None: return None if not path: return obj segments = ObjectPath.try_resolve_path(obj, path) if not segments: return None result = ObjectPath.__resolve_segments(obj, segments) if not result: return None return result @staticmethod def __set_object_segment(obj, segment, value): val = ObjectPath.__get_normalized_value(value) if ObjectPath.is_int(segment): # the target is an list index = int(segment) # size the list if needed obj += [None] * ((index + 1) - len(obj)) obj[index] = val return # the target is a dictionary obj[segment] = val @staticmethod def __get_normalized_value(value): return value @staticmethod def try_resolve_path(obj, property_path: str, evaluate: bool = False) -> []: so_far = [] first = property_path[0] if property_path else " " if first in ("'", '"'): if not property_path.endswith(first): return None so_far.append(property_path[1 : len(property_path) - 2]) elif ObjectPath.is_int(property_path): so_far.append(int(property_path)) else: start = 0 i = 0 def emit(): nonlocal start, i segment = property_path[start:i] if segment: so_far.append(segment) start = i + 1 while i < len(property_path): char = property_path[i] if char in (".", "["): emit() if char == "[": nesting = 1 i += 1 while i < len(property_path): char = property_path[i] if char == "[": nesting += 1 elif char == "]": nesting -= 1 if nesting == 0: break i += 1 if nesting > 0: return None expr = property_path[start:i] start = i + 1 indexer = ObjectPath.try_resolve_path(obj, expr, True) if not indexer: return None result = indexer[0] if ObjectPath.is_int(result): so_far.append(int(result)) else: so_far.append(result) i += 1 emit() if evaluate: result = ObjectPath.__resolve_segments(obj, so_far) if not result: return None so_far.clear() so_far.append(result) return so_far @staticmethod def for_each_property(obj: object, action: Callable[[str, object], None]): if isinstance(obj, dict): for key, value in obj.items(): action(key, value) elif hasattr(obj, "__dict__"): for key, value in vars(obj).items(): action(key, value) @staticmethod def __resolve_segments(current, segments: []) -> object: result = current for segment in segments: result = ObjectPath.__resolve_segment(result, segment) if not result: return None return result @staticmethod def __resolve_segment(current, segment) -> object: if current: if ObjectPath.is_int(segment): current = current[int(segment)] else: current = ObjectPath.__get_object_property(current, segment) return current @staticmethod def __get_object_property(obj, property_name: str): # doing a case insensitive search property_name_lower = property_name.lower() matching = [obj[key] for key in obj if key.lower() == property_name_lower] return matching[0] if matching else None @staticmethod def is_int(value: str) -> bool: try: int(value) return True except ValueError: return False
botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/object_path.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/object_path.py", "repo_id": "botbuilder-python", "token_count": 5146 }
394
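The path helpers above operate on plain dicts, so this sketch is fully self-contained:

from botbuilder.dialogs import ObjectPath

state = {}
ObjectPath.set_path_value(state, "profile.name", "Fred")
ObjectPath.set_path_value(state, "profile.tags[0]", "vip")

assert ObjectPath.get_path_value(state, "profile.name") == "Fred"
assert ObjectPath.try_get_path_value(state, "profile.tags[0]") == "vip"

ObjectPath.remove_path_value(state, "profile.name")
# With the key gone, get_path_value falls back to the supplied default.
assert ObjectPath.get_path_value(state, "profile.name", "unknown") == "unknown"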
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. """ Result returned by a prompts recognizer function. """ class PromptRecognizerResult: def __init__(self, succeeded: bool = False, value: object = None): """Creates result returned by a prompts recognizer function.""" self.succeeded = succeeded self.value = value
botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_recognizer_result.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_recognizer_result.py", "repo_id": "botbuilder-python", "token_count": 117 }
395
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import unittest from typing import List from botbuilder.dialogs.choices import Choice, ChoiceFactory, ChoiceFactoryOptions from botbuilder.schema import ( ActionTypes, Activity, ActivityTypes, Attachment, AttachmentLayoutTypes, CardAction, HeroCard, InputHints, SuggestedActions, ) from botframework.connector import Channels class ChoiceFactoryTest(unittest.TestCase): color_choices: List[Choice] = [Choice("red"), Choice("green"), Choice("blue")] choices_with_actions: List[Choice] = [ Choice( "ImBack", action=CardAction( type=ActionTypes.im_back, title="ImBack Action", value="ImBack Value" ), ), Choice( "MessageBack", action=CardAction( type=ActionTypes.message_back, title="MessageBack Action", value="MessageBack Value", ), ), Choice( "PostBack", action=CardAction( type=ActionTypes.post_back, title="PostBack Action", value="PostBack Value", ), ), ] def test_inline_should_render_choices_inline(self): activity = ChoiceFactory.inline(ChoiceFactoryTest.color_choices, "select from:") self.assertEqual("select from: (1) red, (2) green, or (3) blue", activity.text) def test_should_render_choices_as_a_list(self): activity = ChoiceFactory.list_style( ChoiceFactoryTest.color_choices, "select from:" ) self.assertEqual( "select from:\n\n 1. red\n 2. green\n 3. blue", activity.text ) def test_should_render_unincluded_numbers_choices_as_a_list(self): activity = ChoiceFactory.list_style( ChoiceFactoryTest.color_choices, "select from:", options=ChoiceFactoryOptions(include_numbers=False), ) self.assertEqual( "select from:\n\n - red\n - green\n - blue", activity.text ) def test_should_render_choices_as_suggested_actions(self): expected = Activity( type=ActivityTypes.message, text="select from:", input_hint=InputHints.expecting_input, suggested_actions=SuggestedActions( actions=[ CardAction(type=ActionTypes.im_back, value="red", title="red"), CardAction(type=ActionTypes.im_back, value="green", title="green"), CardAction(type=ActionTypes.im_back, value="blue", title="blue"), ] ), ) activity = ChoiceFactory.suggested_action( ChoiceFactoryTest.color_choices, "select from:" ) self.assertEqual(expected, activity) def test_should_render_choices_as_hero_card(self): expected = Activity( type=ActivityTypes.message, input_hint=InputHints.expecting_input, attachment_layout=AttachmentLayoutTypes.list, attachments=[ Attachment( content=HeroCard( text="select from:", buttons=[ CardAction( type=ActionTypes.im_back, value="red", title="red" ), CardAction( type=ActionTypes.im_back, value="green", title="green" ), CardAction( type=ActionTypes.im_back, value="blue", title="blue" ), ], ), content_type="application/vnd.microsoft.card.hero", ) ], ) activity = ChoiceFactory.hero_card( ChoiceFactoryTest.color_choices, "select from:" ) self.assertEqual(expected, activity) def test_should_automatically_choose_render_style_based_on_channel_type(self): expected = Activity( type=ActivityTypes.message, text="select from:", input_hint=InputHints.expecting_input, suggested_actions=SuggestedActions( actions=[ CardAction(type=ActionTypes.im_back, value="red", title="red"), CardAction(type=ActionTypes.im_back, value="green", title="green"), CardAction(type=ActionTypes.im_back, value="blue", title="blue"), ] ), ) activity = ChoiceFactory.for_channel( Channels.emulator, ChoiceFactoryTest.color_choices, "select from:" ) self.assertEqual(expected, activity) def test_should_choose_correct_styles_for_teams(self): expected = Activity( 
type=ActivityTypes.message, input_hint=InputHints.expecting_input, attachment_layout=AttachmentLayoutTypes.list, attachments=[ Attachment( content=HeroCard( text="select from:", buttons=[ CardAction( type=ActionTypes.im_back, value="red", title="red" ), CardAction( type=ActionTypes.im_back, value="green", title="green" ), CardAction( type=ActionTypes.im_back, value="blue", title="blue" ), ], ), content_type="application/vnd.microsoft.card.hero", ) ], ) activity = ChoiceFactory.for_channel( Channels.ms_teams, ChoiceFactoryTest.color_choices, "select from:" ) self.assertEqual(expected, activity) def test_should_include_choice_actions_in_suggested_actions(self): expected = Activity( type=ActivityTypes.message, text="select from:", input_hint=InputHints.expecting_input, suggested_actions=SuggestedActions( actions=[ CardAction( type=ActionTypes.im_back, value="ImBack Value", title="ImBack Action", ), CardAction( type=ActionTypes.message_back, value="MessageBack Value", title="MessageBack Action", ), CardAction( type=ActionTypes.post_back, value="PostBack Value", title="PostBack Action", ), ] ), ) activity = ChoiceFactory.suggested_action( ChoiceFactoryTest.choices_with_actions, "select from:" ) self.assertEqual(expected, activity) def test_should_include_choice_actions_in_hero_cards(self): expected = Activity( type=ActivityTypes.message, input_hint=InputHints.expecting_input, attachment_layout=AttachmentLayoutTypes.list, attachments=[ Attachment( content=HeroCard( text="select from:", buttons=[ CardAction( type=ActionTypes.im_back, value="ImBack Value", title="ImBack Action", ), CardAction( type=ActionTypes.message_back, value="MessageBack Value", title="MessageBack Action", ), CardAction( type=ActionTypes.post_back, value="PostBack Value", title="PostBack Action", ), ], ), content_type="application/vnd.microsoft.card.hero", ) ], ) activity = ChoiceFactory.hero_card( ChoiceFactoryTest.choices_with_actions, "select from:" ) self.assertEqual(expected, activity)
botbuilder-python/libraries/botbuilder-dialogs/tests/choices/test_choice_factory.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-dialogs/tests/choices/test_choice_factory.py", "repo_id": "botbuilder-python", "token_count": 4792 }
396
import aiounittest from botbuilder.dialogs import ObjectPath class Location: def __init__(self, lat: float = None, long: float = None): self.lat = lat self.long = long class Options: def __init__( self, first_name: str = None, last_name: str = None, age: int = None, boolean: bool = None, dictionary: dict = None, location: Location = None, ): self.first_name = first_name self.last_name = last_name self.age = age self.boolean = boolean self.dictionary = dictionary self.location = location class ObjectPathTests(aiounittest.AsyncTestCase): async def test_typed_only_default(self): default_options = Options( last_name="Smith", first_name="Fred", age=22, location=Location( lat=1.2312312, long=3.234234, ), ) overlay = Options() result = ObjectPath.assign(default_options, overlay) assert result.last_name == default_options.last_name assert result.first_name == default_options.first_name assert result.age == default_options.age assert result.boolean == default_options.boolean assert result.location.lat == default_options.location.lat assert result.location.long == default_options.location.long async def test_typed_only_overlay(self): default_options = Options() overlay = Options( last_name="Smith", first_name="Fred", age=22, location=Location( lat=1.2312312, long=3.234234, ), ) result = ObjectPath.assign(default_options, overlay) assert result.last_name == overlay.last_name assert result.first_name == overlay.first_name assert result.age == overlay.age assert result.boolean == overlay.boolean assert result.location.lat == overlay.location.lat assert result.location.long == overlay.location.long async def test_typed_full_overlay(self): default_options = Options( last_name="Smith", first_name="Fred", age=22, location=Location( lat=1.2312312, long=3.234234, ), dictionary={"one": 1, "two": 2}, ) overlay = Options( last_name="Grant", first_name="Eddit", age=32, location=Location( lat=2.2312312, long=2.234234, ), dictionary={"one": 99, "three": 3}, ) result = ObjectPath.assign(default_options, overlay) assert result.last_name == overlay.last_name assert result.first_name == overlay.first_name assert result.age == overlay.age assert result.boolean == overlay.boolean assert result.location.lat == overlay.location.lat assert result.location.long == overlay.location.long assert "one" in result.dictionary assert result.dictionary["one"] == 99 assert "two" in result.dictionary assert "three" in result.dictionary async def test_typed_partial_overlay(self): default_options = Options( last_name="Smith", first_name="Fred", age=22, location=Location( lat=1.2312312, long=3.234234, ), ) overlay = Options( last_name="Grant", ) result = ObjectPath.assign(default_options, overlay) assert result.last_name == overlay.last_name assert result.first_name == default_options.first_name assert result.age == default_options.age assert result.boolean == default_options.boolean assert result.location.lat == default_options.location.lat assert result.location.long == default_options.location.long async def test_typed_no_target(self): overlay = Options( last_name="Smith", first_name="Fred", age=22, location=Location( lat=1.2312312, long=3.234234, ), ) result = ObjectPath.assign(None, overlay) assert result.last_name == overlay.last_name assert result.first_name == overlay.first_name assert result.age == overlay.age assert result.boolean == overlay.boolean assert result.location.lat == overlay.location.lat assert result.location.long == overlay.location.long async def test_typed_no_overlay(self): default_options = Options( 
last_name="Smith", first_name="Fred", age=22, location=Location( lat=1.2312312, long=3.234234, ), ) result = ObjectPath.assign(default_options, None) assert result.last_name == default_options.last_name assert result.first_name == default_options.first_name assert result.age == default_options.age assert result.boolean == default_options.boolean assert result.location.lat == default_options.location.lat assert result.location.long == default_options.location.long async def test_no_target_or_overlay(self): result = ObjectPath.assign(None, None, Options) assert result async def test_dict_partial_overlay(self): default_options = { "last_name": "Smith", "first_name": "Fred", "age": 22, "location": Location( lat=1.2312312, long=3.234234, ), } overlay = { "last_name": "Grant", } result = ObjectPath.assign(default_options, overlay) assert result["last_name"] == overlay["last_name"] assert result["first_name"] == default_options["first_name"] assert result["age"] == default_options["age"] assert result["location"].lat == default_options["location"].lat assert result["location"].long == default_options["location"].long async def test_dict_to_typed_overlay(self): default_options = Options( last_name="Smith", first_name="Fred", age=22, location=Location( lat=1.2312312, long=3.234234, ), ) overlay = { "last_name": "Grant", } result = ObjectPath.assign(default_options, overlay) assert result.last_name == overlay["last_name"] assert result.first_name == default_options.first_name assert result.age == default_options.age assert result.boolean == default_options.boolean assert result.location.lat == default_options.location.lat assert result.location.long == default_options.location.long async def test_set_value(self): test = {} ObjectPath.set_path_value(test, "x.y.z", 15) ObjectPath.set_path_value(test, "x.p", "hello") ObjectPath.set_path_value(test, "foo", {"Bar": 15, "Blat": "yo"}) ObjectPath.set_path_value(test, "x.a[1]", "yabba") ObjectPath.set_path_value(test, "x.a[0]", "dabba") ObjectPath.set_path_value(test, "null", None) assert ObjectPath.get_path_value(test, "x.y.z") == 15 assert ObjectPath.get_path_value(test, "x.p") == "hello" assert ObjectPath.get_path_value(test, "foo.bar") == 15 assert not ObjectPath.try_get_path_value(test, "foo.Blatxxx") assert ObjectPath.try_get_path_value(test, "x.a[1]") == "yabba" assert ObjectPath.try_get_path_value(test, "x.a[0]") == "dabba" assert not ObjectPath.try_get_path_value(test, "null") async def test_remove_path_value(self): test = {} ObjectPath.set_path_value(test, "x.y.z", 15) ObjectPath.set_path_value(test, "x.p", "hello") ObjectPath.set_path_value(test, "foo", {"Bar": 15, "Blat": "yo"}) ObjectPath.set_path_value(test, "x.a[1]", "yabba") ObjectPath.set_path_value(test, "x.a[0]", "dabba") ObjectPath.remove_path_value(test, "x.y.z") with self.assertRaises(KeyError): ObjectPath.get_path_value(test, "x.y.z") assert ObjectPath.get_path_value(test, "x.y.z", 99) == 99 ObjectPath.remove_path_value(test, "x.a[1]") assert not ObjectPath.try_get_path_value(test, "x.a[1]") assert ObjectPath.try_get_path_value(test, "x.a[0]") == "dabba"
botbuilder-python/libraries/botbuilder-dialogs/tests/test_object_path.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-dialogs/tests/test_object_path.py", "repo_id": "botbuilder-python", "token_count": 4030 }
397
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from aiohttp import ClientSession, ClientResponse, ClientResponseError from botframework.connector import ( HttpClientBase, HttpClientFactory, HttpRequest, HttpResponseBase, ) class _HttpResponseImpl(HttpResponseBase): def __init__(self, client_response: ClientResponse) -> None: self._client_response = client_response @property def status_code(self): return self._client_response.status async def is_succesful(self) -> bool: try: self._client_response.raise_for_status() return True except ClientResponseError: return False async def read_content_str(self) -> str: return (await self._client_response.read()).decode() class _HttpClientImplementation(HttpClientBase): def __init__(self) -> None: self._session = ClientSession() async def post(self, *, request: HttpRequest) -> HttpResponseBase: aio_response = await self._session.post( request.request_uri, data=request.content, headers=request.headers ) return _HttpResponseImpl(aio_response) class AioHttpClientFactory(HttpClientFactory): def create_client(self) -> HttpClientBase: return _HttpClientImplementation()
botbuilder-python/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/aio_http_client_factory.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/aio_http_client_factory.py", "repo_id": "botbuilder-python", "token_count": 489 }
398
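A hedged usage sketch for the factory above, not part of the library: the endpoint URL is a placeholder, the import path is inferred from the file path shown above, and HttpRequest is assumed to accept request_uri, content, and headers as keyword arguments (the three attributes _HttpClientImplementation.post reads).

import asyncio

from botframework.connector import HttpRequest
from botbuilder.integration.aiohttp.skills.aio_http_client_factory import (
    AioHttpClientFactory,
)


async def main():
    client = AioHttpClientFactory().create_client()
    request = HttpRequest(
        request_uri="https://example.org/api/messages",  # placeholder endpoint
        content=b'{"type": "message", "text": "hi"}',
        headers={"Content-Type": "application/json"},
    )
    response = await client.post(request=request)
    if await response.is_succesful():  # spelling follows the base interface
        print(response.status_code, await response.read_content_str())


asyncio.run(main())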
from asyncio import Future from unittest.mock import Mock, MagicMock from aiounittest import AsyncTestCase from botbuilder.integration.applicationinsights.aiohttp import ( bot_telemetry_middleware, aiohttp_telemetry_middleware, ) class TestAiohttpTelemetryMiddleware(AsyncTestCase): # pylint: disable=protected-access async def test_bot_telemetry_middleware(self): req = Mock() req.headers = {"Content-Type": "application/json"} req.json = MagicMock(return_value=Future()) req.json.return_value.set_result("mock body") async def handler(value): return value sut = await bot_telemetry_middleware(req, handler) assert "mock body" in aiohttp_telemetry_middleware._REQUEST_BODIES.values() aiohttp_telemetry_middleware._REQUEST_BODIES.clear() assert req == sut def test_retrieve_aiohttp_body(self): aiohttp_telemetry_middleware._REQUEST_BODIES = Mock() aiohttp_telemetry_middleware._REQUEST_BODIES.pop = Mock( return_value="test body" ) assert aiohttp_telemetry_middleware.retrieve_aiohttp_body() == "test body" aiohttp_telemetry_middleware._REQUEST_BODIES = {}
botbuilder-python/libraries/botbuilder-integration-applicationinsights-aiohttp/tests/test_aiohttp_telemetry_middleware.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-integration-applicationinsights-aiohttp/tests/test_aiohttp_telemetry_middleware.py", "repo_id": "botbuilder-python", "token_count": 493 }
399
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import aiounittest from botbuilder.schema import ( Activity, ConversationReference, ConversationAccount, ChannelAccount, Entity, ResourceResponse, Attachment, ) from botbuilder.schema._connector_client_enums import ActivityTypes class TestActivity(aiounittest.AsyncTestCase): def test_constructor(self): # Arrange activity = Activity() # Assert self.assertIsNotNone(activity) self.assertIsNone(activity.type) self.assertIsNone(activity.id) self.assertIsNone(activity.timestamp) self.assertIsNone(activity.local_timestamp) self.assertIsNone(activity.local_timezone) self.assertIsNone(activity.service_url) self.assertIsNone(activity.channel_id) self.assertIsNone(activity.from_property) self.assertIsNone(activity.conversation) self.assertIsNone(activity.recipient) self.assertIsNone(activity.text_format) self.assertIsNone(activity.attachment_layout) self.assertIsNone(activity.members_added) self.assertIsNone(activity.members_removed) self.assertIsNone(activity.reactions_added) self.assertIsNone(activity.reactions_removed) self.assertIsNone(activity.topic_name) self.assertIsNone(activity.history_disclosed) self.assertIsNone(activity.locale) self.assertIsNone(activity.text) self.assertIsNone(activity.speak) self.assertIsNone(activity.input_hint) self.assertIsNone(activity.summary) self.assertIsNone(activity.suggested_actions) self.assertIsNone(activity.attachments) self.assertIsNone(activity.entities) self.assertIsNone(activity.channel_data) self.assertIsNone(activity.action) self.assertIsNone(activity.reply_to_id) self.assertIsNone(activity.label) self.assertIsNone(activity.value_type) self.assertIsNone(activity.value) self.assertIsNone(activity.name) self.assertIsNone(activity.relates_to) self.assertIsNone(activity.code) self.assertIsNone(activity.expiration) self.assertIsNone(activity.importance) self.assertIsNone(activity.delivery_mode) self.assertIsNone(activity.listen_for) self.assertIsNone(activity.text_highlights) self.assertIsNone(activity.semantic_action) self.assertIsNone(activity.caller_id) def test_apply_conversation_reference(self): # Arrange activity = self.__create_activity() conversation_reference = ConversationReference( channel_id="123", service_url="serviceUrl", conversation=ConversationAccount(id="456"), user=ChannelAccount(id="abc"), bot=ChannelAccount(id="def"), activity_id="12345", locale="en-uS", ) # Act activity.apply_conversation_reference(reference=conversation_reference) # Assert self.assertEqual(conversation_reference.channel_id, activity.channel_id) self.assertEqual(conversation_reference.locale, activity.locale) self.assertEqual(conversation_reference.service_url, activity.service_url) self.assertEqual( conversation_reference.conversation.id, activity.conversation.id ) self.assertEqual(conversation_reference.bot.id, activity.from_property.id) self.assertEqual(conversation_reference.user.id, activity.recipient.id) self.assertEqual(conversation_reference.activity_id, activity.reply_to_id) def test_apply_conversation_reference_with_is_incoming_true(self): # Arrange activity = self.__create_activity() conversation_reference = ConversationReference( channel_id="cr_123", service_url="cr_serviceUrl", conversation=ConversationAccount(id="cr_456"), user=ChannelAccount(id="cr_abc"), bot=ChannelAccount(id="cr_def"), activity_id="cr_12345", locale="en-uS", ) # Act activity.apply_conversation_reference( reference=conversation_reference, is_incoming=True ) # Assert 
self.assertEqual(conversation_reference.channel_id, activity.channel_id) self.assertEqual(conversation_reference.locale, activity.locale) self.assertEqual(conversation_reference.service_url, activity.service_url) self.assertEqual( conversation_reference.conversation.id, activity.conversation.id ) self.assertEqual(conversation_reference.user.id, activity.from_property.id) self.assertEqual(conversation_reference.bot.id, activity.recipient.id) self.assertEqual(conversation_reference.activity_id, activity.id) def test_as_contact_relation_update_activity_return_activity(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.contact_relation_update # Act result = activity.as_contact_relation_update_activity() # Assert self.assertEqual(result.type, ActivityTypes.contact_relation_update) def test_as_contact_relation_update_activity_return_none(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.message # Act result = activity.as_contact_relation_update_activity() # Assert self.assertIsNone(result) def test_as_conversation_update_activity_return_activity(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.conversation_update # Act result = activity.as_conversation_update_activity() # Assert self.assertEqual(result.type, ActivityTypes.conversation_update) def test_as_conversation_update_activity_return_none(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.message # Act result = activity.as_conversation_update_activity() # Assert self.assertIsNone(result) def test_as_end_of_conversation_activity_return_activity(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.end_of_conversation # Act result = activity.as_end_of_conversation_activity() # Assert self.assertEqual(result.type, ActivityTypes.end_of_conversation) def test_as_end_of_conversation_activity_return_none(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.message # Act result = activity.as_end_of_conversation_activity() # Assert self.assertIsNone(result) def test_as_event_activity_return_activity(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.event # Act result = activity.as_event_activity() # Assert self.assertEqual(result.type, ActivityTypes.event) def test_as_event_activity_return_none(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.message # Act result = activity.as_event_activity() # Assert self.assertIsNone(result) def test_as_handoff_activity_return_activity(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.handoff # Act result = activity.as_handoff_activity() # Assert self.assertEqual(result.type, ActivityTypes.handoff) def test_as_handoff_activity_return_none(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.message # Act result = activity.as_handoff_activity() # Assert self.assertIsNone(result) def test_as_installation_update_activity_return_activity(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.installation_update # Act result = activity.as_installation_update_activity() # Assert self.assertEqual(result.type, ActivityTypes.installation_update) def test_as_installation_update_activity_return_none(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.message # Act result = activity.as_installation_update_activity() # Assert self.assertIsNone(result) 
def test_as_invoke_activity_return_activity(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.invoke # Act result = activity.as_invoke_activity() # Assert self.assertEqual(result.type, ActivityTypes.invoke) def test_as_invoke_activity_return_none(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.message # Act result = activity.as_invoke_activity() # Assert self.assertIsNone(result) def test_as_message_activity_return_activity(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.message # Act result = activity.as_message_activity() # Assert self.assertEqual(result.type, ActivityTypes.message) def test_as_message_activity_return_none(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.invoke # Act result = activity.as_message_activity() # Assert self.assertIsNone(result) def test_as_message_activity_type_none(self): # Arrange activity = self.__create_activity() activity.type = None # Act result = activity.as_message_activity() # Assert self.assertIsNone(result) def test_as_message_delete_activity_return_activity(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.message_delete # Act result = activity.as_message_delete_activity() # Assert self.assertEqual(result.type, ActivityTypes.message_delete) def test_as_message_delete_activity_return_none(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.message # Act result = activity.as_message_delete_activity() # Assert self.assertIsNone(result) def test_as_message_reaction_activity_return_activity(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.message_reaction # Act result = activity.as_message_reaction_activity() # Assert self.assertEqual(result.type, ActivityTypes.message_reaction) def test_as_message_reaction_activity_return_none(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.message # Act result = activity.as_message_reaction_activity() # Assert self.assertIsNone(result) def test_as_message_update_activity_return_activity(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.message_update # Act result = activity.as_message_update_activity() # Assert self.assertEqual(result.type, ActivityTypes.message_update) def test_as_message_update_activity_return_none(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.message # Act result = activity.as_message_update_activity() # Assert self.assertIsNone(result) def test_as_suggestion_activity_return_activity(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.suggestion # Act result = activity.as_suggestion_activity() # Assert self.assertEqual(result.type, ActivityTypes.suggestion) def test_as_suggestion_activity_return_none(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.message # Act result = activity.as_suggestion_activity() # Assert self.assertIsNone(result) def test_as_trace_activity_return_activity(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.trace # Act result = activity.as_trace_activity() # Assert self.assertEqual(result.type, ActivityTypes.trace) def test_as_trace_activity_return_none(self): # Arrange activity = self.__create_activity() activity.type = ActivityTypes.message # Act result = activity.as_trace_activity() # Assert self.assertIsNone(result) def 
test_as_typing_activity_return_activity(self):
        # Arrange
        activity = self.__create_activity()
        activity.type = ActivityTypes.typing

        # Act
        result = activity.as_typing_activity()

        # Assert
        self.assertEqual(result.type, ActivityTypes.typing)

    def test_as_typing_activity_return_none(self):
        # Arrange
        activity = self.__create_activity()
        activity.type = ActivityTypes.message

        # Act
        result = activity.as_typing_activity()

        # Assert
        self.assertIsNone(result)

    def test_create_contact_relation_update_activity(self):
        # Act
        result = Activity.create_contact_relation_update_activity()

        # Assert
        self.assertEqual(result.type, ActivityTypes.contact_relation_update)

    def test_create_conversation_update_activity(self):
        # Act
        result = Activity.create_conversation_update_activity()

        # Assert
        self.assertEqual(result.type, ActivityTypes.conversation_update)

    def test_create_end_of_conversation_activity(self):
        # Act
        result = Activity.create_end_of_conversation_activity()

        # Assert
        self.assertEqual(result.type, ActivityTypes.end_of_conversation)

    def test_create_event_activity(self):
        # Act
        result = Activity.create_event_activity()

        # Assert
        self.assertEqual(result.type, ActivityTypes.event)

    def test_create_handoff_activity(self):
        # Act
        result = Activity.create_handoff_activity()

        # Assert
        self.assertEqual(result.type, ActivityTypes.handoff)

    def test_create_invoke_activity(self):
        # Act
        result = Activity.create_invoke_activity()

        # Assert
        self.assertEqual(result.type, ActivityTypes.invoke)

    def test_create_message_activity(self):
        # Act
        result = Activity.create_message_activity()

        # Assert
        self.assertEqual(result.type, ActivityTypes.message)

    def test_create_reply(self):
        # Arrange
        activity = self.__create_activity()
        text = "test reply"
        locale = "en-us"

        # Act
        result = activity.create_reply(text=text, locale=locale)

        # Assert
        self.assertEqual(result.text, text)
        self.assertEqual(result.locale, locale)
        self.assertEqual(result.type, ActivityTypes.message)

    def test_create_reply_without_arguments(self):
        # Arrange
        activity = self.__create_activity()

        # Act
        result = activity.create_reply()

        # Assert
        self.assertEqual(result.type, ActivityTypes.message)
        self.assertEqual(result.text, "")
        self.assertEqual(result.locale, activity.locale)

    def test_create_trace(self):
        # Arrange
        activity = self.__create_activity()
        name = "test-activity"
        value_type = "string"
        value = "test-value"
        label = "test-label"

        # Act
        result = activity.create_trace(
            name=name, value_type=value_type, value=value, label=label
        )

        # Assert
        self.assertEqual(result.type, ActivityTypes.trace)
        self.assertEqual(result.name, name)
        self.assertEqual(result.value_type, value_type)
        self.assertEqual(result.value, value)
        self.assertEqual(result.label, label)

    def test_create_trace_activity_no_recipient(self):
        # Arrange
        activity = self.__create_activity()
        activity.recipient = None

        # Act
        result = activity.create_trace("test")

        # Assert
        self.assertIsNone(result.from_property.id)
        self.assertIsNone(result.from_property.name)

    def test_create_trace_activity_no_value_type(self):
        # Arrange
        name = "test-activity"
        value = "test-value"
        label = "test-label"

        # Act
        result = Activity.create_trace_activity(name=name, value=value, label=label)

        # Assert
        self.assertEqual(result.type, ActivityTypes.trace)
        self.assertEqual(result.value_type, type(value))
        self.assertEqual(result.label, label)

    def test_create_trace_activity(self):
        # Arrange
        name = "test-activity"
        value_type = "string"
        value = "test-value"
        label = "test-label"

        # Act
        result = Activity.create_trace_activity(
            name=name, value_type=value_type, value=value, label=label
) # Assert self.assertEqual(result.type, ActivityTypes.trace) self.assertEqual(result.name, name) self.assertEqual(result.value_type, value_type) self.assertEqual(result.label, label) def test_create_typing_activity(self): # Act result = Activity.create_typing_activity() # Assert self.assertEqual(result.type, ActivityTypes.typing) def test_get_conversation_reference(self): # Arrange activity = self.__create_activity() # Act result = activity.get_conversation_reference() # Assert self.assertEqual(activity.id, result.activity_id) self.assertEqual(activity.from_property.id, result.user.id) self.assertEqual(activity.recipient.id, result.bot.id) self.assertEqual(activity.conversation.id, result.conversation.id) self.assertEqual(activity.channel_id, result.channel_id) self.assertEqual(activity.locale, result.locale) self.assertEqual(activity.service_url, result.service_url) def test_get_mentions(self): # Arrange mentions = [Entity(type="mention"), Entity(type="reaction")] activity = Activity(entities=mentions) # Act result = Activity.get_mentions(activity) # Assert self.assertEqual(len(result), 1) self.assertEqual(result[0].type, "mention") def test_get_reply_conversation_reference(self): # Arrange activity = self.__create_activity() reply = ResourceResponse(id="1234") # Act result = activity.get_reply_conversation_reference(reply=reply) # Assert self.assertEqual(reply.id, result.activity_id) self.assertEqual(activity.from_property.id, result.user.id) self.assertEqual(activity.recipient.id, result.bot.id) self.assertEqual(activity.conversation.id, result.conversation.id) self.assertEqual(activity.channel_id, result.channel_id) self.assertEqual(activity.locale, result.locale) self.assertEqual(activity.service_url, result.service_url) def test_has_content_empty(self): # Arrange activity_empty = Activity() # Act result_empty = activity_empty.has_content() # Assert self.assertEqual(result_empty, False) def test_has_content_with_text(self): # Arrange activity_with_text = Activity(text="test-text") # Act result_with_text = activity_with_text.has_content() # Assert self.assertEqual(result_with_text, True) def test_has_content_with_summary(self): # Arrange activity_with_summary = Activity(summary="test-summary") # Act result_with_summary = activity_with_summary.has_content() # Assert self.assertEqual(result_with_summary, True) def test_has_content_with_attachment(self): # Arrange activity_with_attachment = Activity(attachments=[Attachment()]) # Act result_with_attachment = activity_with_attachment.has_content() # Assert self.assertEqual(result_with_attachment, True) def test_has_content_with_channel_data(self): # Arrange activity_with_channel_data = Activity(channel_data="test-channel-data") # Act result_with_channel_data = activity_with_channel_data.has_content() # Assert self.assertEqual(result_with_channel_data, True) def test_is_from_streaming_connection(self): # Arrange non_streaming = [ "http://yayay.com", "https://yayay.com", "HTTP://yayay.com", "HTTPS://yayay.com", ] streaming = [ "urn:botframework:WebSocket:wss://beep.com", "urn:botframework:WebSocket:http://beep.com", "URN:botframework:WebSocket:wss://beep.com", "URN:botframework:WebSocket:http://beep.com", ] activity = self.__create_activity() activity.service_url = None # Assert self.assertEqual(activity.is_from_streaming_connection(), False) for s in non_streaming: activity.service_url = s self.assertEqual(activity.is_from_streaming_connection(), False) for s in streaming: activity.service_url = s 
self.assertEqual(activity.is_from_streaming_connection(), True) @staticmethod def __create_activity() -> Activity: account1 = ChannelAccount( id="ChannelAccount_Id_1", name="ChannelAccount_Name_1", aad_object_id="ChannelAccount_aadObjectId_1", role="ChannelAccount_Role_1", ) account2 = ChannelAccount( id="ChannelAccount_Id_2", name="ChannelAccount_Name_2", aad_object_id="ChannelAccount_aadObjectId_2", role="ChannelAccount_Role_2", ) conversation_account = ConversationAccount( conversation_type="a", id="123", is_group=True, name="Name", role="ConversationAccount_Role", ) activity = Activity( id="123", from_property=account1, recipient=account2, conversation=conversation_account, channel_id="ChannelId123", locale="en-uS", service_url="ServiceUrl123", ) return activity
botbuilder-python/libraries/botbuilder-schema/tests/test_activity.py/0
{ "file_path": "botbuilder-python/libraries/botbuilder-schema/tests/test_activity.py", "repo_id": "botbuilder-python", "token_count": 9895 }
400
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from typing import Dict, List from botbuilder.schema import Activity, TokenResponse from botframework.connector.token_api import TokenApiClientConfiguration from botframework.connector.token_api.aio import TokenApiClient from botframework.connector.token_api.models import ( SignInUrlResponse, TokenExchangeRequest, TokenStatus, ) from .app_credentials import AppCredentials from .user_token_client import UserTokenClient class _UserTokenClientImpl(UserTokenClient): def __init__( self, app_id: str, credentials: AppCredentials, oauth_endpoint: str, client_configuration: TokenApiClientConfiguration = None, ) -> None: super().__init__() self._app_id = app_id self._client = TokenApiClient(credentials, oauth_endpoint) if client_configuration: self._client.config = client_configuration async def get_user_token( self, user_id: str, connection_name: str, channel_id: str, magic_code: str ) -> TokenResponse: if user_id is None or not isinstance(user_id, str): raise TypeError("user_id") if connection_name is None or not isinstance(connection_name, str): raise TypeError("connection_name") if channel_id is None or not isinstance(channel_id, str): raise TypeError("channel_id") result = await self._client.user_token.get_token( user_id, connection_name, channel_id=channel_id, code=magic_code ) if result is None or result.token is None: return None return result async def get_sign_in_resource( self, connection_name: str, activity: Activity, final_redirect: str ) -> SignInUrlResponse: if connection_name is None or not isinstance(connection_name, str): raise TypeError("connection_name") if activity is None or not isinstance(activity, Activity): raise TypeError("activity") result = await self._client.bot_sign_in.get_sign_in_resource( UserTokenClient.create_token_exchange_state( self._app_id, connection_name, activity ), final_redirect=final_redirect, ) return result async def sign_out_user(self, user_id: str, connection_name: str, channel_id: str): if user_id is None or not isinstance(user_id, str): raise TypeError("user_id") if connection_name is None or not isinstance(connection_name, str): raise TypeError("connection_name") if channel_id is None or not isinstance(channel_id, str): raise TypeError("channel_id") await self._client.user_token.sign_out(user_id, connection_name, channel_id) async def get_token_status( self, user_id: str, channel_id: str, include_filter: str ) -> List[TokenStatus]: if user_id is None or not isinstance(user_id, str): raise TypeError("user_id") if channel_id is None or not isinstance(channel_id, str): raise TypeError("channel_id") result = await self._client.user_token.get_token_status( user_id, channel_id, include_filter ) return result async def get_aad_tokens( self, user_id: str, connection_name: str, resource_urls: List[str], channel_id: str, ) -> Dict[str, TokenResponse]: if user_id is None or not isinstance(user_id, str): raise TypeError("user_id") if connection_name is None or not isinstance(connection_name, str): raise TypeError("connection_name") if channel_id is None or not isinstance(channel_id, str): raise TypeError("channel_id") result = await self._client.user_token.get_aad_tokens( user_id, connection_name, channel_id, resource_urls ) return result async def exchange_token( self, user_id: str, connection_name: str, channel_id: str, exchange_request: TokenExchangeRequest, ) -> TokenResponse: if user_id is None or not isinstance(user_id, str): raise TypeError("user_id") if 
connection_name is None or not isinstance(connection_name, str): raise TypeError("connection_name") if channel_id is None or not isinstance(channel_id, str): raise TypeError("channel_id") (uri, token) = ( (exchange_request.uri, exchange_request.token) if exchange_request else (None, None) ) result = await self._client.user_token.exchange_async( user_id, connection_name, channel_id, uri, token ) return result
botbuilder-python/libraries/botframework-connector/botframework/connector/auth/_user_token_client_impl.py/0
{ "file_path": "botbuilder-python/libraries/botframework-connector/botframework/connector/auth/_user_token_client_impl.py", "repo_id": "botbuilder-python", "token_count": 2009 }
401
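A hedged sketch of constructing and querying the client above. All identifiers are placeholders, the import path is inferred from the file path shown above, and AppCredentials() with defaults mirrors its use in test_app_credentials.py later in this section.

import asyncio

from botframework.connector.auth import AppCredentials
from botframework.connector.auth._user_token_client_impl import _UserTokenClientImpl


async def main():
    client = _UserTokenClientImpl(
        app_id="00000000-0000-0000-0000-000000000000",  # placeholder
        credentials=AppCredentials(),
        oauth_endpoint="https://api.botframework.com",  # placeholder
    )
    # Returns None when no token is cached for the user/connection pair.
    token_response = await client.get_user_token(
        "user-id", "MyOAuthConnection", "test", magic_code=None
    )
    print(token_response.token if token_response else "no cached token")


asyncio.run(main())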
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

from abc import ABC

from .authentication_configuration import AuthenticationConfiguration
from .authentication_constants import AuthenticationConstants
from .claims_identity import ClaimsIdentity
from .credential_provider import CredentialProvider
from .government_constants import GovernmentConstants
from .jwt_token_extractor import JwtTokenExtractor
from .verify_options import VerifyOptions


class GovernmentChannelValidation(ABC):
    OPEN_ID_METADATA_ENDPOINT = ""

    TO_BOT_FROM_GOVERNMENT_CHANNEL_TOKEN_VALIDATION_PARAMETERS = VerifyOptions(
        issuer=[GovernmentConstants.TO_BOT_FROM_CHANNEL_TOKEN_ISSUER],
        audience=None,
        clock_tolerance=5 * 60,
        ignore_expiration=False,
    )

    @staticmethod
    async def authenticate_channel_token(
        auth_header: str,
        credentials: CredentialProvider,
        channel_id: str,
        auth_configuration: AuthenticationConfiguration = None,
    ) -> ClaimsIdentity:
        auth_configuration = auth_configuration or AuthenticationConfiguration()
        endpoint = (
            GovernmentChannelValidation.OPEN_ID_METADATA_ENDPOINT
            if GovernmentChannelValidation.OPEN_ID_METADATA_ENDPOINT
            else GovernmentConstants.TO_BOT_FROM_CHANNEL_OPENID_METADATA_URL
        )
        token_extractor = JwtTokenExtractor(
            GovernmentChannelValidation.TO_BOT_FROM_GOVERNMENT_CHANNEL_TOKEN_VALIDATION_PARAMETERS,
            endpoint,
            AuthenticationConstants.ALLOWED_SIGNING_ALGORITHMS,
        )

        identity: ClaimsIdentity = await token_extractor.get_identity_from_auth_header(
            auth_header, channel_id, auth_configuration.required_endorsements
        )
        return await GovernmentChannelValidation.validate_identity(
            identity, credentials
        )

    @staticmethod
    async def authenticate_channel_token_with_service_url(
        auth_header: str,
        credentials: CredentialProvider,
        service_url: str,
        channel_id: str,
        auth_configuration: AuthenticationConfiguration = None,
    ) -> ClaimsIdentity:
        identity: ClaimsIdentity = (
            await GovernmentChannelValidation.authenticate_channel_token(
                auth_header, credentials, channel_id, auth_configuration
            )
        )

        service_url_claim: str = identity.get_claim_value(
            AuthenticationConstants.SERVICE_URL_CLAIM
        )
        if service_url_claim != service_url:
            raise PermissionError("Unauthorized. service_url claim does not match.")

        return identity

    @staticmethod
    async def validate_identity(
        identity: ClaimsIdentity, credentials: CredentialProvider
    ) -> ClaimsIdentity:
        if identity is None:
            # No valid identity. Not Authorized.
            raise PermissionError("Unauthorized. No valid identity.")

        if not identity.is_authenticated:
            # The token is in some way invalid. Not Authorized.
            raise PermissionError("Unauthorized. Is not authenticated.")

        # Now check that the AppID in the claim set matches
        # what we're looking for. Note that in a multi-tenant bot, this value
        # comes from developer code that may be reaching out to a service, hence the
        # Async validation.

        # Look for the "aud" claim, but only if issued from the Bot Framework
        if (
            identity.get_claim_value(AuthenticationConstants.ISSUER_CLAIM)
            != GovernmentConstants.TO_BOT_FROM_CHANNEL_TOKEN_ISSUER
        ):
            # The token was not issued by the Government channel service. Not Authorized.
            raise PermissionError("Unauthorized. Issuer claim MUST be present.")

        # The AppId from the claim in the token must match the AppId specified by the developer.
        # In this case, the token is destined for the app, so we find the app ID in the audience claim.
        aud_claim: str = identity.get_claim_value(
            AuthenticationConstants.AUDIENCE_CLAIM
        )
        if not await credentials.is_valid_appid(aud_claim or ""):
            # The AppId is not valid or not present. Not Authorized.
            raise PermissionError(
                f"Unauthorized. Invalid AppId passed on token: {aud_claim}"
            )

        return identity
botbuilder-python/libraries/botframework-connector/botframework/connector/auth/government_channel_validation.py/0
{ "file_path": "botbuilder-python/libraries/botframework-connector/botframework/connector/auth/government_channel_validation.py", "repo_id": "botbuilder-python", "token_count": 1707 }
402
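A hedged sketch of the validation entry point above. The bearer token, ids, and URLs are placeholders, and SimpleCredentialProvider is assumed to be the stock CredentialProvider implementation exported by botframework.connector.auth.

import asyncio

from botframework.connector.auth import SimpleCredentialProvider
from botframework.connector.auth.government_channel_validation import (
    GovernmentChannelValidation,
)


async def main():
    credentials = SimpleCredentialProvider("app-id", "app-password")  # placeholders
    try:
        identity = await GovernmentChannelValidation.authenticate_channel_token_with_service_url(
            "Bearer <jwt-from-the-government-channel>",  # placeholder header
            credentials,
            "https://example.azure.us/",  # must equal the token's serviceurl claim
            "test",
        )
        print("authenticated:", identity.is_authenticated)
    except PermissionError as error:
        print("rejected:", error)


asyncio.run(main())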
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from abc import ABC, abstractmethod from .http_request import HttpRequest from .http_response_base import HttpResponseBase class HttpClientBase(ABC): @abstractmethod async def post(self, *, request: HttpRequest) -> HttpResponseBase: raise NotImplementedError()
botbuilder-python/libraries/botframework-connector/botframework/connector/http_client_base.py/0
{ "file_path": "botbuilder-python/libraries/botframework-connector/botframework/connector/http_client_base.py", "repo_id": "botbuilder-python", "token_count": 112 }
403
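Since HttpClientBase is abstract, a concrete subclass only has to supply post. A minimal sketch of a test double, assuming HttpResponseBase can be subclassed with the same three members implemented by _HttpResponseImpl earlier in this section (the stub classes are hypothetical):

from botframework.connector import HttpClientBase, HttpRequest, HttpResponseBase


class _CannedResponse(HttpResponseBase):
    # Hypothetical stub used only for this sketch.
    def __init__(self, status: int, body: str) -> None:
        self._status = status
        self._body = body

    @property
    def status_code(self):
        return self._status

    async def is_succesful(self) -> bool:  # spelling follows the interface
        return 200 <= self._status < 300

    async def read_content_str(self) -> str:
        return self._body


class StubHttpClient(HttpClientBase):
    """Records every request and answers with a canned response."""

    def __init__(self) -> None:
        self.requests = []

    async def post(self, *, request: HttpRequest) -> HttpResponseBase:
        self.requests.append(request)
        return _CannedResponse(200, "{}")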
[bdist_wheel] universal=1
botbuilder-python/libraries/botframework-connector/setup.cfg/0
{ "file_path": "botbuilder-python/libraries/botframework-connector/setup.cfg", "repo_id": "botbuilder-python", "token_count": 10 }
404
interactions: - request: body: null headers: Accept: [application/json] Accept-Encoding: ['gzip, deflate'] Connection: [keep-alive] Content-Length: ['0'] Content-Type: [application/json; charset=utf-8] User-Agent: [python/3.6.2 (Windows-10-10.0.16299-SP0) requests/2.18.1 msrest/0.4.23 azure-botframework-connector/3.0] method: DELETE uri: https://slack.botframework.com/v3/conversations/INVALID_ID/activities/INVALID_ID response: body: {string: "{\r\n \"error\": {\r\n \"code\": \"ServiceError\",\r\n \ \ \"message\": \"Invalid ConversationId: INVALID_ID\"\r\n }\r\n}"} headers: cache-control: [no-cache] content-length: ['105'] content-type: [application/json; charset=utf-8] date: ['Fri, 29 Dec 2017 18:07:23 GMT'] expires: ['-1'] pragma: [no-cache] request-context: ['appId=cid-v1:6814484e-c0d5-40ea-9dba-74ff29ca4f62'] server: [Microsoft-IIS/10.0] strict-transport-security: [max-age=31536000] x-powered-by: [ASP.NET] status: {code: 400, message: Bad Request} version: 1
botbuilder-python/libraries/botframework-connector/tests/recordings/test_conversations_delete_activity_with_invalid_conversation_id_fails.yaml/0
{ "file_path": "botbuilder-python/libraries/botframework-connector/tests/recordings/test_conversations_delete_activity_with_invalid_conversation_id_fails.yaml", "repo_id": "botbuilder-python", "token_count": 523 }
405
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import aiounittest from botframework.connector.auth import AppCredentials, AuthenticationConstants class AppCredentialsTests(aiounittest.AsyncTestCase): @staticmethod def test_should_not_send_token_for_anonymous(): # AppID is None app_creds_none = AppCredentials(app_id=None) assert app_creds_none.signed_session().headers.get("Authorization") is None # AppID is anonymous skill app_creds_anon = AppCredentials( app_id=AuthenticationConstants.ANONYMOUS_SKILL_APP_ID ) assert app_creds_anon.signed_session().headers.get("Authorization") is None def test_constructor(): should_default_to_channel_scope = AppCredentials() assert ( should_default_to_channel_scope.oauth_scope == AuthenticationConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE ) should_default_to_custom_scope = AppCredentials(oauth_scope="customScope") assert should_default_to_custom_scope.oauth_scope == "customScope"
botbuilder-python/libraries/botframework-connector/tests/test_app_credentials.py/0
{ "file_path": "botbuilder-python/libraries/botframework-connector/tests/test_app_credentials.py", "repo_id": "botbuilder-python", "token_count": 410 }
406
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from typing import Awaitable, Callable from botframework.streaming.payloads.models import Header class SendPacket: def __init__( self, *, header: Header, payload: object, is_length_known: bool, sent_callback: Callable[[Header], Awaitable] ): self.header = header self.payload = payload self.is_length_known = is_length_known self.sent_callback = sent_callback
botbuilder-python/libraries/botframework-streaming/botframework/streaming/payload_transport/send_packet.py/0
{ "file_path": "botbuilder-python/libraries/botframework-streaming/botframework/streaming/payload_transport/send_packet.py", "repo_id": "botbuilder-python", "token_count": 214 }
407
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from .header import Header from .payload_types import PayloadTypes from .request_payload import RequestPayload from .response_payload import ResponsePayload from .serializable import Serializable from .stream_description import StreamDescription __all__ = [ "Header", "PayloadTypes", "RequestPayload", "ResponsePayload", "Serializable", "StreamDescription", ]
botbuilder-python/libraries/botframework-streaming/botframework/streaming/payloads/models/__init__.py/0
{ "file_path": "botbuilder-python/libraries/botframework-streaming/botframework/streaming/payloads/models/__init__.py", "repo_id": "botbuilder-python", "token_count": 137 }
408
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import json from uuid import UUID, uuid4 from typing import List, Union from msrest.serialization import Model from botframework.streaming.payloads import ResponseMessageStream from botframework.streaming.payloads.models import Serializable class StreamingRequest: GET = "GET" POST = "POST" PUT = "PUT" DELETE = "DELETE" def __init__( self, *, verb: str = None, path: str = None, streams: List[ResponseMessageStream] = None, ): self.verb = verb self.path = path self.streams = streams @staticmethod def create_request( method: str, path: str = None, body: object = None ) -> "StreamingRequest": if not method: return None request = StreamingRequest( verb=method, path=path, ) if body: request.add_stream(body) return request @staticmethod def create_get(path: str = None, body: object = None) -> "StreamingRequest": return StreamingRequest.create_request("GET", path, body) @staticmethod def create_post(path: str = None, body: object = None) -> "StreamingRequest": return StreamingRequest.create_request("POST", path, body) @staticmethod def create_put(path: str = None, body: object = None) -> "StreamingRequest": return StreamingRequest.create_request("PUT", path, body) @staticmethod def create_delete(path: str = None, body: object = None) -> "StreamingRequest": return StreamingRequest.create_request("DELETE", path, body) def set_body(self, body: Union[str, Serializable, Model, bytes]): # TODO: verify if msrest.serialization.Model is necessary if not body: return if isinstance(body, bytes): pass else: if isinstance(body, Serializable): body = body.to_json() elif isinstance(body, Model): body = json.dumps(body.as_dict()) body = body.encode("ascii") self.add_stream(list(body)) def add_stream(self, content: object, stream_id: UUID = None): if not content: raise TypeError( f"'content: {content.__class__.__name__}' argument can't be None" ) if not self.streams: self.streams = [] self.streams.append( ResponseMessageStream(id=stream_id or uuid4(), content=content) )
botbuilder-python/libraries/botframework-streaming/botframework/streaming/streaming_request.py/0
{ "file_path": "botbuilder-python/libraries/botframework-streaming/botframework/streaming/streaming_request.py", "repo_id": "botbuilder-python", "token_count": 1085 }
409
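A short sketch of the helpers above. The path is a placeholder and the module import path is inferred from the file path shown above; the assertions follow directly from the code, since set_body ASCII-encodes str (and serialized model) bodies and attaches them as a single stream.

from botframework.streaming.streaming_request import StreamingRequest

request = StreamingRequest.create_post(path="/api/messages")  # placeholder path
request.set_body('{"type": "message", "text": "hi"}')

assert request.verb == StreamingRequest.POST
assert len(request.streams) == 1  # the encoded body was attached as one stream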
# # Run functional test on bot deployed to a Docker Linux environment in Azure. # pool: vmImage: 'Ubuntu-16.04' trigger: # ci trigger branches: include: - master pr: none # no pr trigger variables: # Container registry service connection established during pipeline creation dockerRegistryServiceConnection: 'NightlyE2E-Acr' azureRmServiceConnection: 'NightlyE2E-RM' dockerFilePath: 'libraries/functional-tests/functionaltestbot/Dockerfile' buildIdTag: $(Build.BuildNumber) webAppName: 'e2epython' containerRegistry: 'nightlye2etest.azurecr.io' imageRepository: 'functionaltestpy' # LinuxTestBotAppId: get this from azure # LinuxTestBotAppSecret: get this from Azure jobs: - job: Build displayName: Build and push bot image continueOnError: false steps: - task: Docker@2 displayName: Build and push bot image inputs: command: buildAndPush repository: $(imageRepository) dockerfile: $(dockerFilePath) containerRegistry: $(dockerRegistryServiceConnection) tags: $(buildIdTag) - job: Deploy displayName: Provision bot container dependsOn: - Build steps: - task: AzureRMWebAppDeployment@4 displayName: Python Functional E2E test. inputs: ConnectionType: AzureRM ConnectedServiceName: $(azureRmServiceConnection) appType: webAppContainer WebAppName: $(webAppName) DockerNamespace: $(containerRegistry) DockerRepository: $(imageRepository) DockerImageTag: $(buildIdTag) AppSettings: '-MicrosoftAppId $(LinuxTestBotAppId) -MicrosoftAppPassword $(LinuxTestBotAppSecret) -FLASK_APP /functionaltestbot/app.py -FLASK_DEBUG 1' #StartupCommand: 'flask run --host=0.0.0.0 --port=3978'
botbuilder-python/pipelines/botbuilder-python-functional-test-linux.yml/0
{ "file_path": "botbuilder-python/pipelines/botbuilder-python-functional-test-linux.yml", "repo_id": "botbuilder-python", "token_count": 600 }
410
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from botbuilder.dialogs import ( ComponentDialog, DialogTurnResult, OAuthPrompt, OAuthPromptSettings, WaterfallDialog, WaterfallStepContext ) from botbuilder.schema import TokenResponse from botbuilder.core import MessageFactory from botframework.connector.auth import MicrosoftAppCredentials from config import DefaultConfig class MainDialog(ComponentDialog): def __init__(self, config: DefaultConfig): super(MainDialog, self).__init__(MainDialog.__name__) self.connection_name = config.CONNECTION_NAME self.add_dialog( WaterfallDialog( WaterfallDialog.__name__, [self.sign_in_step, self.show_token_response] ) ) self.add_dialog( OAuthPrompt( OAuthPrompt.__name__, OAuthPromptSettings( connection_name=self.connection_name, text="Sign In to AAD", title="Sign In", oauth_app_credentials=MicrosoftAppCredentials( app_id=config.APP_ID, password=config.APP_PASSWORD ) ) ) ) async def sign_in_step(self, context: WaterfallStepContext) -> DialogTurnResult: return await context.begin_dialog(OAuthPrompt.__name__) async def show_token_response(self, context: WaterfallStepContext) -> DialogTurnResult: result: TokenResponse = context.result if not result: await context.context.send_activity(MessageFactory.text("Skill: No token response from OAuthPrompt")) else: await context.context.send_activity(MessageFactory.text(f"Skill: Your token is {result.token}")) return await context.end_dialog()
botbuilder-python/tests/experimental/sso/child/dialogs/main_dialog.py/0
{ "file_path": "botbuilder-python/tests/experimental/sso/child/dialogs/main_dialog.py", "repo_id": "botbuilder-python", "token_count": 842 }
411
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from uuid import uuid4 from typing import Dict, Tuple class RoutingIdFactory: def __init__(self): self._forward_x_ref: Dict[str, str] = {} self._backward_x_ref: Dict[str, Tuple[str, str]] = {} def create_skill_conversation_id(self, conversation_id: str, service_url: str) -> str: result = self._forward_x_ref.get(conversation_id, str(uuid4())) self._forward_x_ref[conversation_id] = result self._backward_x_ref[result] = (conversation_id, service_url) return result def get_conversation_info(self, encoded_conversation_id) -> Tuple[str, str]: return self._backward_x_ref[encoded_conversation_id]
botbuilder-python/tests/experimental/test-protocol/routing_id_factory.py/0
{ "file_path": "botbuilder-python/tests/experimental/test-protocol/routing_id_factory.py", "repo_id": "botbuilder-python", "token_count": 302 }
412
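A sketch of the round trip the factory supports: the same conversation id always maps to one stable skill conversation id, which decodes back to the original pair.

from routing_id_factory import RoutingIdFactory  # module-local import, as in this test bot

factory = RoutingIdFactory()

skill_id = factory.create_skill_conversation_id("conv-1", "https://example.org/")
# Calling again with the same conversation id reuses the minted id.
assert skill_id == factory.create_skill_conversation_id("conv-1", "https://example.org/")

conversation_id, service_url = factory.get_conversation_info(skill_id)
assert (conversation_id, service_url) == ("conv-1", "https://example.org/")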
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import uuid from botbuilder.core import ( ActivityHandler, TurnContext, MessageFactory, ) from botbuilder.integration import BotFrameworkHttpClient from botbuilder.schema import DeliveryModes class ParentBot(ActivityHandler): def __init__( self, skill_client: BotFrameworkHttpClient, ): self.client = skill_client async def on_message_activity(self, turn_context: TurnContext): await turn_context.send_activity("parent: before child") activity = MessageFactory.text("parent to child") TurnContext.apply_conversation_reference( activity, TurnContext.get_conversation_reference(turn_context.activity) ) activity.delivery_mode = DeliveryModes.expect_replies activities = await self.client.post_buffered_activity( None, "toBotId", "http://localhost:3979/api/messages", "http://tempuri.org/whatever", str(uuid.uuid4()), activity, ) if activities: await turn_context.send_activities(activities) await turn_context.send_activity("parent: after child")
botbuilder-python/tests/skills/skills-buffered/parent/bots/parent_bot.py/0
{ "file_path": "botbuilder-python/tests/skills/skills-buffered/parent/bots/parent_bot.py", "repo_id": "botbuilder-python", "token_count": 489 }
413
trigger: none pr: branches: include: - main parameters: - name: static displayName: Generate Static Fonts type: boolean default: true variables: - name: runCodesignValidationInjectionBG value: false jobs: - job: build displayName: Build Font pool: vmImage: macOS-latest steps: - task: UsePythonVersion@0 displayName: 'Use Python 3.8' inputs: versionSpec: '3.8' - bash: | brew install ttfautohint pip install -r requirements.txt ufolint displayName: 'Install build dependencies' - bash: | ufolint sources/*.ufo displayName: 'Lint sources' - ${{ if eq(parameters.static, true) }}: - bash: | python ./build.py -S -W displayName: 'Build variable + static fonts' - ${{ if eq(parameters.static, false) }}: - bash: | python ./build.py -W displayName: 'Build variable fonts only' - bash: | mkdir -p out cd build zip -r ../out/CascadiaCode.zip ttf otf woff2 displayName: 'Archive all fonts' - task: PublishPipelineArtifact@1 displayName: 'Publish archive' inputs: targetPath: out artifact: 'archive'
cascadia-code/.azure-pipelines.yml/0
{ "file_path": "cascadia-code/.azure-pipelines.yml", "repo_id": "cascadia-code", "token_count": 592 }
414
# Code of Conduct This project has adopted the [Microsoft Open Source Code of Conduct][conduct-code]. For more information see the [Code of Conduct FAQ][conduct-FAQ] or contact [opencode@microsoft.com][conduct-email] with any additional questions or comments. [conduct-code]: https://opensource.microsoft.com/codeofconduct/ [conduct-FAQ]: https://opensource.microsoft.com/codeofconduct/faq/ [conduct-email]: mailto:opencode@microsoft.com
cascadia-code/CODE_OF_CONDUCT.md/0
{ "file_path": "cascadia-code/CODE_OF_CONDUCT.md", "repo_id": "cascadia-code", "token_count": 117 }
415
<?xml version='1.0' encoding='UTF-8'?> <glyph name="ainThreedots-ar" format="2"> <advance width="1200"/> <unicode hex="06A0"/> <guideline x="121" y="689" angle="0"/> <outline> <component base="ain-ar"/> <component base="threedotsupabove-ar" xOffset="-93" yOffset="456"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/ainT_hreedots-ar.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/ainT_hreedots-ar.glif", "repo_id": "cascadia-code", "token_count": 192 }
416
<?xml version='1.0' encoding='UTF-8'?> <glyph name="ghain-ar.medi" format="2"> <advance width="1200"/> <outline> <component base="ain-ar.medi"/> <component base="dotabove-ar" xOffset="10" yOffset="273"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/ghain-ar.medi.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/ghain-ar.medi.glif", "repo_id": "cascadia-code", "token_count": 161 }
417
<?xml version='1.0' encoding='UTF-8'?> <glyph name="gravetonecomb" format="2"> <unicode hex="0340"/> <outline> <component base="gravecomb"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>gravecomb</string> </dict> </array> <key>com.schriftgestaltung.Glyphs.originalWidth</key> <integer>1200</integer> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/gravetonecomb.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/gravetonecomb.glif", "repo_id": "cascadia-code", "token_count": 304 }
418
<?xml version='1.0' encoding='UTF-8'?> <glyph name="hahTahTwodotshorizontalabove-ar" format="2"> <advance width="1200"/> <unicode hex="076F"/> <outline> <component base="hah-ar"/> <component base="twodotstahcenter-ar" xScale="0.94" yScale="0.94" xOffset="125" yOffset="-337"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.glyph.leftMetricsKey</key> <string>hah-ar</string> <key>com.schriftgestaltung.Glyphs.glyph.rightMetricsKey</key> <string>hah-ar</string> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/hahT_ahT_wodotshorizontalabove-ar.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/hahT_ahT_wodotshorizontalabove-ar.glif", "repo_id": "cascadia-code", "token_count": 293 }
419
<?xml version='1.0' encoding='UTF-8'?> <glyph name="kehehDotabove-ar" format="2"> <advance width="1200"/> <unicode hex="0762"/> <outline> <component base="keheh-ar"/> <component base="dotabove-ar" xOffset="54" yOffset="601"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/kehehD_otabove-ar.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/kehehD_otabove-ar.glif", "repo_id": "cascadia-code", "token_count": 173 }
420
<?xml version='1.0' encoding='UTF-8'?> <glyph name="kehehTwodotshorizontalabove-ar" format="2"> <advance width="1200"/> <unicode hex="063B"/> <anchor x="625" y="1494" name="top"/> <outline> <component base="keheh-ar"/> <component base="twodotshorizontalabove-ar.v2" xScale="0.7" yScale="0.7" xOffset="206" yOffset="750"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/kehehT_wodotshorizontalabove-ar.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/kehehT_wodotshorizontalabove-ar.glif", "repo_id": "cascadia-code", "token_count": 218 }
421
<?xml version='1.0' encoding='UTF-8'?> <glyph name="lamDotabove-ar.init.rlig" format="2"> <advance width="1200"/> <guideline x="305" y="819" angle="0"/> <anchor x="0" y="0" name="overlap"/> <outline> <component base="lam-ar.init.rlig"/> <component base="dotabove-ar" xOffset="324" yOffset="925"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>anchor</key> <string>top.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>dotabove-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/lamD_otabove-ar.init.rlig.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/lamD_otabove-ar.init.rlig.glif", "repo_id": "cascadia-code", "token_count": 381 }
422
<?xml version='1.0' encoding='UTF-8'?> <glyph name="leftHalfBlackDiamond" format="2"> <advance width="1200"/> <unicode hex="2B16"/> <note> uni2B16 </note> <outline> <contour> <point x="600" y="192" type="line"/> <point x="600" y="1228" type="line"/> <point x="67" y="710" type="line"/> </contour> <component base="whiteDiamond"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>whiteDiamond</string> </dict> </array> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/leftH_alfB_lackD_iamond.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/leftH_alfB_lackD_iamond.glif", "repo_id": "cascadia-code", "token_count": 366 }
423
<?xml version='1.0' encoding='UTF-8'?> <glyph name="less_bar_greater.liga" format="2"> <advance width="1200"/> <outline> <component base="greater" xOffset="2256"/> <component base="less" xOffset="164"/> <component base="bar" xOffset="1210"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>greater</string> </dict> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>1</integer> <key>name</key> <string>less</string> </dict> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>2</integer> <key>name</key> <string>bar</string> </dict> </array> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/less_bar_greater.liga.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/less_bar_greater.liga.glif", "repo_id": "cascadia-code", "token_count": 557 }
424
<?xml version='1.0' encoding='UTF-8'?> <glyph name="period_period_equal.liga" format="2"> <advance width="1200"/> <outline> <contour> <point x="1900" y="835" type="line"/> <point x="3320" y="835" type="line"/> <point x="3320" y="1085" type="line"/> <point x="1900" y="1085" type="line"/> </contour> <contour> <point x="1900" y="333" type="line"/> <point x="3320" y="333" type="line"/> <point x="3320" y="583" type="line"/> <point x="1900" y="583" type="line"/> </contour> <component base="period" xOffset="-120"/> <component base="period" xOffset="650"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>period</string> </dict> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>1</integer> <key>name</key> <string>period</string> </dict> </array> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/period_period_equal.liga.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/period_period_equal.liga.glif", "repo_id": "cascadia-code", "token_count": 612 }
425
<?xml version='1.0' encoding='UTF-8'?> <glyph name="plusminus" format="2"> <advance width="1200"/> <unicode hex="00B1"/> <outline> <contour> <point x="469" y="246" type="line"/> <point x="731" y="246" type="line"/> <point x="731" y="1224" type="line"/> <point x="469" y="1224" type="line"/> </contour> <contour> <point x="100" y="651" type="line"/> <point x="1100" y="651" type="line"/> <point x="1100" y="901" type="line"/> <point x="100" y="901" type="line"/> </contour> <component base="minus" yOffset="-430"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>minus</string> </dict> </array> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/plusminus.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/plusminus.glif", "repo_id": "cascadia-code", "token_count": 473 }
426
<?xml version='1.0' encoding='UTF-8'?> <glyph name="quotedbl" format="2"> <advance width="1200"/> <unicode hex="0022"/> <outline> <component base="quotesingle" xOffset="-233"/> <component base="quotesingle" xOffset="237"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>quotesingle</string> </dict> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>1</integer> <key>name</key> <string>quotesingle</string> </dict> </array> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/quotedbl.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/quotedbl.glif", "repo_id": "cascadia-code", "token_count": 426 }
427
<?xml version='1.0' encoding='UTF-8'?> <glyph name="sadThreedots-ar" format="2"> <advance width="1200"/> <unicode hex="069E"/> <outline> <component base="sad-ar"/> <component base="threedotsupabove-ar" xOffset="280" yOffset="373"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/sadT_hreedots-ar.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/sadT_hreedots-ar.glif", "repo_id": "cascadia-code", "token_count": 175 }
428
<?xml version='1.0' encoding='UTF-8'?> <glyph name="underscoredbl" format="2"> <advance width="1200"/> <unicode hex="2017"/> <outline> <component base="underscore"/> <component base="underscore" yOffset="-346"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>underscore</string> </dict> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>1</integer> <key>name</key> <string>underscore</string> </dict> </array> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/underscoredbl.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/underscoredbl.glif", "repo_id": "cascadia-code", "token_count": 417 }
429
<?xml version='1.0' encoding='UTF-8'?> <glyph name="yehRohingya-ar.fina" format="2"> <advance width="1200"/> <guideline x="243" y="-198" angle="90"/> <outline> <component base="yehRohingya-ar"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.glyph.rightMetricsKey</key> <string>_part.instroke</string> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/yehR_ohingya-ar.fina.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/yehR_ohingya-ar.fina.glif", "repo_id": "cascadia-code", "token_count": 213 }
430
<?xml version='1.0' encoding='UTF-8'?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>_alefFathatan-ar.fina.rlig</key> <string>_alefF_athatan-ar.fina.rlig.glif</string> <key>_tahabove</key> <string>_tahabove.glif</string> <key>allah-ar</key> <string>allah-ar.glif</string> <key>bar_braceright.liga</key> <string>bar_braceright.liga.glif</string> <key>braceleft_bar.liga</key> <string>braceleft_bar.liga.glif</string> <key>braceright</key> <string>braceright.glif</string> <key>braceright_numbersign.liga</key> <string>braceright_numbersign.liga.glif</string> <key>numbersign_braceleft.liga</key> <string>numbersign_braceleft.liga.glif</string> <key>question-ar</key> <string>question-ar.glif</string> </dict> </plist>
cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs.public.background/contents.plist/0
{ "file_path": "cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs.public.background/contents.plist", "repo_id": "cascadia-code", "token_count": 410 }
431
<?xml version='1.0' encoding='UTF-8'?> <glyph name="Aringacute" format="2"> <advance width="1200"/> <unicode hex="01FA"/> <outline> <component base="acutecomb.case" xOffset="-30" yOffset="248"/> <component base="Aring"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>acutecomb.case</string> </dict> </array> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/A_ringacute.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/A_ringacute.glif", "repo_id": "cascadia-code", "token_count": 304 }
432
<?xml version='1.0' encoding='UTF-8'?> <glyph name="alefMaksuraAlefabove-ar.fina.alt" format="2"> <anchor x="0" y="0" name="_overlap"/> <outline> <component base="alefMaksura-ar.fina.alt"/> <component base="alefabove-ar" xOffset="-210" yOffset="-51"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>alefMaksura-ar.fina.alt</string> </dict> <dict> <key>anchor</key> <string>top</string> <key>index</key> <integer>1</integer> <key>name</key> <string>alefabove-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/alefM_aksuraA_lefabove-ar.fina.alt.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/alefM_aksuraA_lefabove-ar.fina.alt.glif", "repo_id": "cascadia-code", "token_count": 501 }
433
<?xml version='1.0' encoding='UTF-8'?> <glyph name="highhamzaWaw-ar" format="2"> <advance width="1200"/> <unicode hex="0676"/> <anchor x="573" y="1120" name="top"/> <outline> <component base="waw-ar" yOffset="3"/> <component base="highhamza-ar" xOffset="383" yOffset="-337"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>index</key> <integer>0</integer> <key>name</key> <string>waw-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/highhamzaW_aw-ar.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/highhamzaW_aw-ar.glif", "repo_id": "cascadia-code", "token_count": 370 }
434
<?xml version='1.0' encoding='UTF-8'?> <glyph name="kafDotabove-ar.init" format="2"> <advance width="1200"/> <outline> <component base="kaf-ar.init"/> <component base="dotabove-ar" xOffset="-187" yOffset="601"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/kafD_otabove-ar.init.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/kafD_otabove-ar.init.glif", "repo_id": "cascadia-code", "token_count": 166 }
435
<?xml version='1.0' encoding='UTF-8'?> <glyph name="lamThreedotsabove-ar.medi" format="2"> <advance width="1200"/> <anchor x="708" y="2033" name="top"/> <outline> <component base="lam-ar.medi"/> <component base="threedotsupabove-ar" xScale="0.9" yScale="0.9" xOffset="161" yOffset="979"/> </outline> <lib> <dict> <key>com.schriftgestaltung.Glyphs.ComponentInfo</key> <array> <dict> <key>alignment</key> <integer>-1</integer> <key>anchor</key> <string>top.dot</string> <key>index</key> <integer>1</integer> <key>name</key> <string>threedotsupabove-ar</string> </dict> </array> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/lamT_hreedotsabove-ar.medi.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/lamT_hreedotsabove-ar.medi.glif", "repo_id": "cascadia-code", "token_count": 414 }
436
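The threedotsupabove-ar reference in the record above carries both a scale and an offset. In the GLIF format the six component attributes define a single affine transform: x' = xScale*x + xyScale*y + xOffset and y' = yxScale*x + yScale*y + yOffset. A self-contained check of that arithmetic:

def transform_point(x, y, xScale=1, xyScale=0, yxScale=0, yScale=1,
                    xOffset=0, yOffset=0):
    # The GLIF component attributes as a 2x2 linear map plus a translation.
    return (xScale * x + xyScale * y + xOffset,
            yxScale * x + yScale * y + yOffset)

# A sample point of the referenced glyph under the transform from the record:
print(transform_point(100, 200, xScale=0.9, yScale=0.9, xOffset=161, yOffset=979))
# -> (251.0, 1159.0)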
<?xml version='1.0' encoding='UTF-8'?> <glyph name="rehHamzaabove-ar.fina" format="2"> <advance width="1200"/> <outline> <component base="reh-ar.fina"/> <component base="hamzaabove-ar" xOffset="60" yOffset="-469"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/rehH_amzaabove-ar.fina.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/rehH_amzaabove-ar.fina.glif", "repo_id": "cascadia-code", "token_count": 169 }
437
<?xml version='1.0' encoding='UTF-8'?> <glyph name="tchehDotabove-ar.init" format="2"> <advance width="1200"/> <outline> <component base="tcheh-ar.init"/> <component base="dotabove-ar" xOffset="-34" yOffset="332"/> </outline> <lib> <dict> <key>public.markColor</key> <string>0.98,0.36,0.67,1</string> </dict> </lib> </glyph>
cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/tchehD_otabove-ar.init.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/tchehD_otabove-ar.init.glif", "repo_id": "cascadia-code", "token_count": 168 }
438
<?xml version='1.0' encoding='UTF-8'?> <glyph name="upperHalfBlackWhiteCircle" format="2"> <advance width="1200"/> <unicode hex="25D3"/> <note> uni25D3 </note> <outline> <contour> <point x="135" y="710" type="line"/> <point x="1065" y="710" type="line"/> <point x="984" y="1051" type="line"/> <point x="594" y="1198" type="line"/> <point x="241" y="1051" type="line"/> </contour> <component base="whiteCircle" yScale="-1" yOffset="1420"/> </outline> </glyph>
cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/upperH_alfB_lackW_hiteC_ircle.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-ExtraLight.ufo/glyphs/upperH_alfB_lackW_hiteC_ircle.glif", "repo_id": "cascadia-code", "token_count": 229 }
439
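The whiteCircle component in the record above uses yScale=-1 with yOffset=1420, i.e. y' = 1420 - y: a reflection about the horizontal line y = 710. The filled contour supplies the black upper half while the mirrored white circle supplies the open lower half. The reflection is easy to sanity-check:

# Reflection about y = 710: a point at height y lands at 1420 - y.
for y in (0, 710, 1198):
    print(y, "->", 1420 - y)   # 0 -> 1420, 710 -> 710, 1198 -> 222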
<?xml version='1.0' encoding='UTF-8'?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <array> <array> <string>public.default</string> <string>glyphs</string> </array> <array> <string>public.background</string> <string>glyphs.public.background</string> </array> <array> <string>ExtraCondensed Black</string> <string>glyphs.E_xtraC_ondensed B_lack</string> </array> <array> <string>ExtraCondensed Thin</string> <string>glyphs.E_xtraC_ondensed T_hin</string> </array> <array> <string>ExtraCondensed Regular</string> <string>glyphs.E_xtraC_ondensed R_egular</string> </array> </array> </plist>
cascadia-code/sources/CascadiaCode-ExtraLight.ufo/layercontents.plist/0
{ "file_path": "cascadia-code/sources/CascadiaCode-ExtraLight.ufo/layercontents.plist", "repo_id": "cascadia-code", "token_count": 353 }
440
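layercontents.plist is ordered: each entry pairs a layer name with its glyphs directory, and in UFO 3 the array order is the layer order of the font (the default layer is the one stored in the plain glyphs directory). A minimal sketch of reading it, again with an assumed relative path:

import plistlib

with open("layercontents.plist", "rb") as f:
    layers = plistlib.load(f)

for layer_name, directory in layers:
    print(f"layer {layer_name!r} -> {directory!r}")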
<?xml version='1.0' encoding='UTF-8'?> <glyph name="F" format="2"> <advance width="1200"/> <unicode hex="0046"/> <guideline x="941" y="-480" angle="80"/> <guideline x="761" y="-480" angle="80"/> <anchor x="506" y="0" name="bottom"/> <anchor x="757" y="1420" name="top"/> <outline> <contour> <point x="111" y="0" type="line"/> <point x="207" y="0" type="line"/> <point x="457" y="1420" type="line"/> <point x="361" y="1420" type="line"/> </contour> <contour> <point x="253" y="606" type="line"/> <point x="952" y="606" type="line"/> <point x="968" y="696" type="line"/> <point x="270" y="696" type="line"/> </contour> <contour> <point x="345" y="1330" type="line"/> <point x="1260" y="1330" type="line"/> <point x="1276" y="1420" type="line"/> <point x="361" y="1420" type="line"/> </contour> </outline> </glyph>
cascadia-code/sources/CascadiaCode-ExtraLightItalic.ufo/glyphs/F_.glif/0
{ "file_path": "cascadia-code/sources/CascadiaCode-ExtraLightItalic.ufo/glyphs/F_.glif", "repo_id": "cascadia-code", "token_count": 435 }
441
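The F record above carries explicit bottom and top anchors (506,0 and 757,1420). Under the usual anchor-attachment rule, a combining mark's _top anchor is aligned to the base's top anchor, so the component offset is simply the difference of the two anchor points. A hedged sketch — the base anchor is taken from the record, the mark anchor is a placeholder:

base_top = (757, 1420)   # the F's top anchor, from the record above
mark_top = (600, 1100)   # hypothetical _top anchor of some combining mark

offset = (base_top[0] - mark_top[0], base_top[1] - mark_top[1])
print("xOffset, yOffset =", offset)   # (157, 320)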