Columns:
  text (string, length 5 to 22M)
  id (string, length 12 to 177)
  metadata (dict)
  __index_level_0__ (int64, 0 to 1.37k)
#include <ATen/ATen.h> #include <THC/THCAtomics.cuh> #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define THREADS_PER_BLOCK 1024 inline int GET_BLOCKS(const int N) { int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; int max_block_num = 65000; return min(optimal_block_num, max_block_num); } template <typename scalar_t> __global__ void ROIPoolForward(const int nthreads, const scalar_t *bottom_data, const scalar_t *rois, const scalar_t spatial_scale, const int channels, const int height, const int width, const int pooled_h, const int pooled_w, scalar_t *top_data, int *argmax_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_w; int ph = (index / pooled_w) % pooled_h; int c = (index / pooled_w / pooled_h) % channels; int n = index / pooled_w / pooled_h / channels; const scalar_t *offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; // calculate the roi region on feature maps scalar_t roi_x1 = offset_rois[1] * spatial_scale; scalar_t roi_y1 = offset_rois[2] * spatial_scale; scalar_t roi_x2 = (offset_rois[3] + 1) * spatial_scale; scalar_t roi_y2 = (offset_rois[4] + 1) * spatial_scale; // force malformed rois to be 1x1 scalar_t roi_w = roi_x2 - roi_x1; scalar_t roi_h = roi_y2 - roi_y1; if (roi_w <= 0 || roi_h <= 0) continue; scalar_t bin_size_w = roi_w / static_cast<scalar_t>(pooled_w); scalar_t bin_size_h = roi_h / static_cast<scalar_t>(pooled_h); // the corresponding bin region int bin_x1 = floor(static_cast<scalar_t>(pw) * bin_size_w + roi_x1); int bin_y1 = floor(static_cast<scalar_t>(ph) * bin_size_h + roi_y1); int bin_x2 = ceil(static_cast<scalar_t>(pw + 1) * bin_size_w + roi_x1); int bin_y2 = ceil(static_cast<scalar_t>(ph + 1) * bin_size_h + roi_y1); // add roi offsets and clip to input boundaries bin_x1 = min(max(bin_x1, 0), width); bin_y1 = min(max(bin_y1, 0), height); bin_x2 = min(max(bin_x2, 0), width); bin_y2 = min(max(bin_y2, 0), height); bool is_empty = (bin_y2 <= bin_y1) || (bin_x2 <= bin_x1); // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int max_idx = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; // Define an empty pooling region to be zero scalar_t max_val = is_empty ? 
static_cast<scalar_t>(0) : bottom_data[bin_y1 * width + bin_x1] - 1; for (int h = bin_y1; h < bin_y2; ++h) { for (int w = bin_x1; w < bin_x2; ++w) { int offset = h * width + w; if (bottom_data[offset] > max_val) { max_val = bottom_data[offset]; max_idx = offset; } } } top_data[index] = max_val; if (argmax_data != NULL) argmax_data[index] = max_idx; } } int ROIPoolForwardLaucher(const at::Tensor features, const at::Tensor rois, const float spatial_scale, const int channels, const int height, const int width, const int num_rois, const int pooled_h, const int pooled_w, at::Tensor output, at::Tensor argmax) { const int output_size = num_rois * channels * pooled_h * pooled_w; AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.type(), "ROIPoolLaucherForward", ([&] { const scalar_t *bottom_data = features.data<scalar_t>(); const scalar_t *rois_data = rois.data<scalar_t>(); scalar_t *top_data = output.data<scalar_t>(); int *argmax_data = argmax.data<int>(); ROIPoolForward<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, bottom_data, rois_data, scalar_t(spatial_scale), channels, height, width, pooled_h, pooled_w, top_data, argmax_data); })); THCudaCheck(cudaGetLastError()); return 1; } template <typename scalar_t> __global__ void ROIPoolBackward(const int nthreads, const scalar_t *top_diff, const scalar_t *rois, const int *argmax_data, const scalar_t spatial_scale, const int channels, const int height, const int width, const int pooled_h, const int pooled_w, scalar_t *bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_w; int ph = (index / pooled_w) % pooled_h; int c = (index / pooled_w / pooled_h) % channels; int n = index / pooled_w / pooled_h / channels; int roi_batch_ind = rois[n * 5]; int bottom_index = argmax_data[(n * channels + c) * pooled_h * pooled_w + ph * pooled_w + pw]; atomicAdd(bottom_diff + (roi_batch_ind * channels + c) * height * width + bottom_index, top_diff[index]); } } int ROIPoolBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois, const at::Tensor argmax, const float spatial_scale, const int batch_size, const int channels, const int height, const int width, const int num_rois, const int pooled_h, const int pooled_w, at::Tensor bottom_grad) { const int output_size = num_rois * pooled_h * pooled_w * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.type(), "ROIPoolLaucherBackward", ([&] { const scalar_t *top_diff = top_grad.data<scalar_t>(); const scalar_t *rois_data = rois.data<scalar_t>(); const int *argmax_data = argmax.data<int>(); scalar_t *bottom_diff = bottom_grad.data<scalar_t>(); if (sizeof(scalar_t) == sizeof(double)) { fprintf(stderr, "double is not supported\n"); exit(-1); } ROIPoolBackward<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, top_diff, rois_data, argmax_data, scalar_t(spatial_scale), channels, height, width, pooled_h, pooled_w, bottom_diff); })); THCudaCheck(cudaGetLastError()); return 1; }
Cream/CDARTS/CDARTS_detection/mmdet/ops/roi_pool/src/roi_pool_kernel.cu/0
{ "file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/roi_pool/src/roi_pool_kernel.cu", "repo_id": "Cream", "token_count": 3271 }
300
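The CUDA file above only provides the forward/backward launchers; on the Python side such launchers are normally wrapped in a torch.autograd.Function. The sketch below shows that wiring under the assumption that the kernels are compiled into an extension named roi_pool_cuda; that module name and its exact binding signature are not shown in this record.

# Hedged sketch: `roi_pool_cuda` is an assumed extension module name compiled
# from roi_pool_kernel.cu; the real project may expose different bindings.
import torch
from torch.autograd import Function
# import roi_pool_cuda  # assumed binding of ROIPoolForwardLaucher / ROIPoolBackwardLaucher


class RoIPoolFunction(Function):
    @staticmethod
    def forward(ctx, features, rois, out_size, spatial_scale):
        out_h, out_w = out_size
        num_rois, channels = rois.size(0), features.size(1)
        output = features.new_zeros(num_rois, channels, out_h, out_w)
        argmax = features.new_zeros(num_rois, channels, out_h, out_w, dtype=torch.int)
        # roi_pool_cuda.forward(features, rois, spatial_scale, out_h, out_w, output, argmax)
        ctx.save_for_backward(rois, argmax)
        ctx.spatial_scale = spatial_scale
        ctx.feature_size = features.size()
        return output

    @staticmethod
    def backward(ctx, grad_output):
        rois, argmax = ctx.saved_tensors
        grad_input = grad_output.new_zeros(ctx.feature_size)
        # roi_pool_cuda.backward(grad_output.contiguous(), rois, argmax,
        #                        ctx.spatial_scale, grad_input)
        return grad_input, None, None, None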
# -*- coding: utf-8 -*- """ This module defines the :class:`NiceRepr` mixin class, which defines a ``__repr__`` and ``__str__`` method that only depend on a custom ``__nice__`` method, which you must define. This means you only have to overload one function instead of two. Furthermore, if the object defines a ``__len__`` method, then the ``__nice__`` method defaults to something sensible, otherwise it is treated as abstract and raises ``NotImplementedError``. To use simply have your object inherit from :class:`NiceRepr` (multi-inheritance should be ok). This code was copied from the ubelt library: https://github.com/Erotemic/ubelt Example: >>> # Objects that define __nice__ have a default __str__ and __repr__ >>> class Student(NiceRepr): ... def __init__(self, name): ... self.name = name ... def __nice__(self): ... return self.name >>> s1 = Student('Alice') >>> s2 = Student('Bob') >>> print('s1 = {}'.format(s1)) >>> print('s2 = {}'.format(s2)) s1 = <Student(Alice)> s2 = <Student(Bob)> Example: >>> # Objects that define __len__ have a default __nice__ >>> class Group(NiceRepr): ... def __init__(self, data): ... self.data = data ... def __len__(self): ... return len(self.data) >>> g = Group([1, 2, 3]) >>> print('g = {}'.format(g)) g = <Group(3)> """ import warnings class NiceRepr(object): """ Inherit from this class and define ``__nice__`` to "nicely" print your objects. Defines ``__str__`` and ``__repr__`` in terms of ``__nice__`` function Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``. If the inheriting class has a ``__len__``, method then the default ``__nice__`` method will return its length. Example: >>> class Foo(NiceRepr): ... def __nice__(self): ... return 'info' >>> foo = Foo() >>> assert str(foo) == '<Foo(info)>' >>> assert repr(foo).startswith('<Foo(info) at ') Example: >>> class Bar(NiceRepr): ... pass >>> bar = Bar() >>> import pytest >>> with pytest.warns(None) as record: >>> assert 'object at' in str(bar) >>> assert 'object at' in repr(bar) Example: >>> class Baz(NiceRepr): ... def __len__(self): ... return 5 >>> baz = Baz() >>> assert str(baz) == '<Baz(5)>' """ def __nice__(self): if hasattr(self, '__len__'): # It is a common pattern for objects to use __len__ in __nice__ # As a convenience we define a default __nice__ for these objects return str(len(self)) else: # In all other cases force the subclass to overload __nice__ raise NotImplementedError( 'Define the __nice__ method for {!r}'.format(self.__class__)) def __repr__(self): try: nice = self.__nice__() classname = self.__class__.__name__ return '<{0}({1}) at {2}>'.format(classname, nice, hex(id(self))) except NotImplementedError as ex: warnings.warn(str(ex), category=RuntimeWarning) return object.__repr__(self) def __str__(self): try: classname = self.__class__.__name__ nice = self.__nice__() return '<{0}({1})>'.format(classname, nice) except NotImplementedError as ex: warnings.warn(str(ex), category=RuntimeWarning) return object.__repr__(self)
Cream/CDARTS/CDARTS_detection/mmdet/utils/util_mixins.py/0
{ "file_path": "Cream/CDARTS/CDARTS_detection/mmdet/utils/util_mixins.py", "repo_id": "Cream", "token_count": 1532 }
301
json_file: "jsons/big4.json" data_path: "../DATASET/ADEChallengeData2016/" dataset: "coco" det2_cfg: "configs/ADE20K/base.yaml" num_classes: 150 max_iteration: 160000 seed: 12345 random_sample: False eval_flag: True opt: "sgd" opt_eps: 0.001 sched: "new" #"raw for original" epochs: 1000 drop_path_prob: 0.2 image_height: 640 image_width: 640 eval_height: 640 eval_width: 640 crop_size: 640 batch_size: 4 mode: "poly" base_lr: 0.05 Fch: 16 bn_momentum: 0.01 warmup_start_lr: 5e-6 warmup_iters: 1000 weight_decay: 1e-4 model_ema: True model_ema_decay: 0.9998 clip_grad: 1.0 lamb: 0.2 ignore: 255 topk_percent: 0.2 semantic_loss_weight: 1.0 center_loss_weight: 200 offset_loss_weight: 0.01 eval_flip: False
Cream/CDARTS/CDARTS_segmentation/configs/ade/cydas.yaml/0
{ "file_path": "Cream/CDARTS/CDARTS_segmentation/configs/ade/cydas.yaml", "repo_id": "Cream", "token_count": 313 }
302
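A minimal way to consume a flat config like the one above is to read it with PyYAML; the loader below is only a sketch and not part of the CDARTS code in this record (the project may parse it into an argparse/EasyDict-style namespace instead).

# Sketch only: plain PyYAML load of the config above.
import yaml

with open("configs/ade/cydas.yaml") as f:   # path taken from this record's id
    cfg = yaml.safe_load(f)

print(cfg["num_classes"], cfg["crop_size"], cfg["base_lr"])  # 150 640 0.05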
# ------------------------------------------------------------------------------ # Loads COCO panoptic dataset. # Written by Bowen Cheng (bcheng9@illinois.edu) # ------------------------------------------------------------------------------ import json import os import numpy as np from .base_dataset import BaseDataset from .utils import DatasetDescriptor from ..transforms import build_transforms, Resize, PanopticTargetGenerator, SemanticTargetGenerator _COCO_PANOPTIC_INFORMATION = DatasetDescriptor( splits_to_sizes={'train2017': 118287, 'trainval2017': 123287, 'val2017': 5000, 'test-dev2017': 20288, 'test2017': 40670}, # `test` includes `test-dev` and `test-challenge` num_classes=133, ignore_label=255, ) # Add 1 void label. _COCO_PANOPTIC_TRAIN_ID_TO_EVAL_ID = ( [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90, 92, 93, 95, 100, 107, 109, 112, 118, 119, 122, 125, 128, 130, 133, 138, 141, 144, 145, 147, 148, 149, 151, 154, 155, 156, 159, 161, 166, 168, 171, 175, 176, 177, 178, 180, 181, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 0]) _COCO_PANOPTIC_EVAL_ID_TO_TRAIN_ID = { v: k for k, v in enumerate(_COCO_PANOPTIC_TRAIN_ID_TO_EVAL_ID[:-1]) } _COCO_PANOPTIC_THING_LIST = list(range(80)) # the first 80 classes are `thing` classes COCO_CATEGORIES = [ {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"}, {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"}, {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"}, {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"}, {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"}, {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"}, {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"}, {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"}, {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"}, {"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"}, {"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"}, {"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"}, {"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"}, {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"}, {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"}, {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"}, {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"}, {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"}, {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"}, {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"}, {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"}, {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"}, {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"}, {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"}, {"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"}, {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"}, {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"}, {"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"}, {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"}, {"color": [255, 
99, 164], "isthing": 1, "id": 34, "name": "frisbee"}, {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"}, {"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"}, {"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"}, {"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"}, {"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"}, {"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"}, {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"}, {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"}, {"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"}, {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"}, {"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"}, {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"}, {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"}, {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"}, {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"}, {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"}, {"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"}, {"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"}, {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"}, {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"}, {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"}, {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"}, {"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"}, {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"}, {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"}, {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"}, {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"}, {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"}, {"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"}, {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"}, {"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"}, {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"}, {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"}, {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"}, {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"}, {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"}, {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"}, {"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"}, {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"}, {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"}, {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"}, {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"}, {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"}, {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"}, {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"}, {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"}, {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"}, {"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"}, {"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"}, {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"}, {"color": [255, 255, 
128], "isthing": 0, "id": 92, "name": "banner"}, {"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"}, {"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"}, {"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"}, {"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"}, {"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"}, {"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"}, {"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"}, {"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"}, {"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"}, {"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"}, {"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"}, {"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"}, {"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"}, {"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"}, {"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"}, {"color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"}, {"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"}, {"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"}, {"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"}, {"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"}, {"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"}, {"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"}, {"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"}, {"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"}, {"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"}, {"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"}, {"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"}, {"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"}, {"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"}, {"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"}, {"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"}, {"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"}, {"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"}, {"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"}, {"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"}, {"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"}, {"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"}, {"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"}, {"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"}, {"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"}, {"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"}, {"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"}, {"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"}, {"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"}, {"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"}, {"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"}, {"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"}, {"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"}, 
{"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"}, {"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"}, {"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"}, {"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"}, ] class COCOPanoptic(BaseDataset): """ COCO panoptic segmentation dataset. Arguments: root: Str, root directory. split: Str, data split, e.g. train/val/test. is_train: Bool, for training or testing. crop_size: Tuple, crop size. mirror: Bool, whether to apply random horizontal flip. min_scale: Float, min scale in scale augmentation. max_scale: Float, max scale in scale augmentation. scale_step_size: Float, step size to select random scale. mean: Tuple, image mean. std: Tuple, image std. semantic_only: Bool, only use semantic segmentation label. ignore_stuff_in_offset: Boolean, whether to ignore stuff region when training the offset branch. small_instance_area: Integer, indicates largest area for small instances. small_instance_weight: Integer, indicates semantic loss weights for small instances. """ def __init__(self, root, split, min_resize_value=641, max_resize_value=641, resize_factor=32, is_train=True, crop_size=(641, 641), mirror=True, min_scale=0.5, max_scale=2., scale_step_size=0.25, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), semantic_only=False, ignore_stuff_in_offset=False, small_instance_area=0, small_instance_weight=1, **kwargs): super(COCOPanoptic, self).__init__(root, split, is_train, crop_size, mirror, min_scale, max_scale, scale_step_size, mean, std) assert split in _COCO_PANOPTIC_INFORMATION.splits_to_sizes.keys() self.num_classes = _COCO_PANOPTIC_INFORMATION.num_classes self.ignore_label = _COCO_PANOPTIC_INFORMATION.ignore_label self.label_pad_value = (0, 0, 0) self.has_instance = True self.label_divisor = 256 self.label_dtype = np.float32 self.thing_list = _COCO_PANOPTIC_THING_LIST # Get image and annotation list. if 'test' in split: self.img_list = [] self.ann_list = None self.ins_list = None json_filename = os.path.join(self.root, 'annotations', 'image_info_{}.json'.format(self.split)) dataset = json.load(open(json_filename)) for img in dataset['images']: img_file_name = img['file_name'] self.img_list.append(os.path.join(self.root, 'test2017', img_file_name)) else: self.img_list = [] self.ann_list = [] self.ins_list = [] json_filename = os.path.join(self.root, 'annotations', 'panoptic_{}_trainId.json'.format(self.split)) dataset = json.load(open(json_filename)) # First sort by image id. 
images = sorted(dataset['images'], key=lambda i: i['id']) annotations = sorted(dataset['annotations'], key=lambda i: i['image_id']) for img in images: img_file_name = img['file_name'] self.img_list.append(os.path.join(self.root, self.split, img_file_name)) for ann in annotations: ann_file_name = ann['file_name'] self.ann_list.append(os.path.join( self.root, 'annotations', 'panoptic_{}'.format(self.split), ann_file_name)) self.ins_list.append(ann['segments_info']) assert len(self) == _COCO_PANOPTIC_INFORMATION.splits_to_sizes[self.split] self.pre_augmentation_transform = Resize(min_resize_value, max_resize_value, resize_factor) self.transform = build_transforms(self, is_train) if semantic_only: self.target_transform = SemanticTargetGenerator(self.ignore_label, self.rgb2id) else: self.target_transform = PanopticTargetGenerator(self.ignore_label, self.rgb2id, _COCO_PANOPTIC_THING_LIST, sigma=8, ignore_stuff_in_offset=ignore_stuff_in_offset, small_instance_area=small_instance_area, small_instance_weight=small_instance_weight) # Generates semantic label for evaluation. self.raw_label_transform = SemanticTargetGenerator(self.ignore_label, self.rgb2id) @staticmethod def train_id_to_eval_id(): return _COCO_PANOPTIC_TRAIN_ID_TO_EVAL_ID @staticmethod def rgb2id(color): """Converts the color to panoptic label. Color is created by `color = [segmentId % 256, segmentId // 256, segmentId // 256 // 256]`. Args: color: Ndarray or a tuple, color encoded image. Returns: Panoptic label. """ if isinstance(color, np.ndarray) and len(color.shape) == 3: if color.dtype == np.uint8: color = color.astype(np.int32) return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2] return int(color[0] + 256 * color[1] + 256 * 256 * color[2]) @staticmethod def create_label_colormap(): """Creates a label colormap used in COCO panoptic benchmark. Returns: A colormap for visualizing segmentation results. """ colormap = np.zeros((256, 3), dtype=np.uint8) for i, color in enumerate(COCO_CATEGORIES): colormap[i] = color['color'] return colormap
Cream/CDARTS/CDARTS_segmentation/dataloaders/segdatasets/coco_panoptic.py/0
{ "file_path": "Cream/CDARTS/CDARTS_segmentation/dataloaders/segdatasets/coco_panoptic.py", "repo_id": "Cream", "token_count": 7871 }
303
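COCOPanoptic.rgb2id documents the panoptic label encoding (color = [id % 256, id // 256, id // 256 // 256]); the short example below just verifies the round trip described in that docstring.

import numpy as np

segment_id = 1234
color = (segment_id % 256, segment_id // 256 % 256, segment_id // 256 // 256)  # (210, 4, 0)
assert color[0] + 256 * color[1] + 256 * 256 * color[2] == segment_id

# The same formula applied to an HxWx3 uint8 label image:
label = np.zeros((2, 2, 3), dtype=np.uint8)
label[..., 0], label[..., 1] = segment_id % 256, segment_id // 256
panoptic = (label[:, :, 0].astype(np.int32)
            + 256 * label[:, :, 1] + 256 * 256 * label[:, :, 2])
assert int(panoptic[0, 0]) == segment_id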
from .build import build_segmentation_model_from_cfg
Cream/CDARTS/CDARTS_segmentation/segmentation/model/__init__.py/0
{ "file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/model/__init__.py", "repo_id": "Cream", "token_count": 16 }
304
from .deeplabv3 import DeepLabV3
from .deeplabv3plus import DeepLabV3Plus
from .panoptic_deeplab import PanopticDeepLab
Cream/CDARTS/CDARTS_segmentation/segmentation/model/meta_arch/__init__.py/0
{ "file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/model/meta_arch/__init__.py", "repo_id": "Cream", "token_count": 47 }
305
# ------------------------------------------------------------------------------
# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/env.py#L15
# Modified by Bowen Cheng (bcheng9@illinois.edu)
# ------------------------------------------------------------------------------

import importlib
import importlib.util
import logging
import numpy as np
import os
import random
import sys
from datetime import datetime
import torch

__all__ = ["seed_all_rng"]


def seed_all_rng(seed=None):
    """
    Set the random seed for the RNG in torch, numpy and python.

    Args:
        seed (int): if None, will use a strong random seed.
    """
    if seed is None:
        seed = (
            os.getpid()
            + int(datetime.now().strftime("%S%f"))
            + int.from_bytes(os.urandom(2), "big")
        )
        logger = logging.getLogger(__name__)
        logger.info("Using a generated random seed {}".format(seed))
    np.random.seed(seed)
    torch.set_rng_state(torch.manual_seed(seed).get_state())
    random.seed(seed)
Cream/CDARTS/CDARTS_segmentation/segmentation/utils/env.py/0
{ "file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/utils/env.py", "repo_id": "Cream", "token_count": 382 }
306
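seed_all_rng above is meant to be called once per process before any model or dataloader is built; a usage sketch follows, with the import path inferred from the file location in this record rather than verified.

# Import path inferred from this record's file_path; adjust as needed.
from segmentation.utils.env import seed_all_rng

seed_all_rng(42)    # fixed seed for all three RNGs (torch, numpy, random)
seed_all_rng(None)  # derive and log a strong per-process seed instead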
import cv2 import numpy as np import numbers import random import collections def get_2dshape(shape, *, zero=True): if not isinstance(shape, collections.Iterable): shape = int(shape) shape = (shape, shape) else: h, w = map(int, shape) shape = (h, w) if zero: minv = 0 else: minv = 1 assert min(shape) >= minv, 'invalid shape: {}'.format(shape) return shape def random_crop_pad_to_shape(img, crop_pos, crop_size, pad_label_value): h, w = img.shape[:2] start_crop_h, start_crop_w = crop_pos assert ((start_crop_h < h) and (start_crop_h >= 0)) assert ((start_crop_w < w) and (start_crop_w >= 0)) crop_size = get_2dshape(crop_size) crop_h, crop_w = crop_size img_crop = img[start_crop_h:start_crop_h + crop_h, start_crop_w:start_crop_w + crop_w, ...] img_, margin = pad_image_to_shape(img_crop, crop_size, cv2.BORDER_CONSTANT, pad_label_value) return img_, margin def generate_random_crop_pos(ori_size, crop_size): ori_size = get_2dshape(ori_size) h, w = ori_size crop_size = get_2dshape(crop_size) crop_h, crop_w = crop_size pos_h, pos_w = 0, 0 if h > crop_h: pos_h = random.randint(0, h - crop_h + 1) if w > crop_w: pos_w = random.randint(0, w - crop_w + 1) return pos_h, pos_w def pad_image_to_shape(img, shape, border_mode, value): margin = np.zeros(4, np.uint32) shape = get_2dshape(shape) pad_height = shape[0] - img.shape[0] if shape[0] - img.shape[0] > 0 else 0 pad_width = shape[1] - img.shape[1] if shape[1] - img.shape[1] > 0 else 0 margin[0] = pad_height // 2 margin[1] = pad_height // 2 + pad_height % 2 margin[2] = pad_width // 2 margin[3] = pad_width // 2 + pad_width % 2 img = cv2.copyMakeBorder(img, margin[0], margin[1], margin[2], margin[3], border_mode, value=value) return img, margin def pad_image_size_to_multiples_of(img, multiple, pad_value): h, w = img.shape[:2] d = multiple def canonicalize(s): v = s // d return (v + (v * d != s)) * d th, tw = map(canonicalize, (h, w)) return pad_image_to_shape(img, (th, tw), cv2.BORDER_CONSTANT, pad_value) def resize_ensure_shortest_edge(img, edge_length, interpolation_mode=cv2.INTER_LINEAR): assert isinstance(edge_length, int) and edge_length > 0, edge_length h, w = img.shape[:2] if h < w: ratio = float(edge_length) / h th, tw = edge_length, max(1, int(ratio * w)) else: ratio = float(edge_length) / w th, tw = max(1, int(ratio * h)), edge_length img = cv2.resize(img, (tw, th), interpolation_mode) return img def random_scale(img, gt, scales): scale = random.choice(scales) sh = int(img.shape[0] * scale) sw = int(img.shape[1] * scale) img = cv2.resize(img, (sw, sh), interpolation=cv2.INTER_LINEAR) gt = cv2.resize(gt, (sw, sh), interpolation=cv2.INTER_NEAREST) return img, gt, scale def random_scale_with_length(img, gt, length): size = random.choice(length) sh = size sw = size img = cv2.resize(img, (sw, sh), interpolation=cv2.INTER_LINEAR) gt = cv2.resize(gt, (sw, sh), interpolation=cv2.INTER_NEAREST) return img, gt, size def random_mirror(img, gt): if random.random() >= 0.5: img = cv2.flip(img, 1) gt = cv2.flip(gt, 1) return img, gt, def random_rotation(img, gt): angle = random.random() * 20 - 10 h, w = img.shape[:2] rotation_matrix = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1) img = cv2.warpAffine(img, rotation_matrix, (w, h), flags=cv2.INTER_LINEAR) gt = cv2.warpAffine(gt, rotation_matrix, (w, h), flags=cv2.INTER_NEAREST) return img, gt def random_gaussian_blur(img): gauss_size = random.choice([1, 3, 5, 7]) if gauss_size > 1: # do the gaussian blur img = cv2.GaussianBlur(img, (gauss_size, gauss_size), 0) return img def center_crop(img, shape): h, w = 
shape[0], shape[1] y = (img.shape[0] - h) // 2 x = (img.shape[1] - w) // 2 return img[y:y + h, x:x + w] def random_crop(img, gt, size): if isinstance(size, numbers.Number): size = (int(size), int(size)) h, w = img.shape[:2] crop_h, crop_w = size[0], size[1] if h > crop_h: x = random.randint(0, h - crop_h + 1) img = img[x:x + crop_h, :, :] gt = gt[x:x + crop_h, :] if w > crop_w: x = random.randint(0, w - crop_w + 1) img = img[:, x:x + crop_w, :] gt = gt[:, x:x + crop_w] return img, gt def normalize(img, mean, std): # pytorch pretrained model need the input range: 0-1 img = img.astype(np.float32) / 255.0 img = img - mean img = img / std return img
Cream/CDARTS/CDARTS_segmentation/tools/utils/img_utils.py/0
{ "file_path": "Cream/CDARTS/CDARTS_segmentation/tools/utils/img_utils.py", "repo_id": "Cream", "token_count": 2324 }
307
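The crop helpers above are typically combined as: sample a crop position once, then apply the same crop/pad to image and label before normalization. A sketch, with the import path inferred from the file location:

import numpy as np
from tools.utils.img_utils import (  # import path inferred, not verified
    generate_random_crop_pos, random_crop_pad_to_shape, normalize)

img = np.random.randint(0, 256, (480, 600, 3), dtype=np.uint8)
gt = np.random.randint(0, 19, (480, 600), dtype=np.uint8)

crop_size = (512, 512)
pos = generate_random_crop_pos(img.shape[:2], crop_size)
img_crop, _ = random_crop_pad_to_shape(img, pos, crop_size, 0)    # pad image with 0
gt_crop, _ = random_crop_pad_to_shape(gt, pos, crop_size, 255)    # pad label with ignore index
img_norm = normalize(img_crop, np.array([0.485, 0.456, 0.406]),
                     np.array([0.229, 0.224, 0.225]))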
import torch.nn as nn from pdb import set_trace as bp from layers import NaiveSyncBatchNorm BatchNorm2d = NaiveSyncBatchNorm def make_divisible(v, divisor=8, min_value=1): """ forked from slim: https://github.com/tensorflow/models/blob/\ 0344c5503ee55e24f0de7f37336a6e08f10976fd/\ research/slim/nets/mobilenet/mobilenet.py#L62-L69 """ if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_v < 0.9 * v: new_v += divisor return new_v class USConv2d(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, depthwise=False, bias=True, width_mult_list=[1.]): super(USConv2d, self).__init__( in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.depthwise = depthwise self.in_channels_max = in_channels self.out_channels_max = out_channels self.width_mult_list = width_mult_list self.ratio = (1., 1.) def set_ratio(self, ratio): self.ratio = ratio def forward(self, input): assert self.ratio[0] in self.width_mult_list, str(self.ratio[0]) + " in? " + str(self.width_mult_list) self.in_channels = make_divisible(self.in_channels_max * self.ratio[0]) assert self.ratio[1] in self.width_mult_list, str(self.ratio[1]) + " in? " + str(self.width_mult_list) self.out_channels = make_divisible(self.out_channels_max * self.ratio[1]) self.groups = self.in_channels if self.depthwise else 1 weight = self.weight[:self.out_channels, :self.in_channels, :, :] if self.bias is not None: bias = self.bias[:self.out_channels] else: bias = self.bias y = nn.functional.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups) return y class USBatchNorm2d(BatchNorm2d): def __init__(self, num_features, width_mult_list=[1.]): super(USBatchNorm2d, self).__init__( num_features, affine=True, track_running_stats=False) self.num_features_max = num_features self.width_mult_list = width_mult_list # for tracking performance during training self.bn = nn.ModuleList( [ BatchNorm2d(i, affine=True) for i in [ make_divisible(self.num_features_max * width_mult) for width_mult in width_mult_list ] ] ) self.ratio = 1. def set_ratio(self, ratio): self.ratio = ratio def forward(self, input): assert self.ratio in self.width_mult_list idx = self.width_mult_list.index(self.ratio) y = self.bn[idx](input) return y
Cream/CDARTS/CDARTS_segmentation/train/slimmable_ops.py/0
{ "file_path": "Cream/CDARTS/CDARTS_segmentation/train/slimmable_ops.py", "repo_id": "Cream", "token_count": 1311 }
308
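The slimmable layers above carry weights for the maximum width and slice them at run time; set_ratio must be called with a ratio from width_mult_list before each forward. A small sketch (import path inferred; USBatchNorm2d builds on the project's NaiveSyncBatchNorm, which may expect the project's usual distributed setup):

import torch
from train.slimmable_ops import USConv2d, USBatchNorm2d  # import path inferred, not verified

width_mult_list = [0.5, 0.75, 1.0]
conv = USConv2d(32, 64, 3, padding=1, bias=False, width_mult_list=width_mult_list)
bn = USBatchNorm2d(64, width_mult_list=width_mult_list)

conv.set_ratio((0.5, 0.75))      # (input ratio, output ratio)
bn.set_ratio(0.75)
x = torch.randn(2, 16, 56, 56)   # 16 == make_divisible(32 * 0.5)
y = bn(conv(x))                  # output has make_divisible(64 * 0.75) == 48 channels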
""" Utilities """ import os import torch import torch.nn as nn import torch.nn.functional as F import logging import shutil import torch import torch.distributed as dist import numpy as np def distill(result): result = result.split('\n') cifar10 = result[5].replace(' ', '').split(':') cifar100 = result[7].replace(' ', '').split(':') imagenet16 = result[9].replace(' ', '').split(':') cifar10_train = float(cifar10[1].strip(',test')[-7:-2].strip('=')) cifar10_test = float(cifar10[2][-7:-2].strip('=')) cifar100_train = float(cifar100[1].strip(',valid')[-7:-2].strip('=')) cifar100_valid = float(cifar100[2].strip(',test')[-7:-2].strip('=')) cifar100_test = float(cifar100[3][-7:-2].strip('=')) imagenet16_train = float(imagenet16[1].strip(',valid')[-7:-2].strip('=')) imagenet16_valid = float(imagenet16[2].strip(',test')[-7:-2].strip('=')) imagenet16_test = float(imagenet16[3][-7:-2].strip('=')) return cifar10_train, cifar10_test, cifar100_train, cifar100_valid, \ cifar100_test, imagenet16_train, imagenet16_valid, imagenet16_test class AverageMeter(): """ Computes and stores the average and current value """ def __init__(self): self.reset() def reset(self): """ Reset all statistics """ self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): """ Update statistics """ self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def get_logger(file_path): """ Make python logger """ logger = logging.getLogger('cdarts') log_format = '%(asctime)s | %(message)s' formatter = logging.Formatter(log_format, datefmt='%m/%d %I:%M:%S %p') file_handler = logging.FileHandler(file_path) file_handler.setFormatter(formatter) stream_handler = logging.StreamHandler() stream_handler.setFormatter(formatter) logger.addHandler(file_handler) logger.addHandler(stream_handler) logger.setLevel(logging.INFO) return logger def param_size(model): """ Compute parameter size in MB """ n_params = sum( np.prod(v.size()) for k, v in model.named_parameters() if not k.startswith('aux_head')) return n_params / 1e6 def print_speed(i, i_time, n, logger): """print_speed(index, index_time, total_iteration)""" average_time = i_time remaining_time = (n - i) * average_time remaining_day = math.floor(remaining_time / 86400) remaining_hour = math.floor(remaining_time / 3600 - remaining_day * 24) remaining_min = math.floor(remaining_time / 60 - remaining_day * 1440 - remaining_hour * 60) logger.info('Progress: %d / %d [%d%%], Speed: %.3f s/iter, ETA %d:%02d:%02d (D:H:M)\n' % (i, n, i/n*100, average_time, remaining_day, remaining_hour, remaining_min)) logger.info('\nPROGRESS: {:.2f}%\n'.format(100 * i / n)) def accuracy(output, target, topk=(1,)): """ Computes the precision@k for the specified values of k """ maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() # one-hot case if target.ndimension() > 1: target = target.max(1)[1] correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0) res.append(correct_k.mul_(1.0 / batch_size)) return res def save_checkpoint(state, ckpt_dir, is_best=False): filename = os.path.join(ckpt_dir, 'checkpoint.pth.tar') torch.save(state, filename) if is_best: best_filename = os.path.join(ckpt_dir, 'best.pth.tar') torch.save(state, best_filename) # shutil.copyfile(filename, best_filename) def reduce_tensor(tensor, world_size): rt = tensor.clone() dist.all_reduce(rt, op=dist.ReduceOp.SUM) rt /= world_size return rt def drop_path_(x, drop_prob, training): if 
training and drop_prob > 0.: keep_prob = 1. - drop_prob # per data point mask; assuming x in cuda. mask = torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob) x.div_(keep_prob).mul_(mask) return x def adjust_lr(optimizer, epoch, config): # Smaller slope for the last 5 epochs because lr * 1/250 is relatively large if config.epochs - epoch > 5: lr = config.lr * (config.epochs - 5 - epoch) / (config.epochs - 5) else: lr = config.lr * (config.epochs - epoch) / ((config.epochs - 5) * 5) for param_group in optimizer.param_groups: param_group['lr'] = lr return lr
Cream/CDARTS/benchmark201/utils/utils.py/0
{ "file_path": "Cream/CDARTS/benchmark201/utils/utils.py", "repo_id": "Cream", "token_count": 1967 }
309
import torch.nn as nn
from lib.models import ops


class ModelAug(nn.Module):

    def __init__(self, feature_extractor, nas_layers, fc_layer, n_nodes=4, aux_head=None):
        """
        args:
        """
        super(ModelAug, self).__init__()
        self.feature_extractor = feature_extractor
        self.nas_layers = nas_layers
        self.nas_layers_num = len(nas_layers)
        self.fc = fc_layer
        self.aux_head = aux_head
        self.gap = nn.AdaptiveAvgPool2d(1)

    def forward(self, x):
        logits_aux = None
        if len(self.feature_extractor) == 1:
            s0 = self.feature_extractor[0](x)
            s1 = s0
        elif len(self.feature_extractor) == 2:
            s0 = self.feature_extractor[0](x)
            s1 = self.feature_extractor[1](s0)
        else:
            raise NotImplementedError

        sp = s1
        for i in range(self.nas_layers_num):
            s0, s1 = self.forward_nas_layer(s0, s1, self.nas_layers[i])
            # if i == (self.nas_layers_num * 2 // 3 - 1):
            if i == (self.nas_layers_num - 2):
                if self.training:
                    logits_aux = self.aux_head(s1)

        out = self.gap(s1)
        out = out.view(out.size(0), -1)  # flatten
        logits = self.fc(out)
        return logits, logits_aux

    def forward_nas_layer(self, s0, s1, nas_layer):
        for cell in nas_layer:
            s0, s1 = s1, cell(s0, s1)
        return s0, s1

    def drop_path_prob(self, p):
        """ Set drop path probability """
        for module in self.modules():
            if isinstance(module, ops.DropPath_):
                module.p = p
Cream/CDARTS/lib/models/model_augment.py/0
{ "file_path": "Cream/CDARTS/lib/models/model_augment.py", "repo_id": "Cream", "token_count": 898 }
310
AUTO_RESUME: False
DATA_DIR: './data/imagenet'
MODEL: '112m_retrain'
RESUME_PATH: './experiments/workspace/retrain/resume.pth.tar'
SAVE_PATH: './experiments/workspace/retrain'
SEED: 42
LOG_INTERVAL: 50
RECOVERY_INTERVAL: 0
WORKERS: 8
NUM_GPU: 8
SAVE_IMAGES: False
AMP: False
OUTPUT: 'None'
EVAL_METRICS: 'prec1'
TTA: 0
LOCAL_RANK: 0

DATASET:
  NUM_CLASSES: 1000
  IMAGE_SIZE: 224 # image patch size
  INTERPOLATION: 'random' # Image resize interpolation type
  BATCH_SIZE: 128 # batch size
  NO_PREFECHTER: False

NET:
  GP: 'avg'
  DROPOUT_RATE: 0.2
  SELECTION: 470

EMA:
  USE: True
  FORCE_CPU: False # force model ema to be tracked on CPU
  DECAY: 0.9999

LR: 0.064
EPOCHS: 500
OPT_EPS: 1e-3
SCHED: 'cosine'
OPT: 'rmsproptf'
WARMUP_LR: 1e-6
DECAY_EPOCHS: 2.4
DECAY_RATE: 0.973
WARMUP_EPOCHS: 3
WEIGHT_DECAY: 1e-5

AUGMENTATION:
  AA: 'rand-m9-mstd0.5'
  RE_PROB: 0.2 # random erase prob
  RE_MODE: 'pixel' # random erase mode
Cream/Cream/experiments/configs/retrain/114.yaml/0
{ "file_path": "Cream/Cream/experiments/configs/retrain/114.yaml", "repo_id": "Cream", "token_count": 442 }
311
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # Written by Hao Du and Houwen Peng # email: haodu8-c@my.cityu.edu.hk and houwen.peng@microsoft.com import time import torch from collections import OrderedDict from lib.utils.util import AverageMeter, accuracy, reduce_tensor # validate function def validate(epoch, model, loader, loss_fn, cfg, log_suffix='', logger=None, writer=None, local_rank=0): batch_time_m = AverageMeter() losses_m = AverageMeter() prec1_m = AverageMeter() prec5_m = AverageMeter() model.eval() end = time.time() last_idx = len(loader) - 1 with torch.no_grad(): for batch_idx, (input, target) in enumerate(loader): last_batch = batch_idx == last_idx output = model(input) if isinstance(output, (tuple, list)): output = output[0] # augmentation reduction reduce_factor = cfg.TTA if reduce_factor > 1: output = output.unfold( 0, reduce_factor, reduce_factor).mean( dim=2) target = target[0:target.size(0):reduce_factor] loss = loss_fn(output, target) prec1, prec5 = accuracy(output, target, topk=(1, 5)) if cfg.NUM_GPU > 1: reduced_loss = reduce_tensor(loss.data, cfg.NUM_GPU) prec1 = reduce_tensor(prec1, cfg.NUM_GPU) prec5 = reduce_tensor(prec5, cfg.NUM_GPU) else: reduced_loss = loss.data torch.cuda.synchronize() losses_m.update(reduced_loss.item(), input.size(0)) prec1_m.update(prec1.item(), output.size(0)) prec5_m.update(prec5.item(), output.size(0)) batch_time_m.update(time.time() - end) end = time.time() if local_rank == 0 and (last_batch or batch_idx % cfg.LOG_INTERVAL == 0): log_name = 'Test' + log_suffix logger.info( '{0}: [{1:>4d}/{2}] ' 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) ' 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) ' 'Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) ' 'Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format( log_name, batch_idx, last_idx, batch_time=batch_time_m, loss=losses_m, top1=prec1_m, top5=prec5_m)) writer.add_scalar( 'Loss' + log_suffix + '/vaild', prec1_m.avg, epoch * len(loader) + batch_idx) writer.add_scalar( 'Accuracy' + log_suffix + '/vaild', prec1_m.avg, epoch * len(loader) + batch_idx) metrics = OrderedDict( [('loss', losses_m.avg), ('prec1', prec1_m.avg), ('prec5', prec5_m.avg)]) return metrics
Cream/Cream/lib/core/test.py/0
{ "file_path": "Cream/Cream/lib/core/test.py", "repo_id": "Cream", "token_count": 1762 }
312
yacs
numpy==1.17
opencv-python==4.0.1.24
torchvision==0.2.1
thop
git+https://github.com/sovrasov/flops-counter.pytorch.git
pillow==6.1.0
torch==1.2
timm==0.1.20
tensorboardx==1.2
tensorboard
future
Cream/Cream/requirements/0
{ "file_path": "Cream/Cream/requirements", "repo_id": "Cream", "token_count": 98 }
313
# -------------------------------------------------------- # EfficientViT Model Architecture for Downstream Tasks # Copyright (c) 2022 Microsoft # Written by: Xinyu Liu # -------------------------------------------------------- import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint import itertools from timm.models.vision_transformer import trunc_normal_ from timm.models.layers import SqueezeExcite, DropPath, to_2tuple import numpy as np import itertools from mmcv_custom import load_checkpoint, _load_checkpoint, load_state_dict from mmdet.utils import get_root_logger from mmdet.models.builder import BACKBONES from torch.nn.modules.batchnorm import _BatchNorm class Conv2d_BN(torch.nn.Sequential): def __init__(self, a, b, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1, resolution=-10000): super().__init__() self.add_module('c', torch.nn.Conv2d( a, b, ks, stride, pad, dilation, groups, bias=False)) self.add_module('bn', torch.nn.BatchNorm2d(b)) torch.nn.init.constant_(self.bn.weight, bn_weight_init) torch.nn.init.constant_(self.bn.bias, 0) @torch.no_grad() def fuse(self): c, bn = self._modules.values() w = bn.weight / (bn.running_var + bn.eps)**0.5 w = c.weight * w[:, None, None, None] b = bn.bias - bn.running_mean * bn.weight / \ (bn.running_var + bn.eps)**0.5 m = torch.nn.Conv2d(w.size(1) * self.c.groups, w.size( 0), w.shape[2:], stride=self.c.stride, padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups) m.weight.data.copy_(w) m.bias.data.copy_(b) return m class BN_Linear(torch.nn.Sequential): def __init__(self, a, b, bias=True, std=0.02): super().__init__() self.add_module('bn', torch.nn.BatchNorm1d(a)) self.add_module('l', torch.nn.Linear(a, b, bias=bias)) trunc_normal_(self.l.weight, std=std) if bias: torch.nn.init.constant_(self.l.bias, 0) @torch.no_grad() def fuse(self): bn, l = self._modules.values() w = bn.weight / (bn.running_var + bn.eps)**0.5 b = bn.bias - self.bn.running_mean * \ self.bn.weight / (bn.running_var + bn.eps)**0.5 w = l.weight * w[None, :] if l.bias is None: b = b @ self.l.weight.T else: b = (l.weight @ b[:, None]).view(-1) + self.l.bias m = torch.nn.Linear(w.size(1), w.size(0)) m.weight.data.copy_(w) m.bias.data.copy_(b) return m def replace_batchnorm(net): for child_name, child in net.named_children(): if hasattr(child, 'fuse'): setattr(net, child_name, child.fuse()) elif isinstance(child, torch.nn.BatchNorm2d): setattr(net, child_name, torch.nn.Identity()) else: replace_batchnorm(child) class PatchMerging(torch.nn.Module): def __init__(self, dim, out_dim, input_resolution): super().__init__() hid_dim = int(dim * 4) self.conv1 = Conv2d_BN(dim, hid_dim, 1, 1, 0, resolution=input_resolution) self.act = torch.nn.ReLU() self.conv2 = Conv2d_BN(hid_dim, hid_dim, 3, 2, 1, groups=hid_dim, resolution=input_resolution) self.se = SqueezeExcite(hid_dim, .25) self.conv3 = Conv2d_BN(hid_dim, out_dim, 1, 1, 0, resolution=input_resolution // 2) def forward(self, x): x = self.conv3(self.se(self.act(self.conv2(self.act(self.conv1(x)))))) return x class Residual(torch.nn.Module): def __init__(self, m, drop=0.): super().__init__() self.m = m self.drop = drop def forward(self, x): if self.training and self.drop > 0: return x + self.m(x) * torch.rand(x.size(0), 1, 1, 1, device=x.device).ge_(self.drop).div(1 - self.drop).detach() else: return x + self.m(x) class FFN(torch.nn.Module): def __init__(self, ed, h, resolution): super().__init__() self.pw1 = Conv2d_BN(ed, h, resolution=resolution) self.act 
= torch.nn.ReLU() self.pw2 = Conv2d_BN(h, ed, bn_weight_init=0, resolution=resolution) def forward(self, x): x = self.pw2(self.act(self.pw1(x))) return x class CascadedGroupAttention(torch.nn.Module): r""" Cascaded Group Attention. Args: dim (int): Number of input channels. key_dim (int): The dimension for query and key. num_heads (int): Number of attention heads. attn_ratio (int): Multiplier for the query dim for value dimension. resolution (int): Input resolution, correspond to the window size. kernels (List[int]): The kernel size of the dw conv on query. """ def __init__(self, dim, key_dim, num_heads=8, attn_ratio=4, resolution=14, kernels=[5, 5, 5, 5],): super().__init__() self.num_heads = num_heads self.scale = key_dim ** -0.5 self.key_dim = key_dim self.d = int(attn_ratio * key_dim) self.attn_ratio = attn_ratio qkvs = [] dws = [] for i in range(num_heads): qkvs.append(Conv2d_BN(dim // (num_heads), self.key_dim * 2 + self.d, resolution=resolution)) dws.append(Conv2d_BN(self.key_dim, self.key_dim, kernels[i], 1, kernels[i]//2, groups=self.key_dim, resolution=resolution)) self.qkvs = torch.nn.ModuleList(qkvs) self.dws = torch.nn.ModuleList(dws) self.proj = torch.nn.Sequential(torch.nn.ReLU(), Conv2d_BN( self.d * num_heads, dim, bn_weight_init=0, resolution=resolution)) points = list(itertools.product(range(resolution), range(resolution))) N = len(points) attention_offsets = {} idxs = [] for p1 in points: for p2 in points: offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) if offset not in attention_offsets: attention_offsets[offset] = len(attention_offsets) idxs.append(attention_offsets[offset]) self.attention_biases = torch.nn.Parameter( torch.zeros(num_heads, len(attention_offsets))) self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N)) @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and hasattr(self, 'ab'): del self.ab else: self.ab = self.attention_biases[:, self.attention_bias_idxs] def forward(self, x): # x (B,C,H,W) B, C, H, W = x.shape trainingab = self.attention_biases[:, self.attention_bias_idxs] feats_in = x.chunk(len(self.qkvs), dim=1) feats_out = [] feat = feats_in[0] for i, qkv in enumerate(self.qkvs): if i > 0: # add the previous output to the input feat = feat + feats_in[i] feat = qkv(feat) q, k, v = feat.view(B, -1, H, W).split([self.key_dim, self.key_dim, self.d], dim=1) # B, C/h, H, W q = self.dws[i](q) q, k, v = q.flatten(2), k.flatten(2), v.flatten(2) # B, C/h, N attn = ( (q.transpose(-2, -1) @ k) * self.scale + (trainingab[i] if self.training else self.ab[i]) ) attn = attn.softmax(dim=-1) # BNN feat = (v @ attn.transpose(-2, -1)).view(B, self.d, H, W) # BCHW feats_out.append(feat) x = self.proj(torch.cat(feats_out, 1)) return x class LocalWindowAttention(torch.nn.Module): r""" Local Window Attention. Args: dim (int): Number of input channels. key_dim (int): The dimension for query and key. num_heads (int): Number of attention heads. attn_ratio (int): Multiplier for the query dim for value dimension. resolution (int): Input resolution. window_resolution (int): Local window resolution. kernels (List[int]): The kernel size of the dw conv on query. 
""" def __init__(self, dim, key_dim, num_heads=8, attn_ratio=4, resolution=14, window_resolution=7, kernels=[5, 5, 5, 5],): super().__init__() self.dim = dim self.num_heads = num_heads self.resolution = resolution assert window_resolution > 0, 'window_size must be greater than 0' self.window_resolution = window_resolution self.attn = CascadedGroupAttention(dim, key_dim, num_heads, attn_ratio=attn_ratio, resolution=window_resolution, kernels=kernels,) def forward(self, x): B, C, H, W = x.shape if H <= self.window_resolution and W <= self.window_resolution: x = self.attn(x) else: x = x.permute(0, 2, 3, 1) pad_b = (self.window_resolution - H % self.window_resolution) % self.window_resolution pad_r = (self.window_resolution - W % self.window_resolution) % self.window_resolution padding = pad_b > 0 or pad_r > 0 if padding: x = torch.nn.functional.pad(x, (0, 0, 0, pad_r, 0, pad_b)) pH, pW = H + pad_b, W + pad_r nH = pH // self.window_resolution nW = pW // self.window_resolution # window partition, BHWC -> B(nHh)(nWw)C -> BnHnWhwC -> (BnHnW)hwC -> (BnHnW)Chw x = x.view(B, nH, self.window_resolution, nW, self.window_resolution, C).transpose(2, 3).reshape( B * nH * nW, self.window_resolution, self.window_resolution, C ).permute(0, 3, 1, 2) x = self.attn(x) # window reverse, (BnHnW)Chw -> (BnHnW)hwC -> BnHnWhwC -> B(nHh)(nWw)C -> BHWC x = x.permute(0, 2, 3, 1).view(B, nH, nW, self.window_resolution, self.window_resolution, C).transpose(2, 3).reshape(B, pH, pW, C) if padding: x = x[:, :H, :W].contiguous() x = x.permute(0, 3, 1, 2) return x class EfficientViTBlock(torch.nn.Module): """ A basic EfficientViT building block. Args: type (str): Type for token mixer. Default: 's' for self-attention. ed (int): Number of input channels. kd (int): Dimension for query and key in the token mixer. nh (int): Number of attention heads. ar (int): Multiplier for the query dim for value dimension. resolution (int): Input resolution. window_resolution (int): Local window resolution. kernels (List[int]): The kernel size of the dw conv on query. 
""" def __init__(self, type, ed, kd, nh=8, ar=4, resolution=14, window_resolution=7, kernels=[5, 5, 5, 5],): super().__init__() self.dw0 = Residual(Conv2d_BN(ed, ed, 3, 1, 1, groups=ed, bn_weight_init=0., resolution=resolution)) self.ffn0 = Residual(FFN(ed, int(ed * 2), resolution)) if type == 's': self.mixer = Residual(LocalWindowAttention(ed, kd, nh, attn_ratio=ar, \ resolution=resolution, window_resolution=window_resolution, kernels=kernels)) self.dw1 = Residual(Conv2d_BN(ed, ed, 3, 1, 1, groups=ed, bn_weight_init=0., resolution=resolution)) self.ffn1 = Residual(FFN(ed, int(ed * 2), resolution)) def forward(self, x): return self.ffn1(self.dw1(self.mixer(self.ffn0(self.dw0(x))))) class EfficientViT(torch.nn.Module): def __init__(self, img_size=400, patch_size=16, frozen_stages=0, in_chans=3, stages=['s', 's', 's'], embed_dim=[64, 128, 192], key_dim=[16, 16, 16], depth=[1, 2, 3], num_heads=[4, 4, 4], window_size=[7, 7, 7], kernels=[5, 5, 5, 5], down_ops=[['subsample', 2], ['subsample', 2], ['']], pretrained=None, distillation=False,): super().__init__() resolution = img_size self.patch_embed = torch.nn.Sequential(Conv2d_BN(in_chans, embed_dim[0] // 8, 3, 2, 1, resolution=resolution), torch.nn.ReLU(), Conv2d_BN(embed_dim[0] // 8, embed_dim[0] // 4, 3, 2, 1, resolution=resolution // 2), torch.nn.ReLU(), Conv2d_BN(embed_dim[0] // 4, embed_dim[0] // 2, 3, 2, 1, resolution=resolution // 4), torch.nn.ReLU(), Conv2d_BN(embed_dim[0] // 2, embed_dim[0], 3, 2, 1, resolution=resolution // 8)) resolution = img_size // patch_size attn_ratio = [embed_dim[i] / (key_dim[i] * num_heads[i]) for i in range(len(embed_dim))] self.blocks1 = [] self.blocks2 = [] self.blocks3 = [] for i, (stg, ed, kd, dpth, nh, ar, wd, do) in enumerate( zip(stages, embed_dim, key_dim, depth, num_heads, attn_ratio, window_size, down_ops)): for d in range(dpth): eval('self.blocks' + str(i+1)).append(EfficientViTBlock(stg, ed, kd, nh, ar, resolution, wd, kernels)) if do[0] == 'subsample': #('Subsample' stride) blk = eval('self.blocks' + str(i+2)) resolution_ = (resolution - 1) // do[1] + 1 blk.append(torch.nn.Sequential(Residual(Conv2d_BN(embed_dim[i], embed_dim[i], 3, 1, 1, groups=embed_dim[i], resolution=resolution)), Residual(FFN(embed_dim[i], int(embed_dim[i] * 2), resolution)),)) blk.append(PatchMerging(*embed_dim[i:i + 2], resolution)) resolution = resolution_ blk.append(torch.nn.Sequential(Residual(Conv2d_BN(embed_dim[i + 1], embed_dim[i + 1], 3, 1, 1, groups=embed_dim[i + 1], resolution=resolution)), Residual(FFN(embed_dim[i + 1], int(embed_dim[i + 1] * 2), resolution)),)) self.blocks1 = torch.nn.Sequential(*self.blocks1) self.blocks2 = torch.nn.Sequential(*self.blocks2) self.blocks3 = torch.nn.Sequential(*self.blocks3) self.frozen_stages = frozen_stages # freeze the patch embedding self._freeze_stages() if pretrained is not None: self.init_weights(pretrained=pretrained) def _freeze_stages(self): if self.frozen_stages >= 0: self.patch_embed.eval() for param in self.patch_embed.parameters(): param.requires_grad = False def init_weights(self, pretrained=None): """Initialize the weights in backbone. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. 
""" if isinstance(pretrained, str): logger = get_root_logger() checkpoint = _load_checkpoint(pretrained, map_location='cpu') if not isinstance(checkpoint, dict): raise RuntimeError( f'No state_dict found in checkpoint file {filename}') # get state_dict from checkpoint if 'state_dict' in checkpoint: state_dict = checkpoint['state_dict'] elif 'model' in checkpoint: state_dict = checkpoint['model'] else: state_dict = checkpoint # strip prefix of state_dict if list(state_dict.keys())[0].startswith('module.'): state_dict = {k[7:]: v for k, v in state_dict.items()} model_state_dict = self.state_dict() # bicubic interpolate attention_biases if not match rpe_idx_keys = [ k for k in state_dict.keys() if "attention_bias_idxs" in k] for k in rpe_idx_keys: print("deleting key: ", k) del state_dict[k] relative_position_bias_table_keys = [ k for k in state_dict.keys() if "attention_biases" in k] for k in relative_position_bias_table_keys: relative_position_bias_table_pretrained = state_dict[k] relative_position_bias_table_current = model_state_dict[k] nH1, L1 = relative_position_bias_table_pretrained.size() nH2, L2 = relative_position_bias_table_current.size() if nH1 != nH2: logger.warning(f"Error in loading {k} due to different number of heads") else: if L1 != L2: print("resizing key {} from {} * {} to {} * {}".format(k, L1, L1, L2, L2)) # bicubic interpolate relative_position_bias_table if not match S1 = int(L1 ** 0.5) S2 = int(L2 ** 0.5) relative_position_bias_table_pretrained_resized = torch.nn.functional.interpolate( relative_position_bias_table_pretrained.view(1, nH1, S1, S1), size=(S2, S2), mode='bicubic') state_dict[k] = relative_position_bias_table_pretrained_resized.view( nH2, L2) load_state_dict(self, state_dict, strict=False, logger=logger) @torch.jit.ignore def no_weight_decay(self): return {x for x in self.state_dict().keys() if 'attention_biases' in x} def train(self, mode=True): """Convert the model into training mode while keep layers freezed.""" super(EfficientViT, self).train(mode) self._freeze_stages() if mode: for m in self.modules(): if isinstance(m, _BatchNorm): m.eval() def forward(self, x): x = self.patch_embed(x) outs = [] x = self.blocks1(x) outs.append(x) x = self.blocks2(x) outs.append(x) x = self.blocks3(x) outs.append(x) return tuple(outs) EfficientViT_m0 = { 'img_size': 224, 'patch_size': 16, 'embed_dim': [64, 128, 192], 'depth': [1, 2, 3], 'num_heads': [4, 4, 4], 'window_size': [7, 7, 7], 'kernels': [7, 5, 3, 3], } EfficientViT_m1 = { 'img_size': 224, 'patch_size': 16, 'embed_dim': [128, 144, 192], 'depth': [1, 2, 3], 'num_heads': [2, 3, 3], 'window_size': [7, 7, 7], 'kernels': [7, 5, 3, 3], } EfficientViT_m2 = { 'img_size': 224, 'patch_size': 16, 'embed_dim': [128, 192, 224], 'depth': [1, 2, 3], 'num_heads': [4, 3, 2], 'window_size': [7, 7, 7], 'kernels': [7, 5, 3, 3], } EfficientViT_m3 = { 'img_size': 224, 'patch_size': 16, 'embed_dim': [128, 240, 320], 'depth': [1, 2, 3], 'num_heads': [4, 3, 4], 'window_size': [7, 7, 7], 'kernels': [5, 5, 5, 5], } EfficientViT_m4 = { 'img_size': 224, 'patch_size': 16, 'embed_dim': [128, 256, 384], 'depth': [1, 2, 3], 'num_heads': [4, 4, 4], 'window_size': [7, 7, 7], 'kernels': [7, 5, 3, 3], } EfficientViT_m5 = { 'img_size': 224, 'patch_size': 16, 'embed_dim': [192, 288, 384], 'depth': [1, 3, 4], 'num_heads': [3, 3, 4], 'window_size': [7, 7, 7], 'kernels': [7, 5, 3, 3], } @BACKBONES.register_module() def EfficientViT_M0(pretrained=False, frozen_stages=0, distillation=False, fuse=False, pretrained_cfg=None, model_cfg=EfficientViT_m0): 
model = EfficientViT(frozen_stages=frozen_stages, distillation=distillation, pretrained=pretrained, **model_cfg) if fuse: replace_batchnorm(model) return model @BACKBONES.register_module() def EfficientViT_M1(pretrained=False, frozen_stages=0, distillation=False, fuse=False, pretrained_cfg=None, model_cfg=EfficientViT_m1): model = EfficientViT(frozen_stages=frozen_stages, distillation=distillation, pretrained=pretrained, **model_cfg) if fuse: replace_batchnorm(model) return model @BACKBONES.register_module() def EfficientViT_M2(pretrained=False, frozen_stages=0, distillation=False, fuse=False, pretrained_cfg=None, model_cfg=EfficientViT_m2): model = EfficientViT(frozen_stages=frozen_stages, distillation=distillation, pretrained=pretrained, **model_cfg) if fuse: replace_batchnorm(model) return model @BACKBONES.register_module() def EfficientViT_M3(pretrained=False, frozen_stages=0, distillation=False, fuse=False, pretrained_cfg=None, model_cfg=EfficientViT_m3): model = EfficientViT(frozen_stages=frozen_stages, distillation=distillation, pretrained=pretrained, **model_cfg) if fuse: replace_batchnorm(model) return model @BACKBONES.register_module() def EfficientViT_M4(pretrained=False, frozen_stages=0, distillation=False, fuse=False, pretrained_cfg=None, model_cfg=EfficientViT_m4): model = EfficientViT(frozen_stages=frozen_stages, distillation=distillation, pretrained=pretrained, **model_cfg) if fuse: replace_batchnorm(model) return model @BACKBONES.register_module() def EfficientViT_M5(pretrained=False, frozen_stages=0, distillation=False, fuse=False, pretrained_cfg=None, model_cfg=EfficientViT_m5): model = EfficientViT(frozen_stages=frozen_stages, distillation=distillation, pretrained=pretrained, **model_cfg) if fuse: replace_batchnorm(model) return model
Cream/EfficientViT/downstream/efficientvit.py/0
{ "file_path": "Cream/EfficientViT/downstream/efficientvit.py", "repo_id": "Cream", "token_count": 10995 }
314
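A note on the checkpoint-loading logic in `init_weights` above: when the pretraining and fine-tuning resolutions differ, the flat `attention_biases` tables are viewed as a square grid and resized with bicubic interpolation before being loaded. Below is a self-contained sketch of just that resize step; the head count and grid sizes are illustrative, not taken from any particular checkpoint.

```python
import torch
import torch.nn.functional as F

# Pretrained bias table: (num_heads, L1) with L1 = S1 * S1 grid positions.
num_heads, S1, S2 = 4, 7, 14
pretrained_biases = torch.randn(num_heads, S1 * S1)

# View as a (1, num_heads, S1, S1) grid, resize, then flatten back to (num_heads, L2).
resized = F.interpolate(
    pretrained_biases.view(1, num_heads, S1, S1),
    size=(S2, S2),
    mode='bicubic',
)
resized = resized.view(num_heads, S2 * S2)
print(pretrained_biases.shape, '->', resized.shape)  # torch.Size([4, 49]) -> torch.Size([4, 196])
```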
import os import json from torchvision import datasets, transforms from torchvision.datasets.folder import ImageFolder, default_loader from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.data import create_transform class INatDataset(ImageFolder): def __init__(self, root, train=True, year=2018, transform=None, target_transform=None, category='name', loader=default_loader): self.transform = transform self.loader = loader self.target_transform = target_transform self.year = year # assert category in ['kingdom','phylum','class','order','supercategory','family','genus','name'] path_json = os.path.join(root, f'{"train" if train else "val"}{year}.json') with open(path_json) as json_file: data = json.load(json_file) with open(os.path.join(root, 'categories.json')) as json_file: data_catg = json.load(json_file) path_json_for_targeter = os.path.join(root, f"train{year}.json") with open(path_json_for_targeter) as json_file: data_for_targeter = json.load(json_file) targeter = {} indexer = 0 for elem in data_for_targeter['annotations']: king = [] king.append(data_catg[int(elem['category_id'])][category]) if king[0] not in targeter.keys(): targeter[king[0]] = indexer indexer += 1 self.nb_classes = len(targeter) self.samples = [] for elem in data['images']: cut = elem['file_name'].split('/') target_current = int(cut[2]) path_current = os.path.join(root, cut[0], cut[2], cut[3]) categors = data_catg[target_current] target_current_true = targeter[categors[category]] self.samples.append((path_current, target_current_true)) # __getitem__ and __len__ inherited from ImageFolder def build_dataset(is_train, args): transform = build_transform(is_train, args) if args.data_set == 'CIFAR100': dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform, download=True) nb_classes = 100 elif args.data_set == 'CIFAR10': dataset = datasets.CIFAR10(args.data_path, train=is_train, transform=transform, download=True) nb_classes = 10 elif args.data_set == 'IMNET': root = os.path.join(args.data_path, 'train' if is_train else 'val') dataset = datasets.ImageFolder(root, transform=transform) nb_classes = 1000 elif args.data_set == 'INAT': dataset = INatDataset(args.data_path, train=is_train, year=2018, category=args.inat_category, transform=transform) nb_classes = dataset.nb_classes elif args.data_set == 'INAT19': dataset = INatDataset(args.data_path, train=is_train, year=2019, category=args.inat_category, transform=transform) nb_classes = dataset.nb_classes return dataset, nb_classes def build_transform(is_train, args): resize_im = args.input_size > 32 if is_train: # this should always dispatch to transforms_imagenet_train transform = create_transform( input_size=args.input_size, is_training=True, color_jitter=args.color_jitter, auto_augment=args.aa, interpolation=args.train_interpolation, re_prob=args.reprob, re_mode=args.remode, re_count=args.recount, ) if not resize_im: # replace RandomResizedCropAndInterpolation with # RandomCrop transform.transforms[0] = transforms.RandomCrop( args.input_size, padding=4) return transform t = [] if resize_im: size = int((256 / 224) * args.input_size) t.append( transforms.Resize(size, interpolation=3), # to maintain same ratio w.r.t. 224 images ) t.append(transforms.CenterCrop(args.input_size)) t.append(transforms.ToTensor()) t.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)) return transforms.Compose(t)
Cream/MiniViT/Mini-DeiT/datasets.py/0
{ "file_path": "Cream/MiniViT/Mini-DeiT/datasets.py", "repo_id": "Cream", "token_count": 1890 }
315
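`build_dataset` and `build_transform` above read everything from an argparse-style namespace. The sketch below lists the fields they expect and rebuilds the evaluation-time pipeline for `input_size > 32` inline (the resize factor keeps the standard 256/224 crop ratio). The field values are illustrative defaults, not prescribed by this file.

```python
from types import SimpleNamespace

from torchvision import transforms
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD

# Fields read by build_dataset / build_transform (values here are only examples).
args = SimpleNamespace(
    data_set='IMNET', data_path='/path/to/imagenet', input_size=224,
    color_jitter=0.4, aa='rand-m9-mstd0.5-inc1', train_interpolation='bicubic',
    reprob=0.25, remode='pixel', recount=1, inat_category='name',
)

# Equivalent of the evaluation branch of build_transform when input_size > 32.
size = int((256 / 224) * args.input_size)  # resize so the center crop keeps the 256/224 ratio
eval_transform = transforms.Compose([
    transforms.Resize(size, interpolation=3),
    transforms.CenterCrop(args.input_size),
    transforms.ToTensor(),
    transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
])
print(eval_transform)
```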
import torch class SubsetRandomSampler(torch.utils.data.Sampler): r"""Samples elements randomly from a given list of indices, without replacement. Arguments: indices (sequence): a sequence of indices """ def __init__(self, indices): self.epoch = 0 self.indices = indices def __iter__(self): return (self.indices[i] for i in torch.randperm(len(self.indices))) def __len__(self): return len(self.indices) def set_epoch(self, epoch): self.epoch = epoch
Cream/MiniViT/Mini-Swin/data/samplers.py/0
{ "file_path": "Cream/MiniViT/Mini-Swin/data/samplers.py", "repo_id": "Cream", "token_count": 209 }
316
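One behavioural detail of the sampler above is easy to miss: `__iter__` draws a fresh `torch.randperm` on every pass, so shuffling happens regardless of `set_epoch`, which only records the epoch number for API compatibility with `DistributedSampler`. A minimal usage sketch follows; the class is repeated so the snippet runs on its own.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset


class SubsetRandomSampler(torch.utils.data.Sampler):
    """Same behaviour as the sampler above: permute a fixed index list on every pass."""

    def __init__(self, indices):
        self.epoch = 0
        self.indices = indices

    def __iter__(self):
        return (self.indices[i] for i in torch.randperm(len(self.indices)))

    def __len__(self):
        return len(self.indices)

    def set_epoch(self, epoch):
        self.epoch = epoch


dataset = TensorDataset(torch.arange(10).float().unsqueeze(1))
sampler = SubsetRandomSampler(list(range(0, 10, 2)))  # restrict to the even indices
loader = DataLoader(dataset, batch_size=2, sampler=sampler)

for epoch in range(2):
    sampler.set_epoch(epoch)  # bookkeeping only; the permutation is redrawn anyway
    print([batch[0].squeeze(1).tolist() for batch in loader])
```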
# MiniViT: Compressing Vision Transformers with Weight Multiplexing :sunny: Hiring research interns for neural architecture search, tiny transformer design, model compression projects: houwen.peng@microsoft.com. **This is an official implementation of [MiniViT](https://openaccess.thecvf.com/content/CVPR2022/html/Zhang_MiniViT_Compressing_Vision_Transformers_With_Weight_Multiplexing_CVPR_2022_paper.html), including Mini-DeiT and Mini-Swin.** **[CVPR'2022]** - [MiniViT: Compressing Vision Transformers with Weight Multiplexing](https://openaccess.thecvf.com/content/CVPR2022/html/Zhang_MiniViT_Compressing_Vision_Transformers_With_Weight_Multiplexing_CVPR_2022_paper.html) MiniViT is a new compression framework that achieves parameter reduction in vision transformers while retaining the same performance. The central idea of MiniViT is to multiplex the weights of consecutive transformer blocks. Specifically, we make the weights shared across layers, while imposing a transformation on the weights to increase diversity. Weight distillation over self-attention is also applied to transfer knowledge from large-scale ViT models to weight-multiplexed compact models. <div align="center"> <img width="100%" src=".figure/framework.png"/> </div> ## Highlights - **Accurate** MiniViT reduces the size of Swin-B by **48%**, while achieving **1.0%** better Top-1 accuracy on ImageNet. - **Small** MiniViT can compress DeiT-B (86M) to **9M** (**9.7x**), without seriously compromising the accuracy. ## Model Zoo For evaluation, we provide the checkpoints of our models in the following table. Model | Params. | Input | Top-1 Acc. % | Top-5 Acc. % | Download link --- |:---:|:---:|:---:|:---:|:---: Mini-DeiT-Ti | 3M | 224x224 | 73.0 | 91.6 | [model](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/mini_deit_tiny_patch16_224.pth), [log](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/log_mini_deit_tiny.txt) Mini-DeiT-S | 11M | 224x224 | 80.9 | 95.6 | [model](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/mini_deit_small_patch16_224.pth), [log](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/log_mini_deit_small.txt) Mini-DeiT-B | 44M | 224x224 | 83.2 | 96.5 | [model](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/mini_deit_base_patch16_224.pth), [log](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/log_mini_deit_base.txt) Mini-DeiT-B| 44M | 384x384 | 84.9 | 97.2 | [model](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/mini_deit_base_patch16_384.pth), [log](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/log_mini_deit_base_384.txt) Mini-Swin-T | 12M | 224x224 | 81.3 | 95.7 | [model](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/mini-swin-tiny-12m.pth), [log](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/log_mini_swin_tiny.txt) Mini-Swin-S | 26M | 224x224 | 83.9 | 97.0 | [model](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/mini-swin-small-26m.pth), [log](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/log_mini_swin_small.txt) Mini-Swin-B | 46M | 224x224 | 84.5| 97.3 | [model](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/mini-swin-base-46m.pth), [log](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/log_mini_swin_base.txt) 
Mini-Swin-B | 47M | 384x384 | 85.5 | 97.6 | [model](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/mini-swin-base-224to384.pth), [log](https://github.com/DominickZhang/MiniViT-model-zoo/releases/download/v1.0.0/log_mini_swin_base_384.txt)

## Getting Started

- For **Mini-DeiT**, please see [Mini-DeiT](./Mini-DeiT) for detailed instructions.
- For **Mini-Swin**, please see [Mini-Swin](./Mini-Swin) for a quick start.

## Bibtex

If this repo is helpful to you, please consider citing it. Thank you! :)
```bibtex
@InProceedings{MiniViT,
    title = {MiniViT: Compressing Vision Transformers With Weight Multiplexing},
    author = {Zhang, Jinnian and Peng, Houwen and Wu, Kan and Liu, Mengchen and Xiao, Bin and Fu, Jianlong and Yuan, Lu},
    booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
    month = {June},
    year = {2022},
    pages = {12145-12154}
}
```
Cream/MiniViT/README.md/0
{ "file_path": "Cream/MiniViT/README.md", "repo_id": "Cream", "token_count": 1700 }
317
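The weight-multiplexing idea summarised in the README above, one set of transformer-block weights reused by several consecutive layers with small per-layer transformations so the layers do not collapse into the same function, can be illustrated with a toy module. This is only a conceptual sketch: the real Mini-DeiT/Mini-Swin blocks use different transformations and additionally apply weight distillation over self-attention.

```python
import torch
import torch.nn as nn


class MultiplexedBlocks(nn.Module):
    """num_layers logical transformer layers that share one block's weights.

    A lightweight per-layer linear transformation is the only non-shared part,
    giving each depth a slightly different function despite the shared weights.
    """

    def __init__(self, dim=192, num_heads=3, num_layers=4):
        super().__init__()
        self.shared_block = nn.TransformerEncoderLayer(
            d_model=dim, nhead=num_heads, dim_feedforward=2 * dim, batch_first=True)
        self.adapters = nn.ModuleList(nn.Linear(dim, dim) for _ in range(num_layers))

    def forward(self, x):
        for adapter in self.adapters:
            x = self.shared_block(adapter(x))
        return x


blocks = MultiplexedBlocks()
shared = sum(p.numel() for p in blocks.shared_block.parameters())
total = sum(p.numel() for p in blocks.parameters())
print(f"shared block: {shared:,} params; 4 multiplexed layers in total: {total:,} params")
print(blocks(torch.randn(2, 49, 192)).shape)  # (batch, tokens, dim) preserved
```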
""" CLIP Model Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. """ import functools import inspect from copy import deepcopy import os import random import copy from contextlib import nullcontext from argparse import Namespace from dataclasses import dataclass import functools import logging import math from typing import Tuple, Union, Callable, Optional import numpy as np import torch import torch.nn.functional as F from torch import nn from torch.utils.checkpoint import checkpoint # apply the non-reentrant variant of checkpoint if 'use_reentrant' in inspect.signature(checkpoint).parameters: checkpoint = functools.partial(checkpoint, use_reentrant=False) from .timm_model import TimmModel from .utils import freeze_batch_norm_2d, to_2tuple from .resnet import ModifiedResNet from .l0module import L0Module def load_state_dict(model, state_dict): model.load_state_dict(state_dict, strict=True) class LayerNorm(nn.LayerNorm): """Subclass torch's LayerNorm to handle fp16.""" def forward(self, x: torch.Tensor, hidden_z=None): ''' x: (N, L, C) hidden_z: (C,) ''' self.hidden_z = hidden_z orig_type = x.dtype if hidden_z is None: x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) else: assert len(self.normalized_shape) == 1 # [TODO] weighted layer norm remaining_index = torch.where(hidden_z != 0)[0] compressed_input = torch.index_select( x, dim=-1, index=remaining_index) compressed_weight = self.weight[remaining_index] compressed_bias = self.bias[remaining_index] normalized_shape = len(remaining_index) normed_input = F.layer_norm( compressed_input, [normalized_shape], compressed_weight, compressed_bias, self.eps) x = x.new_zeros(x.shape) x[..., remaining_index] = normed_input.to(orig_type) return x.to(orig_type) def prune(self): if self.hidden_z is None: return self hidden_z = self.hidden_z assert len(self.normalized_shape) == 1 remaining_index = torch.where(hidden_z != 0)[0] compressed_weight = self.weight[remaining_index] compressed_bias = self.bias[remaining_index] # m = self m = LayerNorm(remaining_index.shape[0]).to(self.weight.device) m.normalized_shape = (len(remaining_index),) m.weight.data = compressed_weight.contiguous() m.bias.data = compressed_bias.contiguous() return m def prune_mul_hidden(self): if self.hidden_z is None: return self hidden_z = self.hidden_z assert len(self.normalized_shape) == 1 remaining_index = torch.where(hidden_z != 0)[0] compressed_weight = self.weight[remaining_index] * \ hidden_z[remaining_index] compressed_bias = self.bias[remaining_index] * \ hidden_z[remaining_index] m = self m.normalized_shape = (len(remaining_index),) m.weight.data = compressed_weight.contiguous() m.bias.data = compressed_bias.contiguous() return m class QuickGELU(nn.Module): # NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory def forward(self, x: torch.Tensor): return x * torch.sigmoid(1.702 * x) class Mlp(nn.Module): def __init__(self, d_model, mlp_width, act_layer=nn.GELU, scale_fc=False): super().__init__() self.d_model = d_model self.mlp_width = mlp_width self.c_fc = nn.Linear(d_model, mlp_width) assert not scale_fc # self.ln = LayerNorm(mlp_width) if scale_fc else nn.Identity() self.act_layer = act_layer self.scale_fc = scale_fc self.gelu = act_layer() self.c_proj = nn.Linear(mlp_width, d_model) def forward(self, x, hidden_z=None, intermediate_z=None): ''' x: (N, L, C) intermediate_z: (mlp_width,) or (1, 1, mlp_width) hidden_z: (embed_dim,) or (1, 1, embed_dim) ''' self.hidden_z = hidden_z 
self.intermediate_z = intermediate_z x = self.c_fc(x) x = self.gelu(x) if intermediate_z is not None: x = torch.mul(x, intermediate_z) x = self.c_proj(x) if hidden_z is not None: x = torch.mul(x, hidden_z) return x def prune(self): device = self.c_fc.weight.device if self.hidden_z is None: self.hidden_z = torch.ones( (self.d_model,), dtype=torch.bool, device=device) if self.intermediate_z is None: self.intermediate_z = torch.ones( (self.mlp_width,), dtype=torch.bool, device=device) hidden_r = torch.where(self.hidden_z != 0)[0] intermediate_r = torch.where(self.intermediate_z != 0)[0] d_model = len(hidden_r) mlp_width = len(intermediate_r) # m = self m = copy.deepcopy(self) m.c_fc = nn.Linear(hidden_r.shape[0], intermediate_r.shape[0]) m.c_proj = nn.Linear(intermediate_r.shape[0], hidden_r.shape[0]) m.d_model = d_model m.mlp_width = mlp_width m.c_fc.weight = nn.Parameter( (self.c_fc.weight[intermediate_r][:, hidden_r]).contiguous()) m.c_fc.bias = nn.Parameter( (self.c_fc.bias[intermediate_r]).contiguous()) m.c_proj.weight = nn.Parameter(((self.c_proj.weight * self.intermediate_z.view(1, -1) * self.hidden_z.view(-1, 1))[hidden_r][:, intermediate_r]).contiguous()) m.c_proj.bias = nn.Parameter( ((self.c_proj.bias * self.hidden_z)[hidden_r]).contiguous()) return m class MultiheadAttention(nn.MultiheadAttention): def prune(self): device = self.in_proj_weight.device if self.hidden_z is None: self.hidden_z = torch.ones( (self.embed_dim,), dtype=torch.bool, device=device) if self.head_z is None: self.head_z = torch.ones( (self.num_heads,), dtype=torch.bool, device=device) hidden_r = torch.where(self.hidden_z != 0)[0] head_r = torch.where(self.head_z != 0)[0] d_model = len(hidden_r) d_head = len(head_r) org_num_heads = self.num_heads org_head_dim = self.head_dim org_embed_dim = self.embed_dim mod = self mod.use_naive_compute = True mod.embed_dim = d_model mod.head_dim = self.head_dim mod.num_heads = d_head inter_dim = d_head * self.head_dim mod.in_proj_weight = nn.Parameter(self.in_proj_weight.view( 3, org_num_heads, org_head_dim, org_embed_dim)[:, head_r][..., hidden_r].reshape(-1, d_model)) if self.in_proj_bias is not None: mod.in_proj_bias = nn.Parameter(self.in_proj_bias.view( 3, org_num_heads, org_head_dim)[:, head_r].reshape(-1)) mod.out_proj.weight = nn.Parameter( ((self.out_proj.weight * self.hidden_z.view(-1, 1)). view(org_embed_dim, org_num_heads, org_head_dim) * self.head_z.view(1, org_num_heads, 1))[hidden_r][:, head_r].reshape(d_model, -1) ) if self.out_proj.bias is not None: mod.out_proj.bias = nn.Parameter( (self.out_proj.bias * self.hidden_z.view(-1,)). 
view(org_embed_dim)[hidden_r].reshape(-1) ) return mod class ResidualAttentionBlock(nn.Module): def __init__( self, d_model: int, n_head: int, mlp_ratio: float = 4.0, act_layer: Callable = nn.GELU, scale_cosine_attn: bool = False, scale_heads: bool = False, scale_attn: bool = False, scale_fc: bool = False, ): super().__init__() self.ln_1 = LayerNorm(d_model) # FIXME torchscript issues need to be resolved for custom attention # if scale_cosine_attn or scale_heads: # self.attn = Attention( # d_model, n_head, # scaled_cosine=scale_cosine_attn, # scale_heads=scale_heads, # ) self.attn = MultiheadAttention(d_model, n_head) assert not scale_attn self.ln_attn = LayerNorm(d_model) if scale_attn else nn.Identity() self.ln_2 = LayerNorm(d_model) mlp_width = int(d_model * mlp_ratio) self.mlp = Mlp(d_model, mlp_width, act_layer, scale_fc) def attention(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None, *, head_z: Optional[torch.Tensor] = None, hidden_z: Optional[torch.Tensor] = None, ): self.attn.head_z = head_z self.attn.hidden_z = hidden_z if (head_z is None and hidden_z is None and not getattr(self.attn, 'use_naive_compute', False)): return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask)[0] else: # the following code does not support `attn_mask` # x: (length, batch_size, embed_dim) n_head = self.attn.num_heads length, batch_size, d_model = x.shape ws = self.attn.in_proj_weight.chunk(3) bs = self.attn.in_proj_bias.chunk(3) dim_per_head = len(ws[0]) // n_head # (length, batch_size, n_head * dim_per_head) q, k, v = [F.linear(x, w, b) for w, b in zip(ws, bs)] # (batch_size * n_head, length, d_head) q = q.reshape(length, batch_size * n_head, -1).transpose(0, 1) k = k.reshape(length, batch_size * n_head, -1).transpose(0, 1) v = v.reshape(length, batch_size * n_head, -1).transpose(0, 1) scale = dim_per_head ** -0.5 q *= scale # (batch_size * n_head, length, length) sim = q @ k.transpose(1, 2) if attn_mask is not None: sim += attn_mask sim = torch.softmax(sim, -1) # (batch_size * n_head, length, head_dim) out = sim @ v if head_z is not None: out = out.view(batch_size, n_head, length, dim_per_head) # head_z: (1, n_head, 1, 1) out *= head_z.view(1, -1, 1, 1) out = out.view(batch_size * n_head, length, dim_per_head) out = out.transpose(0, 1).reshape(length, batch_size, -1) out = F.linear(out, self.attn.out_proj.weight, self.attn.out_proj.bias) if hidden_z is not None: out = torch.mul(out, hidden_z) return out def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None, hidden_z: Optional[torch.Tensor] = None, heads_z: Optional[torch.Tensor] = None, mha_z: Optional[torch.Tensor] = None, intermediate_z: Optional[torch.Tensor] = None, ffn_z: Optional[torch.Tensor] = None): self.hidden_z = hidden_z self.heads_z = heads_z self.mha_z = mha_z self.intermediate_z = intermediate_z self.ffn_z = ffn_z # x: (length, batch_size, embed_dim) e.g. 
50, 128, 768 for vision if self.attention is not None: attn_out = self.attention(self.ln_1(x, hidden_z=hidden_z), attn_mask=attn_mask, head_z=heads_z, hidden_z=hidden_z) if mha_z is not None: # a number attn_out = attn_out.mul(mha_z) x = x + attn_out if self.mlp is not None: ln_2_out = self.ln_2(x, hidden_z=hidden_z) mlp_out = self.mlp(ln_2_out, intermediate_z=intermediate_z, hidden_z=hidden_z) if ffn_z is not None: # a number mlp_out = mlp_out.mul(ffn_z) x = x + mlp_out return x def prune(self): mod = self if (self.mha_z is not None and self.mha_z.item() == 0) or (self.heads_z).sum() == 0: mod.ln_1 = None mod.attn = None mod.attention = None else: mod.ln_1 = mod.ln_1.prune() mod.attn = mod.attn.prune() if self.mha_z is not None: mod.attn.out_proj.weight.data *= self.mha_z mod.attn.out_proj.bias.data *= self.mha_z if self.ffn_z is not None and self.ffn_z.item() == 0: mod.ln_2 = None mod.mlp = None else: mod.ln_2 = mod.ln_2.prune() mod.mlp = mod.mlp.prune() if self.ffn_z is not None: mod.mlp.c_proj.weight.data *= self.ffn_z mod.mlp.c_proj.bias.data *= self.ffn_z return mod class Transformer(nn.Module): def __init__(self, width: int, layers: int, heads: int, mlp_ratio: float = 4.0, act_layer: Callable = nn.GELU): super().__init__() self.width = width self.layers = layers self.grad_checkpointing = False assert width % heads == 0 self.head_dim = width // heads self.num_heads = heads self.mlp_ratio = mlp_ratio self.resblocks = nn.ModuleList([ ResidualAttentionBlock( width, heads, mlp_ratio, act_layer=act_layer) for _ in range(layers) ]) def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None, hidden_z: Optional[torch.Tensor] = None, heads_z: Optional[torch.Tensor] = None, mha_z: Optional[torch.Tensor] = None, intermediate_z: Optional[torch.Tensor] = None, ffn_z: Optional[torch.Tensor] = None): return self.infer_blocks(x, attn_mask, hidden_z=hidden_z, heads_z=heads_z, mha_z=mha_z, intermediate_z=intermediate_z, ffn_z=ffn_z) def infer_blocks(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None, block_idxs=None, hidden_z: Optional[torch.Tensor] = None, heads_z: Optional[torch.Tensor] = None, mha_z: Optional[torch.Tensor] = None, intermediate_z: Optional[torch.Tensor] = None, ffn_z: Optional[torch.Tensor] = None): num_layers = self.layers if hidden_z is not None: assert hidden_z.shape == (self.width,) if heads_z is not None: if heads_z.ndim == 5: heads_z = heads_z.view(num_layers, self.num_heads) assert heads_z.shape in [(num_layers, self.num_heads), (self.num_heads,)], ( heads_z.shape, (num_layers, self.num_heads)) if mha_z is not None: assert mha_z.shape == (num_layers,), mha_z.shape if intermediate_z is not None: if intermediate_z.ndim == 4: intermediate_z = intermediate_z.view(num_layers, -1) assert intermediate_z.shape in [ (num_layers, self.mlp_ratio * self.width), (self.mlp_ratio * self.width,)], intermediate_z.shape if ffn_z is not None: assert ffn_z.shape == (num_layers,), ffn_z.shape def _get_zi(z, i, ndim=2): if z is None: return None if z.ndim == ndim: return z[i] return z block_idxs = block_idxs or list(range(self.layers)) for i in block_idxs: r = self.resblocks[i] if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(r, x, attn_mask, hidden_z, _get_zi(heads_z, i), _get_zi(mha_z, i, ndim=1), _get_zi(intermediate_z, i), _get_zi(ffn_z, i, ndim=1)) else: x = r(x, attn_mask=attn_mask, hidden_z=hidden_z, heads_z=_get_zi(heads_z, i), mha_z=_get_zi(mha_z, i, ndim=1), intermediate_z=_get_zi(intermediate_z, i), ffn_z=_get_zi(ffn_z, i, 
ndim=1)) return x @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable def extra_repr(self): return f'grad_checkpointing={self.grad_checkpointing}' def prune(self): mod = self for i in range(len(self.resblocks)): self.resblocks[i] = self.resblocks[i].prune() return mod class VisualTransformer(nn.Module): def __init__( self, image_size: int, patch_size: int, width: int, layers: int, heads: int, mlp_ratio: float, output_dim: int, act_layer: Callable = nn.GELU, teacher_width: int = -1, ): super().__init__() self.image_size = to_2tuple(image_size) self.patch_size = to_2tuple(patch_size) self.grid_size = ( self.image_size[0] // self.patch_size[0], self.image_size[1] // self.patch_size[1]) self.output_dim = output_dim self.embed_dim = width self.layers = layers self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) scale = width ** -0.5 self.class_embedding = nn.Parameter(scale * torch.randn(width)) self.positional_embedding = nn.Parameter( scale * torch.randn(self.grid_size[0] * self.grid_size[1] + 1, width)) self.ln_pre = LayerNorm(width) self.transformer = Transformer( width, layers, heads, mlp_ratio, act_layer=act_layer) self.head_dim = width // heads self.ln_post = LayerNorm(width) # image proj if teacher_width > 0: self.proj = nn.Parameter(torch.empty( teacher_width, output_dim), requires_grad=False) else: self.proj = nn.Parameter(scale * torch.randn(width, output_dim)) def lock(self, unlocked_groups=0, freeze_bn_stats=False): assert unlocked_groups == 0, 'partial locking not currently supported for this model' for param in self.parameters(): param.requires_grad = False @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.transformer.set_grad_checkpointing(enable) def forward(self, x: torch.Tensor, hidden_z: Optional[torch.Tensor] = None, heads_z: Optional[torch.Tensor] = None, mha_z: Optional[torch.Tensor] = None, intermediate_z: Optional[torch.Tensor] = None, ffn_z: Optional[torch.Tensor] = None, embed_dim_z: Optional[torch.Tensor] = None): self.hidden_z = hidden_z self.embed_dim_z = embed_dim_z x = x.to(self.conv1.weight.device) x = self.conv1(x) # shape = [*, width, grid, grid] # shape = [*, width, grid ** 2] x = x.reshape(x.shape[0], x.shape[1], -1) x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] # the first token is the class token. 
x = torch.cat( [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, 1 + grid ** 2, width] x = x + self.positional_embedding.to(x.dtype) # 128, 50, 768 if hidden_z is not None: x = torch.mul(x, hidden_z) x = self.ln_pre(x, hidden_z=hidden_z) x = x.permute(1, 0, 2) # NLD -> LND 50, 128, 768 x = self.transformer(x, hidden_z=hidden_z, heads_z=heads_z, mha_z=mha_z, intermediate_z=intermediate_z, ffn_z=ffn_z) x = x.permute(1, 0, 2) # LND -> NLD # select class token x = self.ln_post(x[:, 0, :], hidden_z=hidden_z) if self.proj is not None: x = self.get_proj_feature(x) return x def get_proj_feature(self, x): if self.proj is not None: x = x @ self.proj return x def extra_repr(self): return 'image_size={}, output_dim={}'.format(self.image_size, self.output_dim) def prune(self): hidden_r = torch.where(self.hidden_z != 0)[0] self.conv1.weight = nn.Parameter( (self.conv1.weight.data * self.hidden_z.view(-1, 1, 1, 1))[hidden_r]) if self.conv1.bias is not None: self.conv1.bias = nn.Parameter( (self.conv1.bias * self.hidden_z.view(-1,))[hidden_r]) self.class_embedding = nn.Parameter( (self.class_embedding * self.hidden_z.view(-1,))[hidden_r]) self.positional_embedding = nn.Parameter( (self.positional_embedding * self.hidden_z.view(1, -1))[:, hidden_r]) self.ln_pre = self.ln_pre.prune() self.transformer = self.transformer.prune() self.ln_post = self.ln_post.prune() if self.embed_dim_z is not None: embed_dim_r = self.embed_dim_z > 0 self.proj = nn.Parameter((self.proj * self.hidden_z.view(-1, 1) * self.embed_dim_z.view(1, -1))[hidden_r][:, embed_dim_r]) else: self.proj = nn.Parameter( (self.proj * self.hidden_z.view(-1, 1))[hidden_r]) return self @dataclass class CLIPVisionCfg: layers: Union[Tuple[int, int, int, int], int] = 12 width: int = 768 teacher_width: int = -1 head_width: int = 64 mlp_ratio: float = 4.0 patch_size: int = 16 image_size: Union[Tuple[int, int], int] = 224 timm_model_name: str = None # a valid model name overrides layers, width, patch_size # use (imagenet) pretrained weights for named model timm_model_pretrained: bool = False # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '') timm_pool: str = 'avg' # linear projection for timm model output ('linear', 'mlp', '') timm_proj: str = 'linear' @dataclass class CLIPTextCfg: context_length: int = 77 vocab_size: int = 49408 width: int = 512 teacher_width: int = -1 heads: int = 8 layers: int = 12 class ImageEncoder(nn.Module): def __init__(self, embed_dim, vision_cfg, quick_gelu, l0_module_image=False, mask_cfg=None): super().__init__() act_layer = QuickGELU if quick_gelu else nn.GELU if vision_cfg.timm_model_name: self.visual = TimmModel( vision_cfg.timm_model_name, pretrained=vision_cfg.timm_model_pretrained, pool=vision_cfg.timm_pool, proj=vision_cfg.timm_proj, embed_dim=embed_dim, image_size=vision_cfg.image_size ) act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models elif isinstance(vision_cfg.layers, (tuple, list)): vision_heads = vision_cfg.width * 32 // vision_cfg.head_width self.visual = ModifiedResNet( layers=vision_cfg.layers, output_dim=embed_dim, heads=vision_heads, image_size=vision_cfg.image_size, width=vision_cfg.width ) else: vision_heads = vision_cfg.width // vision_cfg.head_width self.visual = VisualTransformer( image_size=vision_cfg.image_size, patch_size=vision_cfg.patch_size, width=vision_cfg.width, layers=vision_cfg.layers, heads=vision_heads, mlp_ratio=vision_cfg.mlp_ratio, output_dim=embed_dim, 
act_layer=act_layer, teacher_width=vision_cfg.teacher_width, ) self.init_parameters() if l0_module_image: logging.info('use l0_module_vision') config_mask = Namespace() config_mask.hidden_size = vision_cfg.width config_mask.intermediate_size = 4 * vision_cfg.width config_mask.num_attention_heads = vision_heads config_mask.num_hidden_layers = vision_cfg.layers config_mask.sparsity_warmup = mask_cfg.sparsity_warmup config_mask.sparsity = mask_cfg.sparsity config_mask.start_sparsity = mask_cfg.start_sparsity self.l0_module = L0Module(config_mask, lagrangian_warmup=config_mask.sparsity_warmup, start_sparsity=config_mask.start_sparsity, target_sparsity=config_mask.sparsity, pruning_type=["hidden", "heads", "intermediate"]) else: self.l0_module = None self.mask = None def init_parameters(self): if hasattr(self.visual, 'init_parameters'): self.visual.init_parameters() def forward(self, image, normalized=False, **mask): if self.l0_module is not None: mask = self.l0_module.forward() self.mask = mask image_features = self.visual(image, **mask) embed_dim_z = mask.get('embed_dim_z', None) if embed_dim_z is not None: image_features = image_features.mul(embed_dim_z) if normalized: image_features = F.normalize(image_features, dim=-1) return image_features def prune(self): self.visual = self.visual.prune() return self class TextEncoder(nn.Module): def __init__(self, embed_dim, text_cfg, quick_gelu, l0_module_text, mask_cfg=None): super().__init__() act_layer = QuickGELU if quick_gelu else nn.GELU self.context_length = text_cfg.context_length if text_cfg.layers > 0: self.transformer = Transformer( width=text_cfg.width, layers=text_cfg.layers, heads=text_cfg.heads, act_layer=act_layer, ) else: self.transformer = None self.text_projection = None if text_cfg.layers > 0: self.vocab_size = text_cfg.vocab_size self.token_embedding = nn.Embedding( text_cfg.vocab_size, text_cfg.width) self.positional_embedding = nn.Parameter( torch.empty(self.context_length, text_cfg.width)) self.ln_final = LayerNorm(text_cfg.width) if text_cfg.teacher_width > 0: self.text_projection = nn.Parameter(torch.empty( text_cfg.width, embed_dim), requires_grad=False) else: self.text_projection = nn.Parameter( torch.empty(text_cfg.width, embed_dim)) self.register_buffer( 'attn_mask', self.build_attention_mask(), persistent=False) else: self.token_embedding = None self.init_parameters() if l0_module_text: logging.info('use l0_module_text') config_mask = Namespace() config_mask.hidden_size = text_cfg.width config_mask.intermediate_size = 4 * text_cfg.width config_mask.num_attention_heads = text_cfg.heads config_mask.num_hidden_layers = text_cfg.layers config_mask.sparsity_warmup = mask_cfg.sparsity_warmup config_mask.sparsity = mask_cfg.sparsity config_mask.start_sparsity = mask_cfg.start_sparsity self.l0_module = L0Module(config_mask, lagrangian_warmup=config_mask.sparsity_warmup, start_sparsity=config_mask.start_sparsity, target_sparsity=config_mask.sparsity, pruning_type=["hidden", "heads", "intermediate"]) else: self.l0_module = None self.mask = None def init_parameters(self): if self.transformer is not None: nn.init.normal_(self.token_embedding.weight, std=0.02) nn.init.normal_(self.positional_embedding, std=0.01) proj_std = (self.transformer.width ** -0.5) * \ ((2 * self.transformer.layers) ** -0.5) attn_std = self.transformer.width ** -0.5 fc_std = (2 * self.transformer.width) ** -0.5 for block in self.transformer.resblocks: nn.init.normal_(block.attn.in_proj_weight, std=attn_std) nn.init.normal_(block.attn.out_proj.weight, 
std=proj_std) nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) if self.text_projection is not None: nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5) def build_attention_mask(self): # lazily create causal attention mask, with full attention between the vision tokens # pytorch uses additive attention mask; fill with -inf mask = torch.empty(self.context_length, self.context_length) mask.fill_(float("-inf")) mask.triu_(1) # zero out the lower diagonal return mask def encode_text(self, text, normalized=False, hidden_z: Optional[torch.Tensor] = None, heads_z: Optional[torch.Tensor] = None, mha_z: Optional[torch.Tensor] = None, intermediate_z: Optional[torch.Tensor] = None, ffn_z: Optional[torch.Tensor] = None, embed_dim_z: Optional[torch.Tensor] = None, ): self.hidden_z = hidden_z self.embed_dim_z = embed_dim_z text = text.to(self.token_embedding.weight.device) x = self.token_embedding(text) # [batch_size, n_ctx, d_model] x = x + self.positional_embedding if hidden_z is not None: x = torch.mul(x, hidden_z) x = x.permute(1, 0, 2) # NLD -> LND x = self.transformer(x, attn_mask=self.attn_mask, hidden_z=hidden_z, heads_z=heads_z, mha_z=mha_z, intermediate_z=intermediate_z, ffn_z=ffn_z) x = x.permute(1, 0, 2) # LND -> NLD x = self.ln_final(x, hidden_z) # if hidden_z is not None: # x = torch.mul(x, hidden_z) x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] # x.shape = [batch_size, n_ctx, transformer.width] # take features from the eot embedding (eot_token is the highest number in each sequence) x = self.get_proj_feature(x) if embed_dim_z is not None: x = x.mul(embed_dim_z) if normalized: x = F.normalize(x, dim=-1) return x def get_proj_feature(self, x): return x @ self.text_projection def forward(self, text, normalized=False): mask = dict() if self.l0_module is not None: mask = self.l0_module.forward() self.mask = mask return self.encode_text(text, normalized=normalized, **mask) def prune(self): device = self.token_embedding.weight.device if self.hidden_z is None: self.hidden_z = torch.ones( self.text_projection.size(0), device=device) if self.embed_dim_z is None: self.embed_dim_z = torch.ones( self.text_projection.size(1), device=device) mod = self self_copy = copy.deepcopy(self) hidden_r = self.hidden_z > 0 mod.token_embedding = nn.Embedding( self_copy.token_embedding.weight.shape[0], hidden_r.sum()) mod.positional_embedding = nn.Parameter( torch.empty(self_copy.context_length, hidden_r.sum())) mod.token_embedding.weight = nn.Parameter( (self_copy.token_embedding.weight * self_copy.hidden_z.view(1, -1))[:, hidden_r]) mod.positional_embedding = nn.Parameter( (self_copy.positional_embedding * self_copy.hidden_z.view(1, -1))[:, hidden_r]) mod.transformer = self.transformer.prune() mod.ln_final = self.ln_final.prune() embed_dim_r = self.embed_dim_z > 0 mod.text_projection = nn.Parameter( (self.text_projection * self.hidden_z.view(-1, 1) * self.embed_dim_z.view(1, -1))[hidden_r][:, embed_dim_r]) return mod class LogitScale(nn.Module): def __init__(self): super().__init__() self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) def forward(self, dummy): return self.logit_scale class FNBlock(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, *args, **kwargs): return self.fn(*args, **kwargs) class FakeDDP(nn.Module): def __init__(self, module): super().__init__() self.module = module def forward(self, *args, **kwargs): return self.module(*args, **kwargs) class 
CLIPBase(nn.Module): def __init__(self, image_encoder, text_encoder): super().__init__() self._image_encoder = image_encoder self._text_encoder = text_encoder self._logit_scale = LogitScale() # autocast context self.image_autocast = nullcontext self.text_autocast = nullcontext self.logit_autocast = nullcontext # copy the module without ddp self._without_ddp = [self._image_encoder, self._text_encoder, self._logit_scale] self.used_ddp = False def set_autocast(self, image_autocast, text_autocast, logit_autocast): self.image_autocast = image_autocast self.text_autocast = text_autocast self.logit_autocast = logit_autocast @property def image_encoder_without_ddp(self): return self._without_ddp[0] @image_encoder_without_ddp.setter def image_encoder_without_ddp(self, encoder): assert self.used_ddp is False self._image_encoder = encoder self._without_ddp[0] = self._image_encoder @property def text_encoder_without_ddp(self): return self._without_ddp[1] @text_encoder_without_ddp.setter def text_encoder_without_ddp(self, encoder): assert self.used_ddp is False self._text_encoder = encoder self._without_ddp[1] = self._text_encoder @property def logit_scale_without_ddp(self): return self._without_ddp[2] @logit_scale_without_ddp.setter def logit_scale_without_ddp(self, logit_scale): assert self.used_ddp is False self._logit_scale = logit_scale self._without_ddp[2] = self._logit_scale @property def visual(self): return self.image_encoder_without_ddp.visual @property def transformer(self): return self.text_encoder_without_ddp.transformer @property def text_encoder_without_ddp(self): return self._without_ddp[1] @property def logit_scale_without_ddp(self): return self._without_ddp[2] def get_teacher(self): return self.teacher[0] def use_teacher_image(self): def teacher_image_encoder_fn(image, normalized=False): teacher = self.get_teacher() with torch.no_grad(): return teacher.encode_image(image, normalized=normalized) self._image_encoder = FNBlock(teacher_image_encoder_fn) class EmptyVisual(nn.Module): def __init__(self): super().__init__() self.layers = 0 self._image_encoder.visual = EmptyVisual() self._without_ddp[0] = self._image_encoder def use_teacher_text(self): def teacher_text_encoder_fn(text, normalized=False): teacher = self.get_teacher() with torch.no_grad(): return teacher.encode_text(text, normalized=normalized) self._text_encoder = FNBlock(teacher_text_encoder_fn) class EmptyTransformer(nn.Module): def __init__(self): super().__init__() self.layers = 0 self._text_encoder.transformer = EmptyTransformer() self._text_encoder.token_embedding = None self._without_ddp[1] = self._text_encoder def ddpify(self, ddp_fn): def _ddp_fn(module): cnt = sum([p.numel() for p in module.parameters() if p.requires_grad]) if cnt > 0: return ddp_fn(module) return FakeDDP(module) self._image_encoder = _ddp_fn(self.image_encoder_without_ddp) self._text_encoder = _ddp_fn(self.text_encoder_without_ddp) self._logit_scale = _ddp_fn(self.logit_scale_without_ddp) self.used_ddp = True def forward(self, image, text, normalized=True): image_features = text_features = None if image is not None: with self.image_autocast(): image_features = self._image_encoder( image, normalized=normalized) if text is not None: with self.text_autocast(): text_features = self._text_encoder(text, normalized=normalized) with self.logit_autocast(): logit_scale = self._logit_scale(torch.tensor(0)) return image_features, text_features, logit_scale.exp() def encode_image(self, image, normalized=False): with self.image_autocast(): return 
self._image_encoder(image, normalized=normalized) def encode_text(self, text, normalized=False): with self.text_autocast(): return self._text_encoder(text, normalized=normalized) @property def logit_scale(self): return self.logit_scale_without_ddp.logit_scale def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False): assert unlocked_groups == 0, 'partial locking not currently supported for this model' tower = self.image_encoder_without_ddp for param in tower.parameters(): param.requires_grad = False if freeze_bn_stats: freeze_batch_norm_2d(tower) def lock_text_tower(self, unlocked_groups=0, freeze_bn_stats=False): assert unlocked_groups == 0, 'partial locking not currently supported for this model' tower = self.text_encoder_without_ddp for param in tower.parameters(): param.requires_grad = False if freeze_bn_stats: freeze_batch_norm_2d(tower) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): visual = self.image_encoder_without_ddp.visual transformer = self.text_encoder_without_ddp.transformer if hasattr(visual, 'set_grad_checkpointing'): visual.set_grad_checkpointing(enable) if transformer is not None and hasattr(transformer, 'set_grad_checkpointing'): transformer.set_grad_checkpointing(enable) def image_named_params(self): return self._image_encoder.named_parameters() def text_named_params(self): return self._text_encoder.named_parameters() def joint_named_params(self): return self._logit_scale.named_parameters() def load_state_dict(self, state_dict, strict=True): state_dict = convert_to_new_checkpoint(state_dict, self.used_ddp) if not any(k.startswith('_image_encoder') for k in state_dict.keys()): self.use_teacher_image() for m in ['module.', '']: flag = f'_image_encoder.{m}visual.model.head.0.weight' if flag in state_dict: # LN state_dict[f'_image_encoder.{m}visual.ln_post.weight'] = state_dict.pop( f'_image_encoder.{m}visual.model.head.0.weight') state_dict[f'_image_encoder.{m}visual.ln_post.bias'] = state_dict.pop( f'_image_encoder.{m}visual.model.head.0.bias') # FC state_dict[f'_image_encoder.{m}visual.proj'] = state_dict.pop( f'_image_encoder.{m}visual.model.head.1.weight').T new_state_dict = state_dict.copy() for k, v in new_state_dict.items(): if '.module' in k: state_dict[k.replace('.module', '')] = v state_dict.pop(k) super().load_state_dict(state_dict, strict=strict) class CLIP(CLIPBase): def __init__( self, embed_dim: int, vision_cfg: CLIPVisionCfg, text_cfg: CLIPTextCfg, quick_gelu: bool = False, mask_image: bool = False, mask_text: bool = False, sparsity_warmup: int = 1000, sparsity: float = 0.25, start_sparsity: float = 0.0, ): vision_ocfg = None text_ocfg = None if isinstance(vision_cfg, dict): vision_ocfg = vision_cfg.pop('configs', None) vision_cfg = CLIPVisionCfg(**vision_cfg) if isinstance(text_cfg, dict): text_ocfg = text_cfg.pop('configs', None) text_cfg = CLIPTextCfg(**text_cfg) mask_cfg = Namespace() mask_cfg.sparsity_warmup = sparsity_warmup mask_cfg.sparsity = sparsity mask_cfg.start_sparsity = start_sparsity if vision_ocfg is None: image_encoder = ImageEncoder(embed_dim, vision_cfg, quick_gelu, l0_module_image=mask_image, mask_cfg=mask_cfg) if text_ocfg is None: text_encoder = TextEncoder(embed_dim, text_cfg, quick_gelu, l0_module_text=mask_text, mask_cfg=mask_cfg) super().__init__(image_encoder, text_encoder) def convert_to_new_checkpoint(state_dict, used_ddp=False): if '_logit_scale.module.logit_scale' in state_dict: if not used_ddp: new_checkpoint = dict() for k, v in state_dict.items(): sp = k.split('.') assert sp[1] == 'module', 
(sp, state_dict.keys()) k = '.'.join(sp[:1] + sp[2:]) new_checkpoint[k] = v state_dict = new_checkpoint return state_dict if '_logit_scale.logit_scale' in state_dict: if used_ddp: new_checkpoint = dict() for k, v in state_dict.items(): sp = k.split('.') k = '.'.join(sp[:1] + ['module'] + sp[1:]) new_checkpoint[k] = v state_dict = new_checkpoint return state_dict image_prefix = '_image_encoder.' text_prefix = '_text_encoder.' logit_scale_prefix = '_logit_scale.' if used_ddp: image_prefix += 'module.' text_prefix += 'module.' logit_scale_prefix += 'module.' new_checkpoint = dict() if 'module.logit_scale' in state_dict: # remove the prefix module state_dict = {k[len('module.'):]: v for k, v in state_dict.items()} if 'logit_scale' in state_dict: # old CLIP checkpoint for k, v in state_dict.items(): if k.startswith('visual.'): new_checkpoint[image_prefix + k] = v elif k == 'logit_scale': new_checkpoint[logit_scale_prefix + 'logit_scale'] = v else: new_checkpoint[text_prefix + k] = v else: new_checkpoint = state_dict return new_checkpoint def convert_weights_to_fp16(model: nn.Module): """Convert applicable model parameters to fp16""" def _convert_weights_to_fp16(l): if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): l.weight.data = l.weight.data.half() if l.bias is not None: l.bias.data = l.bias.data.half() if isinstance(l, (nn.MultiheadAttention, )): for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]: tensor = getattr(l, attr) if tensor is not None: tensor.data = tensor.data.half() for name in ["text_projection", "proj"]: if hasattr(l, name): attr = getattr(l, name) if attr is not None: attr.data = attr.data.half() model.apply(_convert_weights_to_fp16) def build_model_from_openai_state_dict(state_dict: dict): vit = "visual.proj" in state_dict if vit: vision_width = state_dict["visual.conv1.weight"].shape[0] vision_layers = len( [k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]) vision_patch_size = state_dict["visual.conv1.weight"].shape[-1] grid_size = round( (state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) image_size = vision_patch_size * grid_size else: counts: list = [ len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]] vision_layers = tuple(counts) vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0] output_width = round( (state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5) vision_patch_size = None assert output_width ** 2 + \ 1 == state_dict["visual.attnpool.positional_embedding"].shape[0] image_size = output_width * 32 embed_dim = state_dict["text_projection"].shape[1] context_length = state_dict["positional_embedding"].shape[0] vocab_size = state_dict["token_embedding.weight"].shape[0] transformer_width = state_dict["ln_final.weight"].shape[0] transformer_heads = transformer_width // 64 transformer_layers = len(set( k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks"))) vision_cfg = CLIPVisionCfg( layers=vision_layers, width=vision_width, patch_size=vision_patch_size, image_size=image_size, ) text_cfg = CLIPTextCfg( context_length=context_length, vocab_size=vocab_size, width=transformer_width, heads=transformer_heads, layers=transformer_layers ) model = CLIP( embed_dim, vision_cfg=vision_cfg, text_cfg=text_cfg, quick_gelu=True, # OpenAI models were trained with QuickGELU ) for key in ["input_resolution", "context_length", "vocab_size"]: state_dict.pop(key, None) 
convert_weights_to_fp16(model) model.load_state_dict(state_dict) return model.eval() def trace_model(model, batch_size=256, device=torch.device('cpu')): model.eval() image_size = model.visual.image_size example_images = torch.ones( (batch_size, 3, image_size, image_size), device=device) example_text = torch.zeros( (batch_size, model.context_length), dtype=torch.int, device=device) model = torch.jit.trace_module( model, inputs=dict( forward=(example_images, example_text), encode_text=(example_text,), encode_image=(example_images,) )) model.visual.image_size = image_size return model def resize_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1): # Rescale the grid of position embeddings when loading from state_dict old_pos_embed = state_dict.get('visual.positional_embedding', None) if old_pos_embed is None or not hasattr(model.visual, 'grid_size'): return grid_size = to_2tuple(model.visual.grid_size) # FIXME detect different token configs (ie no class token, or more) extra_tokens = 1 new_seq_len = grid_size[0] * grid_size[1] + extra_tokens if new_seq_len == old_pos_embed.shape[0]: return if extra_tokens: pos_emb_tok, pos_emb_img = old_pos_embed[: extra_tokens], old_pos_embed[extra_tokens:] else: pos_emb_tok, pos_emb_img = None, old_pos_embed old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img)))) logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size) pos_emb_img = pos_emb_img.reshape( 1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2) pos_emb_img = F.interpolate( pos_emb_img, size=grid_size, mode=interpolation, align_corners=True, ) pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape( 1, grid_size[0] * grid_size[1], -1)[0] if pos_emb_tok is not None: new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0) else: new_pos_embed = pos_emb_img state_dict['visual.positional_embedding'] = new_pos_embed @torch.no_grad() def load_pruned_model(model, pruned_state_dict): ''' A full model loads the pruned state dict. Inputs: model_state_dict: the full model weights pruned_state_dict: the pruned model weights ''' def _copy_to_full_weight(dst, src): assert dst.ndim == src.ndim, (dst.ndim, src.ndim) dst.zero_() dims = src.shape if len(dims) == 0: dst.copy_(src) else: slices = [slice(0, d) for d in dims] dst[slices].copy_(src) lambda_init_value = 10.0 model_state_dict = model.state_dict() head_dim = model.transformer.head_dim pruned_state_dict = {k.replace('image_encoder_without_ddp', '_image_encoder'). replace('text_encoder_without_ddp', '_text_encoder'): v for k, v in pruned_state_dict.items()} for name, dst in model_state_dict.items(): # auto weight inheritance model weight prefix dst_shape = dst.shape # copy weights if name in pruned_state_dict: src = pruned_state_dict[name] if 'attn.in_proj_weight' in name: # reshape: (3 * num_heads * head_dim, embed_dim) -> (3, num_heads, head_dim, embed_dim) assert len(src.shape) == 2 _copy_to_full_weight(dst.view(3, -1, head_dim, dst_shape[-1]), src.view(3, -1, head_dim, src.shape[-1])) elif 'attn.in_proj_bias' in name: # reshape: (3 * num_heads * head_dim,) -> (3, num_heads, head_dim) assert len(src.shape) == 1 _copy_to_full_weight(dst.view(3, -1, head_dim), src.view(3, -1, head_dim)) else: _copy_to_full_weight(dst, src) else: if '.resblocks.' in name: # the layer has been pruned. 
dst.zero_() model_state_dict['_logit_scale.logit_scale'] = pruned_state_dict['_logit_scale.logit_scale'] # prune hidden dimensions encoder_names = ['_image_encoder', '_text_encoder'] hidden_size_img = pruned_state_dict['_image_encoder.visual.ln_pre.weight'].shape[0] hidden_size_txt = pruned_state_dict['_text_encoder.positional_embedding'].shape[1] hidden_sizes = [hidden_size_img, hidden_size_txt] for ename, hidden_size in zip(encoder_names, hidden_sizes): # reset lambda in l0 module model_state_dict[f'{ename}.l0_module.lambda_1'].fill_( lambda_init_value) model_state_dict[f'{ename}.l0_module.lambda_2'].fill_( lambda_init_value) # prune the last dimensions model_state_dict[f'{ename}.l0_module.hidden_loga'][hidden_size:].fill_( -lambda_init_value) def _get_layer_id(name): return int(name.split('resblocks.')[1].split('.')[0]) for ename in encoder_names: # get the depth of the encoder encoder_keys = list(k for k in model_state_dict.keys() if ename in k) encoder_depth = max(_get_layer_id(k) for k in encoder_keys if 'resblocks' in k) + 1 pruned_encoder_keys = list( k for k in pruned_state_dict.keys() if ename in k) in_proj_weight_shapes = [None for _ in range(encoder_depth)] mlp_c_fc_shapes = [None for _ in range(encoder_depth)] for k in pruned_encoder_keys: if 'in_proj_weight' in k: d = _get_layer_id(k) in_proj_weight_shapes[d] = pruned_state_dict[k].shape elif 'mlp.c_fc.weight' in k: d = _get_layer_id(k) mlp_c_fc_shapes[d] = pruned_state_dict[k].shape for d in range(encoder_depth): # set heads_loga if in_proj_weight_shapes[d] is not None: num_heads = in_proj_weight_shapes[d][0] // head_dim // 3 model_state_dict[f'{ename}.l0_module.heads_loga'][d, num_heads:].fill_(-lambda_init_value) else: # all heads have been pruned model_state_dict[f'{ename}.l0_module.heads_loga'][d, :].fill_(-lambda_init_value) # set intermediate_loga if mlp_c_fc_shapes[d] is not None: inter_size = mlp_c_fc_shapes[d][0] model_state_dict[f'{ename}.l0_module.intermediate_loga'][d, inter_size:].fill_(-lambda_init_value) else: # all intermediate dimensions have been pruned model_state_dict[f'{ename}.l0_module.intermediate_loga'][d, :].fill_(-lambda_init_value) model.load_state_dict(model_state_dict, strict=True)
Cream/TinyCLIP/src/open_clip/model.py/0
{ "file_path": "Cream/TinyCLIP/src/open_clip/model.py", "repo_id": "Cream", "token_count": 27603 }
318
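All the prunable modules in the model file above follow one pattern: during mask search, mask vectors (`hidden_z`, `heads_z`, `intermediate_z`, ...) multiply or index the corresponding dimension, and `prune()` later materialises a smaller module from the non-zero entries. Below is a self-contained sketch of the hidden-dimension case, mirroring the masked `LayerNorm.forward` above; the sizes and mask values are made up for the demo.

```python
import torch
import torch.nn.functional as F


def masked_layer_norm(x, weight, bias, hidden_z, eps=1e-5):
    """LayerNorm restricted to the channels whose mask entry is non-zero.

    x: (..., C), hidden_z: (C,). Masked-out channels stay exactly zero, so the
    statistics are computed over the surviving sub-space only.
    """
    keep = torch.where(hidden_z != 0)[0]
    sub = torch.index_select(x, dim=-1, index=keep)
    sub = F.layer_norm(sub, [len(keep)], weight[keep], bias[keep], eps)
    out = x.new_zeros(x.shape)
    out[..., keep] = sub
    return out


C = 8
x = torch.randn(2, 5, C)
weight, bias = torch.ones(C), torch.zeros(C)
hidden_z = torch.tensor([1., 1., 0., 1., 0., 1., 1., 1.])  # 6 of 8 channels survive

y = masked_layer_norm(x, weight, bias, hidden_z)
print(y[..., hidden_z == 0].abs().max())  # pruned channels are exactly zero
# prune() then simply keeps weight[keep] / bias[keep] and drops the rest.
```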
from itertools import repeat import collections.abc from torch import nn as nn from torchvision.ops.misc import FrozenBatchNorm2d def freeze_batch_norm_2d(module, module_match={}, name=''): """ Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and returned. Otherwise, the module is walked recursively and submodules are converted in place. Args: module (torch.nn.Module): Any PyTorch module. module_match (dict): Dictionary of full module names to freeze (all if empty) name (str): Full module name (prefix) Returns: torch.nn.Module: Resulting module Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762 """ res = module is_match = True if module_match: is_match = name in module_match if is_match and isinstance(module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)): res = FrozenBatchNorm2d(module.num_features) res.num_features = module.num_features res.affine = module.affine if module.affine: res.weight.data = module.weight.data.clone().detach() res.bias.data = module.bias.data.clone().detach() res.running_mean.data = module.running_mean.data res.running_var.data = module.running_var.data res.eps = module.eps else: for child_name, child in module.named_children(): full_child_name = '.'.join( [name, child_name]) if name else child_name new_child = freeze_batch_norm_2d( child, module_match, full_child_name) if new_child is not child: res.add_module(child_name, new_child) return res # From PyTorch internals def _ntuple(n): def parse(x): if isinstance(x, collections.abc.Iterable): return x return tuple(repeat(x, n)) return parse to_1tuple = _ntuple(1) to_2tuple = _ntuple(2) to_3tuple = _ntuple(3) to_4tuple = _ntuple(4) def to_ntuple(n, x): return _ntuple(n)(x)
Cream/TinyCLIP/src/open_clip/utils.py/0
{ "file_path": "Cream/TinyCLIP/src/open_clip/utils.py", "repo_id": "Cream", "token_count": 950 }
319
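`freeze_batch_norm_2d` above walks a module tree and swaps each matching `BatchNorm2d`/`SyncBatchNorm` for a `FrozenBatchNorm2d` carrying the same statistics and affine parameters. The sketch below performs that replacement by hand for a single layer to show the effect; only torch and torchvision are assumed.

```python
import torch
import torch.nn as nn
from torchvision.ops.misc import FrozenBatchNorm2d

model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.BatchNorm2d(8), nn.ReLU())

# What the helper does per matched layer: copy stats and affine terms into a frozen copy.
bn = model[1]
frozen = FrozenBatchNorm2d(bn.num_features)
frozen.weight.data.copy_(bn.weight.data)
frozen.bias.data.copy_(bn.bias.data)
frozen.running_mean.data.copy_(bn.running_mean.data)
frozen.running_var.data.copy_(bn.running_var.data)
model[1] = frozen

model.train()
_ = model(torch.randn(4, 3, 16, 16))

# Frozen BN keeps its statistics fixed even in train() mode and exposes no trainable params.
print(sum(p.numel() for p in model[1].parameters() if p.requires_grad))  # 0
```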
import json import logging import math import os import psutil import functools import time from collections import defaultdict import numpy as np import torch from torch import optim import torch.nn.functional as F from timm.utils import get_state_dict from torch.utils.data._utils.collate import default_collate from collections import UserDict try: import wandb except ImportError: wandb = None from open_clip import ClipLoss from open_clip.clip_soft_loss import ClipSoftLoss from timm.utils.model import unwrap_model from .distributed import is_master from .zero_shot import zero_shot_eval from .precision import get_autocast from training.optimizer import build_optimizer from training.scheduler import cosine_lr, cosine_lr_start, step_lr, cosine_lr_start_nowarmup import torch.distributed as dist from training.my_meter import AverageMeter, reduce_tensor def _stack2cat(items): if isinstance(items, torch.Tensor): shape = items.shape shape = (shape[0] * shape[1],) + shape[2:] return items.view(shape) elif isinstance(items, (list, tuple)): return [_stack2cat(e) for e in items] elif isinstance(items, (dict, UserDict)): return {k: _stack2cat(v) for k, v in items.items()} else: raise TypeError(f'Unsupported type {type(items)}') def cat_items(items): # items: [Tensor, Tensor, ...] -> Tensor, # [(Tensor, Tensor), (Tensor, Tensor)] -> (Tensor, Tensor) # [(Tensor, [Tensor, Tensor]), (Tensor, [Tensor, Tensor])] -> (Tensor, [Tensor, Tensor]) items = default_collate(items) # stack of items # stack -> cat items = _stack2cat(items) return items def infer_chunks(fn, x, times): if times == 1: return fn(x) ys = [] for e in x.chunk(times): ys.append(fn(e)) return cat_items(ys) def check_last_batch(it): ''' input: iterator return: (item, is_last_batch) ''' last = None for x in it: if last is not None: yield last, False last = x if last is not None: yield last, True NAN_LOSS_CNT = 0 def train_one_epoch(model, data, epoch, optimizer, scaler, scheduler, scheduler_l0, args, tb_writer=None, start_iter=0, zs=None): global NAN_LOSS_CNT device = torch.device(args.device) autocast = get_autocast(args.precision) image_autocast = get_autocast(args.image_precision) text_autocast = get_autocast(args.text_precision) logit_autocast = get_autocast(args.logit_precision) model.set_autocast( image_autocast=image_autocast, text_autocast=text_autocast, logit_autocast=logit_autocast) teacher_autocast = torch.cuda.amp.autocast model_without_ddp = unwrap_model(model) distillation = args.distillation if distillation: teacher_model = model_without_ddp.teacher[0] model.train() loss_kwargs = dict( local_loss=args.local_loss, gather_with_grad=args.gather_with_grad, cache_labels=True, rank=args.rank, world_size=args.world_size, use_horovod=args.horovod) if start_iter == 0: # set epoch in process safe manner via sampler or shared_epoch data['train'].set_epoch(epoch) dataloader = data['train'].dataloader dataloader.device = args.device if distillation: soft_loss_fn = ClipSoftLoss(**loss_kwargs) # , ignore_diag=True) else: soft_loss_fn = None hard_loss_fn = ClipLoss(**loss_kwargs) dataloader, sampler = data['train'].dataloader, data['train'].sampler if args.distributed and sampler is not None and start_iter == 0: # [DO NOT REMOVE IT] it will call set_epoch even if sampler is not a DistributedSampler. 
sampler.set_epoch(epoch) num_batches_per_epoch = dataloader.num_batches sample_digits = math.ceil(math.log(dataloader.num_samples + 1, 10)) loss_m = AverageMeter() metrics = defaultdict(AverageMeter) end = time.time() batch_size = dataloader.batch_size samples_per_epoch = dataloader.num_samples total_batch_size = batch_size * args.world_size num_feed_images = samples_per_epoch * epoch + start_iter * total_batch_size num_feed_images_after_epoch = samples_per_epoch * (epoch + 1) all_num_feed_images = ( int(samples_per_epoch * args.epochs) // total_batch_size * total_batch_size) # for float epoch is_last_epoch = (epoch + 1 >= args.epochs) samples_per_epoch_r = samples_per_epoch if not is_last_epoch else all_num_feed_images - \ epoch * samples_per_epoch num_batches_per_epoch_r = samples_per_epoch_r // total_batch_size eval_freq = int(os.getenv('EVAL_FREQ', 1000)) save_freq = int(os.getenv('SAVE_FREQ', 1000)) # define model_fn and loss_fn infer_teacher_image = True def loss_fn(student_outputs, teacher_outputs): image_features = student_outputs['image_features'] text_features = student_outputs['text_features'] logit_scale = student_outputs['logit_scale'] teacher_image_features = teacher_outputs['image_features'] teacher_text_features = teacher_outputs['text_features'] teacher_logit_scale = teacher_outputs['logit_scale'] labels = teacher_outputs['labels'] losses = dict() if distillation: if args.distillation_alpha > 0.0 and args.distillation_weight > 0.0: soft_loss_weight = args.distillation_alpha * args.distillation_weight img2text_loss, text2img_loss = soft_loss_fn(image_features, text_features, logit_scale, teacher_image_features, teacher_text_features, teacher_logit_scale, labels=labels, average_two_losses=False, ) img2text_loss *= 0.5 * soft_loss_weight text2img_loss *= 0.5 * soft_loss_weight soft_loss = img2text_loss + text2img_loss losses['soft_loss'] = soft_loss metrics['soft_img2text_loss'].update(img2text_loss.item()) metrics['soft_text2img_loss'].update(text2img_loss.item()) # Hard Loss if args.distillation_alpha < 1.0 and args.distillation_weight > 0.0: hard_loss = hard_loss_fn(image_features, text_features, logit_scale) *\ ((1.0 - args.distillation_alpha) * args.distillation_weight) losses['hard_loss'] = hard_loss else: losses['loss'] = hard_loss_fn( image_features, text_features, logit_scale) total_loss = 0 for k, v in losses.items(): metrics[k].update(v.item()) assert v.requires_grad, k total_loss += v return total_loss def grad_cache_loss_fn(student_outputs, teacher_outputs): image_features, text_features, logit_scale = student_outputs student_outputs = dict( image_features=image_features, text_features=text_features, logit_scale=logit_scale, ) return loss_fn(student_outputs, teacher_outputs) gpu_mem_info = torch.cuda.mem_get_info() gpu_memory_used = (gpu_mem_info[1] - gpu_mem_info[0]) / (1024 ** 3) metrics['gpu_memory'].update(gpu_memory_used) cpu_mem_info = psutil.virtual_memory() cpu_memory_used = cpu_mem_info.used / (1024 ** 3) metrics['cpu_memory'].update(cpu_memory_used) rest_shm = psutil.disk_usage('/dev/shm').free / (1024 ** 3) metrics['rest_shm'].update(rest_shm) def forward_backward_fn(model, images, texts, outputs_no_grad): image_feat_no_grad, text_feat_no_grad, logit_scale_no_grad = outputs_no_grad if args.lock_image: images = None if args.lock_text: texts = None with autocast(): image_feat, text_feat, logit_scale = model( images, texts, normalized=True) if image_feat is None: image_feat = image_feat_no_grad if text_feat is None: text_feat = text_feat_no_grad return 
image_feat, text_feat, logit_scale def naive_model_fn(student_inputs, teacher_outputs, total_loss_flag=True): images, texts = student_inputs with autocast(): # clean outputs first to avoid the error when using MXS outputs_no_grad = [None, None, None] student_outputs = forward_backward_fn( model, images, texts, outputs_no_grad) del images, texts, student_inputs loss = grad_cache_loss_fn(student_outputs, teacher_outputs) use_image_mask = getattr( model.image_encoder_without_ddp, 'l0_module', None) is not None use_text_mask = getattr( model.text_encoder_without_ddp, 'l0_module', None) is not None if total_loss_flag and use_image_mask and use_text_mask: img_mask = model.image_encoder_without_ddp.l0_module txt_mask = model.text_encoder_without_ddp.l0_module all_para_txt = txt_mask.prunable_model_size all_para_img = img_mask.prunable_model_size remain_para_txt = txt_mask.get_num_parameters_and_constraint( "hidden" in txt_mask.types) remain_para_img = img_mask.get_num_parameters_and_constraint( "hidden" in img_mask.types) expected_sparsity = 1 - \ (remain_para_txt + remain_para_img) / \ (all_para_txt + all_para_img) target_sparsity_img = img_mask.get_target_sparsity( step) if img_mask.lagrangian_warmup > 0 else img_mask.target_sparsity target_sparsity_txt = txt_mask.get_target_sparsity( step) if txt_mask.lagrangian_warmup > 0 else txt_mask.target_sparsity target_sparsity = (target_sparsity_img + target_sparsity_txt) / 2 lambda_1_ = (img_mask.lambda_1 + txt_mask.lambda_1) / 2 lambda_2_ = (img_mask.lambda_2 + txt_mask.lambda_2) / 2 zero = torch.tensor(0.0, device=expected_sparsity.device) total_lagrangian_loss = ( lambda_1_ * torch.maximum(target_sparsity - expected_sparsity, zero) + lambda_2_ * torch.maximum(target_sparsity - expected_sparsity, zero).square() ) loss = loss + total_lagrangian_loss metrics['all_expected_sparsity'].update(expected_sparsity) metrics['vision_expected_sparsity'].update( 1 - remain_para_img / all_para_img) metrics['text_expected_sparsity'].update( 1 - remain_para_txt / all_para_txt) metrics['all_target_sparsity'].update(target_sparsity) metrics['all_lagran_loss'].update(total_lagrangian_loss) else: if use_image_mask: lagran_loss, expected_sparsity, target_sparsity = \ model.image_encoder_without_ddp.l0_module.lagrangian_regularization( step) loss = loss + lagran_loss metrics['vision_expected_sparsity'].update( expected_sparsity) metrics['vision_target_sparsity'].update(target_sparsity) metrics['vision_lagran_loss'].update(lagran_loss) if use_text_mask: lagran_loss, expected_sparsity, target_sparsity = \ model.text_encoder_without_ddp.l0_module.lagrangian_regularization( step) loss = loss + lagran_loss metrics['text_expected_sparsity'].update(expected_sparsity) metrics['text_target_sparsity'].update(target_sparsity) metrics['text_lagran_loss'].update(lagran_loss) scaler.scale(loss).backward() return loss grad_cache = naive_model_fn def teacher_image_fn(images): feat = teacher_model.encode_image(images) outputs = torch.tensor([]) return F.normalize(feat, dim=-1), outputs def teacher_text_fn(texts): feat = teacher_model.encode_text(texts) outputs = torch.tensor([]) return F.normalize(feat, dim=-1), outputs for (i, batch), is_last_batch in check_last_batch(enumerate(dataloader, start=start_iter)): step = num_batches_per_epoch * epoch + i num_feed_images += total_batch_size if step == args.prune_step and model.image_encoder_without_ddp.l0_module is not None and model.text_encoder_without_ddp.l0_module is not None: logging.info('=== FUSE MASK IMAGE ===') num_params_before_fuse 
= sum( p.numel() for p in model.image_encoder_without_ddp.parameters() if p.requires_grad) with torch.no_grad(): model.image_encoder_without_ddp.eval() image = torch.randn((1, 3, 224, 224), device='cuda') model.image_encoder_without_ddp(image) model.image_encoder_without_ddp = model.image_encoder_without_ddp.prune() assert hasattr(model.image_encoder_without_ddp, 'l0_module') model.image_encoder_without_ddp.l0_module = None num_params_after_fuse = sum( p.numel() for p in model.image_encoder_without_ddp.parameters() if p.requires_grad) logging.info( f'=> fuse MASK image: {num_params_before_fuse} -> {num_params_after_fuse}') logging.info('=== FUSE MASK TEXT ===') num_params_before_fuse = sum( p.numel() for p in model.text_encoder_without_ddp.parameters() if p.requires_grad) with torch.no_grad(): model.text_encoder_without_ddp.eval() text = torch.randint(0, 100, (1, 77), device='cuda') model.text_encoder_without_ddp(text) model.text_encoder_without_ddp = model.text_encoder_without_ddp.prune() assert hasattr(model.text_encoder_without_ddp, 'l0_module') model.text_encoder_without_ddp.l0_module = None num_params_after_fuse = sum( p.numel() for p in model.text_encoder_without_ddp.parameters() if p.requires_grad) logging.info( f'=> fuse MASK text: {num_params_before_fuse} -> {num_params_after_fuse}') # results = evaluate(model, data, epoch, args) if args.distributed and not args.horovod: if args.use_bn_sync: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm( model) ddp_args = {} if args.ddp_static_graph: # this doesn't exist in older PyTorch, arg only added if enabled ddp_args['static_graph'] = True ddp_fn = functools.partial( torch.nn.parallel.DistributedDataParallel, device_ids=[device], **ddp_args) model.ddpify(ddp_fn) model_without_ddp = model args.prune_image = False args.prune_text = False use_mask = False optimizer = build_optimizer(args, model) scheduler = cosine_lr_start_nowarmup( optimizer[0:3], args.lr, num_batches_per_epoch * args.epochs, args.prune_step) scheduler(step) if scheduler_l0 != None: scheduler_l0(step) if len(batch) == 2: images, texts = batch images = images.to(device, non_blocking=True) texts = texts.to(device, non_blocking=True) labels = None else: images, texts, labels = batch images = images.to(device, non_blocking=True) texts = texts.to(device, non_blocking=True) labels = labels.to(device, non_blocking=True) metrics['data_time'].update(time.time() - end) for opt in optimizer: opt.zero_grad() if distillation: # infer teacher if args.logit_scale is not None: teacher_model.logit_scale.fill_(math.log(args.logit_scale)) with teacher_autocast(): with torch.no_grad(): if infer_teacher_image: teacher_image_features, teacher_image_outputs = infer_chunks( teacher_image_fn, images, 1) else: teacher_image_features = teacher_image_outputs = None teacher_text_features, teacher_text_outputs = infer_chunks( teacher_text_fn, texts, 1) teacher_logit_scale = teacher_model.logit_scale.exp() else: teacher_image_features = teacher_image_outputs = None teacher_text_features = teacher_text_outputs = None teacher_logit_scale = None grad_norm = None # detach and it has been backwarded infer_student_image = not args.use_teacher_image infer_student_text = not args.use_teacher_text student_inputs = [] for x, used in zip([images, texts], [infer_student_image, infer_student_text]): if used: student_inputs.append(x) else: student_inputs.append(None) use_mask = args.prune_image or args.prune_text used_optimizer = [] for opt, used in zip(optimizer, [ infer_student_image and not args.lock_image, 
infer_student_text and not args.lock_text, True, use_mask ]): if used: used_optimizer.append(opt) # append optimizer teacher_outputs = dict( image_features=teacher_image_features, text_features=teacher_text_features, logit_scale=teacher_logit_scale, image_outputs=teacher_image_outputs, text_outputs=teacher_text_outputs, labels=labels, ) total_loss = grad_cache( student_inputs, teacher_outputs=teacher_outputs, total_loss_flag=args.total_loss_flag) skip_this_step = False # check nan loss if not torch.isfinite(total_loss): NAN_LOSS_CNT += 1 if NAN_LOSS_CNT > 100: print( f'WARNING: non-finite loss, ending training loss: {total_loss}') return 'non-finite loss' skip_this_step = True print( f'WARNING: non-finite loss, skip this step. loss: {total_loss}, nan_loss_cnt: {NAN_LOSS_CNT}') else: NAN_LOSS_CNT = 0 ''' a potential bug: there are three branches: image, text, logit each optimizer has its own `found_inf_per_device`. The three `found_inf_per_device` should be synced, otherwise a branch will be updated with wrong gradients? ''' # check loss for opt in used_optimizer: scaler.unscale_(opt) # sync found_inf_per_device found_inf = sum( sum(v.item() for v in scaler._per_optimizer_states[id( opt)]['found_inf_per_device'].values()) for opt in used_optimizer ) if found_inf > 0: for opt in used_optimizer: for v in scaler._per_optimizer_states[id(opt)]['found_inf_per_device'].values(): v.fill_(True) if args.norm_gradient_clip is not None: grad_norm = torch.nn.utils.clip_grad_norm_( model.parameters(), args.norm_gradient_clip, norm_type=2.0) # evaluate(model, data, epoch, args, tb_writer, step=step, num_feed_images=num_feed_images) if not skip_this_step: for opt in used_optimizer: scaler.step(opt) scaler.update() if getattr(model.image_encoder_without_ddp, 'l0_module', None) is not None: model._image_encoder.module.l0_module.constrain_parameters() metrics['vision_lambda1'].update( model._image_encoder.module.l0_module.lambda_1.detach().item()) metrics['vision_lambda2'].update( model._image_encoder.module.l0_module.lambda_2.detach().item()) if getattr(model.text_encoder_without_ddp, 'l0_module', None) is not None: model._text_encoder.module.l0_module.constrain_parameters() metrics['text_lambda1'].update( model._text_encoder.module.l0_module.lambda_1.detach().item()) metrics['text_lambda2'].update( model._text_encoder.module.l0_module.lambda_2.detach().item()) loss_scale = scaler.state_dict()["scale"] metrics['loss_scale'].update(loss_scale) # Note: we clamp to 4.6052 = ln(100), as in the original paper. 
with torch.no_grad(): if args.logit_scale is not None: model_without_ddp.logit_scale.fill_(math.log(args.logit_scale)) else: model_without_ddp.logit_scale.clamp_(0, math.log(100)) batch_time_cost = time.time() - end metrics['batch_time'].update(batch_time_cost) end = time.time() if batch_time_cost > 0: metrics['throughput'].update(total_batch_size / batch_time_cost) batch_count = i + 1 if is_master(args) and (i % 10 == 0 or is_last_batch): num_samples = batch_count * total_batch_size percent_complete = 100.0 * batch_count / num_batches_per_epoch # NOTE loss is coarsely sampled, just master node and per log update loss_m.update(total_loss.item(), batch_size) logit_scale_scalar = model_without_ddp.logit_scale.exp().item() metrics_str = '' for k, v in metrics.items(): metrics_str += '{}: {:.4f} ({:.4f})\t'.format(k, v.val, v.avg) logging.info( f"Train Epoch: {epoch} [{batch_count}/{num_batches_per_epoch_r}] [{num_samples:>{sample_digits}}/{samples_per_epoch_r} ({percent_complete:.0f}%)] " f"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) " f"{metrics_str} " f"LR: {optimizer[0].param_groups[0]['lr']:5f} " f"Logit Scale: {logit_scale_scalar:.3f}" ) # Save train loss / etc. Using non avg meter values as loggers have their own smoothing log_data = { "loss": loss_m.val, "scale": logit_scale_scalar, "lr": optimizer[0].param_groups[0]["lr"], "lr_l0": optimizer[-1].param_groups[0]["lr"] } for k, v in metrics.items(): log_data[k] = v.val for name, val in log_data.items(): name = "train/" + name if tb_writer is not None: tb_writer.add_scalar(name, val, step) if args.wandb: assert wandb is not None, 'Please install wandb.' wandb.log({name: val, 'step': step, 'num_feed_images': num_feed_images}, step=step) if i > 2000: eval_freq = 500 do_evaluate = ((i + 1) % eval_freq == 0 or is_last_batch) do_save_checkpoint = ((i + 1) % save_freq == 0 or is_last_batch) use_mask = args.prune_image or args.prune_text if step == 0 and use_mask: do_evaluate = True if ((i + 1) % eval_freq == 0 or is_last_batch) or step == 0: from training.viz import plot if args.prune_image: model.eval() layers = model._image_encoder.module.l0_module.num_hidden_layers hidden_size = model._image_encoder.module.l0_module.hidden_size heads = model._image_encoder.module.l0_module.num_attention_heads l0device = model._image_encoder.module.l0_module.z_logas[ model._image_encoder.module.l0_module.types[0]].device zs_img = model._image_encoder.module.l0_module() sparsity_img = model._image_encoder.module.l0_module.calculate_model_size(zs_img)[ 'pruned_sparsity'] if 'mha_z' not in zs_img.keys(): zs_img['mha_z'] = torch.ones([layers]).to(l0device) if 'ffn_z' not in zs_img.keys(): zs_img['ffn_z'] = torch.ones([layers]).to(l0device) if 'hidden_z' not in zs_img.keys(): zs_img['hidden_z'] = torch.ones([hidden_size]).to(l0device) if 'heads_z' not in zs_img.keys(): zs_img['heads_z'] = torch.ones( [layers, 1, heads, 1, 1]).to(l0device) if 'intermediate_z' not in zs_img.keys(): zs_img['intermediate_z'] = torch.ones( [layers, 1, 1, hidden_size * 4]).to(l0device) hidden_img = zs_img['hidden_z'].detach( ).cpu().squeeze().numpy() heads_img = zs_img['mha_z'].detach().cpu().squeeze().numpy( ).reshape(-1, 1) * zs_img['heads_z'].detach().cpu().squeeze().numpy() intermediates_img = zs_img['ffn_z'].detach().cpu().squeeze().numpy( ).reshape(-1, 1) * zs_img['intermediate_z'].detach().cpu().squeeze().numpy() fig_img = plot(heads_img, intermediates_img, f"Sparsity_img: {sparsity_img:.2%}") if dist.get_rank() == 0 and args.wandb: wandb.log({ "test/sparsity_img": sparsity_img, 
"pruned_structure_img": fig_img }, step=step) model.train() if args.prune_text: model.eval() layers = model._text_encoder.module.l0_module.num_hidden_layers hidden_size = model._text_encoder.module.l0_module.hidden_size heads = model._text_encoder.module.l0_module.num_attention_heads l0device = model._text_encoder.module.l0_module.z_logas[ model._text_encoder.module.l0_module.types[0]].device zs_txt = model._text_encoder.module.l0_module() sparsity_txt = model._text_encoder.module.l0_module.calculate_model_size(zs_txt)[ 'pruned_sparsity'] if 'mha_z' not in zs_txt.keys(): zs_txt['mha_z'] = torch.ones([layers]).to(l0device) if 'ffn_z' not in zs_txt.keys(): zs_txt['ffn_z'] = torch.ones([layers]).to(l0device) if 'hidden_z' not in zs_txt.keys(): zs_txt['hidden_z'] = torch.ones([hidden_size]).to(l0device) if 'heads_z' not in zs_txt.keys(): zs_txt['heads_z'] = torch.ones( [layers, 1, heads, 1, 1]).to(l0device) if 'intermediate_z' not in zs_txt.keys(): zs_txt['intermediate_z'] = torch.ones( [layers, 1, 1, hidden_size * 4]).to(l0device) hidden_txt = zs_txt['hidden_z'].detach( ).cpu().squeeze().numpy() heads_txt = zs_txt['mha_z'].detach().cpu().squeeze().numpy( ).reshape(-1, 1) * zs_txt['heads_z'].detach().cpu().squeeze().numpy() intermediates_txt = zs_txt['ffn_z'].detach().cpu().squeeze().numpy( ).reshape(-1, 1) * zs_txt['intermediate_z'].detach().cpu().squeeze().numpy() fig_txt = plot(heads_txt, intermediates_txt, f"Sparsity_txt: {sparsity_txt:.2%}") if dist.get_rank() == 0 and args.wandb: wandb.log({ "test/sparsity_txt": sparsity_txt, "pruned_structure_txt": fig_txt }, step=step) model.train() if do_evaluate: if any(v in data for v in ('val', 'imagenet-val', 'imagenet-v2')): evaluate(model, data, epoch, args, tb_writer, step=step, num_feed_images=num_feed_images) model.train() if do_save_checkpoint and is_master(args): # Saving checkpoints. 
if args.save_logs: num_batches = len(dataloader) samples_per_epoch = dataloader.num_samples checkpoint_dict = { "args": args, "epoch": epoch, "iter_in_epoch": i, "num_batches": num_batches, "samples_per_epoch": samples_per_epoch, "name": args.name, "state_dict": model.state_dict(), "optimizer": [opt.state_dict() for opt in optimizer], } if scaler is not None: checkpoint_dict["scaler"] = scaler.state_dict() # Model EMA if hasattr(model_without_ddp, '_model_ema'): ema_models_state = [get_state_dict( model_ema) for model_ema in model_without_ddp._model_ema] checkpoint_dict['model_emas'] = ema_models_state checkpoint_fname = os.path.join( args.checkpoint_path, f"epoch_{epoch}_iter_{i}.pt") torch.save( checkpoint_dict, checkpoint_fname, ) print(f"Save checkpoint to {checkpoint_fname}") if num_feed_images >= all_num_feed_images: break print( f'Feed ALL Data: {num_feed_images}/{num_feed_images_after_epoch}/{all_num_feed_images}') return model, optimizer, scaler, scheduler, scheduler_l0, args # end for def evaluate(model, data, epoch, args, tb_writer=None, step=None, num_feed_images=None): metrics = {} models = [model] names = [''] assert len(names) == len(models) for name, model_i in zip(names, models): model_i.eval() zero_shot_metrics = zero_shot_eval(model_i, data, epoch, args) zero_shot_metrics = dict((name + k, v) for k, v in zero_shot_metrics.items()) metrics.update(zero_shot_metrics) if not metrics: return metrics if not is_master(args): return metrics logging.info( f"Eval Epoch: {epoch} " + "\t".join([f"{k}: {round(v, 4):.4f}" for k, v in metrics.items()]) ) if args.save_logs: for name, val in metrics.items(): if tb_writer is not None: tb_writer.add_scalar(f"val/{name}", val, epoch) with open(os.path.join(args.checkpoint_path, "results.jsonl"), "a+") as f: f.write(json.dumps(metrics)) f.write("\n") if args.wandb: assert wandb is not None, 'Please install wandb.' for name, val in metrics.items(): log = {f"val/{name}": val, 'epoch': epoch} extra_kwargs = dict() if step is not None: log['step'] = step extra_kwargs['step'] = step if num_feed_images is not None: log['num_feed_images'] = num_feed_images wandb.log(log, **extra_kwargs) return metrics def get_metrics(image_features, text_features, logit_scale): metrics = {} logits_per_image = (logit_scale * image_features @ text_features.t()).detach().cpu() logits_per_text = logits_per_image.t().detach().cpu() logits = {"image_to_text": logits_per_image, "text_to_image": logits_per_text} ground_truth = torch.arange(len(text_features)).view(-1, 1) for name, logit in logits.items(): ranking = torch.argsort(logit, descending=True) preds = torch.where(ranking == ground_truth)[1] preds = preds.detach().cpu().numpy() metrics[f"{name}_mean_rank"] = preds.mean() + 1 metrics[f"{name}_median_rank"] = np.floor(np.median(preds)) + 1 for k in [1, 5, 10]: metrics[f"{name}_R@{k}"] = np.mean(preds < k) return metrics
Cream/TinyCLIP/src/training/train.py/0
{ "file_path": "Cream/TinyCLIP/src/training/train.py", "repo_id": "Cream", "token_count": 16617 }
320
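A small sketch, not from the original file, exercising the batching helpers defined at the top of train.py; the tensors and the chunk count are fabricated for illustration, and the helpers (check_last_batch, infer_chunks) are assumed to be in scope.

import torch

# check_last_batch wraps any iterator and flags its final element, which the
# training loop uses to trigger end-of-epoch evaluation and checkpointing.
assert list(check_last_batch(iter([10, 20, 30]))) == [(10, False), (20, False), (30, True)]

# infer_chunks splits a batch into `times` chunks, applies fn to each chunk,
# and stitches the results back together along the batch dimension via cat_items.
x = torch.randn(8, 4)
y = infer_chunks(lambda t: t * 2, x, times=4)
assert torch.allclose(y, x * 2)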
MODEL: NAME: TinyViT-11M-22kto1k TYPE: tiny_vit DROP_PATH_RATE: 0.0 TINY_VIT: DEPTHS: [ 2, 2, 6, 2 ] NUM_HEADS: [ 2, 4, 8, 14 ] WINDOW_SIZES: [ 7, 7, 14, 7 ] EMBED_DIMS: [64, 128, 256, 448] TRAIN: EPOCHS: 30 WARMUP_EPOCHS: 5 BASE_LR: 2.5e-4 WEIGHT_DECAY: 1e-8 MIN_LR: 1e-5 LAYER_LR_DECAY: 0.8 EVAL_BN_WHEN_TRAINING: True AUG: MIXUP: 0.0 CUTMIX: 0.0
Cream/TinyViT/configs/22kto1k/tiny_vit_11m_22kto1k.yaml/0
{ "file_path": "Cream/TinyViT/configs/22kto1k/tiny_vit_11m_22kto1k.yaml", "repo_id": "Cream", "token_count": 243 }
321
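For orientation, a minimal sketch of what this config parses to, using plain PyYAML rather than TinyViT's own config loader; the relative path below is a hypothetical example.

import yaml

with open('configs/22kto1k/tiny_vit_11m_22kto1k.yaml') as f:  # hypothetical path
    cfg = yaml.safe_load(f)

assert cfg['MODEL']['TINY_VIT']['EMBED_DIMS'] == [64, 128, 256, 448]
print(cfg['TRAIN']['EPOCHS'])  # 30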
import os import multiprocessing import torch import torch.distributed as dist import numpy as np from .aug_random import AugRandomContext from .manager import TxtManager def get_rank(): if not dist.is_available() or not dist.is_initialized(): return 0 return dist.get_rank() class DatasetWrapper(torch.utils.data.Dataset): def __init__(self, dataset, logits_path, topk, write): super().__init__() self.dataset = dataset self.logits_path = logits_path self.epoch = multiprocessing.Value('i', 0) self.topk = topk self.write_mode = write self.keys = self._get_keys() self._manager = (None, None) def __getitem__(self, index: int): if self.write_mode: return self.__getitem_for_write(index) return self.__getitem_for_read(index) def __getitem_for_write(self, index: int): # get an augmentation seed key = self.keys[index] seed = np.int32(np.random.randint(0, 1 << 31)) with AugRandomContext(seed=int(seed)): item = self.dataset[index] return (item, (key, seed)) def __getitem_for_read(self, index: int): key = self.keys[index] seed, logits_index, logits_value = self._get_saved_logits(key) with AugRandomContext(seed=seed): item = self.dataset[index] return (item, (logits_index, logits_value, np.int32(seed))) def _get_saved_logits(self, key: str): manager = self.get_manager() bstr: bytes = manager.read(key) # parse the augmentation seed seed = int(np.frombuffer(bstr[:4], dtype=np.int32)) # parse the logits index and value # copy logits_index and logits_value to avoid warning of written flag from PyTorch bstr = bstr[4:] logits_index = np.frombuffer( bstr[:self.topk * 2], dtype=np.int16).copy() bstr = bstr[self.topk * 2:] logits_value = np.frombuffer( bstr[:self.topk * 2], dtype=np.float16).copy() return seed, logits_index, logits_value def _build_manager(self, logits_path: str): # topk * [idx, value] * 2 bytes for logits + 4 bytes for seed item_size = self.topk * 2 * 2 + 4 rank = get_rank() return TxtManager(logits_path, item_size, rank) def set_epoch(self, epoch: int): self.epoch.value = epoch self._manager = (None, None) def get_manager(self): epoch = self.epoch.value if epoch != self._manager[0]: logits_path = os.path.join( self.logits_path, f'logits_top{self.topk}_epoch{self.epoch.value}') self._manager = (epoch, self._build_manager(logits_path)) return self._manager[1] def __len__(self): return len(self.dataset) def _get_keys(self): if hasattr(self.dataset, 'get_keys'): keys = self.dataset.get_keys() if self.write_mode: # we only check key unique in the write mode assert len(keys) == len(set(keys)), 'keys must be unique' return keys return [str(i) for i in range(len(self))]
Cream/TinyViT/data/augmentation/dataset_wrapper.py/0
{ "file_path": "Cream/TinyViT/data/augmentation/dataset_wrapper.py", "repo_id": "Cream", "token_count": 1448 }
322
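A sketch, with fabricated numbers, of the per-sample byte record that DatasetWrapper reads and writes: a 4-byte int32 augmentation seed followed by topk int16 logit indices and topk float16 logit values, matching item_size in _build_manager.

import numpy as np

topk = 5
seed = np.int32(12345)
logits_index = np.arange(topk, dtype=np.int16)
logits_value = np.linspace(0.9, 0.1, topk, dtype=np.float16)

record = seed.tobytes() + logits_index.tobytes() + logits_value.tobytes()
assert len(record) == topk * 2 * 2 + 4  # item_size used by _build_manager

# Unpacking mirrors DatasetWrapper._get_saved_logits.
decoded_seed = int(np.frombuffer(record[:4], dtype=np.int32))
idx = np.frombuffer(record[4:4 + topk * 2], dtype=np.int16).copy()
val = np.frombuffer(record[4 + topk * 2:], dtype=np.float16).copy()
assert decoded_seed == 12345 and len(idx) == len(val) == topk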
""" Tensorflow Preprocessing Adapter Allows use of Tensorflow preprocessing pipeline in PyTorch Transform Copyright of original Tensorflow code below. Hacked together by / Copyright 2020 Ross Wightman """ # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ImageNet preprocessing for MnasNet.""" import tensorflow as tf import numpy as np IMAGE_SIZE = 224 CROP_PADDING = 32 def distorted_bounding_box_crop(image_bytes, bbox, min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0), max_attempts=100, scope=None): """Generates cropped_image using one of the bboxes randomly distorted. See `tf.image.sample_distorted_bounding_box` for more documentation. Args: image_bytes: `Tensor` of binary image data. bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]` where each coordinate is [0, 1) and the coordinates are arranged as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole image. min_object_covered: An optional `float`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. aspect_ratio_range: An optional list of `float`s. The cropped area of the image must have an aspect ratio = width / height within this range. area_range: An optional list of `float`s. The cropped area of the image must contain a fraction of the supplied image within in this range. max_attempts: An optional `int`. Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. scope: Optional `str` for name scope. Returns: cropped image `Tensor` """ with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]): shape = tf.image.extract_jpeg_shape(image_bytes) sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( shape, bounding_boxes=bbox, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=True) bbox_begin, bbox_size, _ = sample_distorted_bounding_box # Crop the image to the specified bounding box. offset_y, offset_x, _ = tf.unstack(bbox_begin) target_height, target_width, _ = tf.unstack(bbox_size) crop_window = tf.stack([offset_y, offset_x, target_height, target_width]) image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3) return image def _at_least_x_are_equal(a, b, x): """At least `x` of `a` and `b` `Tensors` are equal.""" match = tf.equal(a, b) match = tf.cast(match, tf.int32) return tf.greater_equal(tf.reduce_sum(match), x) def _decode_and_random_crop(image_bytes, image_size, resize_method): """Make a random crop of image_size.""" bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) image = distorted_bounding_box_crop( image_bytes, bbox, min_object_covered=0.1, aspect_ratio_range=(3. / 4, 4. 
/ 3.),
        area_range=(0.08, 1.0),
        max_attempts=10,
        scope=None)
    original_shape = tf.image.extract_jpeg_shape(image_bytes)
    bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3)

    image = tf.cond(
        bad,
        lambda: _decode_and_center_crop(image_bytes, image_size, resize_method),
        lambda: tf.image.resize([image], [image_size, image_size], resize_method)[0])

    return image


def _decode_and_center_crop(image_bytes, image_size, resize_method):
    """Crops to center of image with padding then scales image_size."""
    shape = tf.image.extract_jpeg_shape(image_bytes)
    image_height = shape[0]
    image_width = shape[1]

    padded_center_crop_size = tf.cast(
        ((image_size / (image_size + CROP_PADDING)) *
         tf.cast(tf.minimum(image_height, image_width), tf.float32)),
        tf.int32)

    offset_height = ((image_height - padded_center_crop_size) + 1) // 2
    offset_width = ((image_width - padded_center_crop_size) + 1) // 2
    crop_window = tf.stack([offset_height, offset_width,
                            padded_center_crop_size, padded_center_crop_size])
    image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
    image = tf.image.resize([image], [image_size, image_size], resize_method)[0]

    return image


def _flip(image):
    """Random horizontal image flip."""
    image = tf.image.random_flip_left_right(image)
    return image


def preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'):
    """Preprocesses the given image for training.

    Args:
      image_bytes: `Tensor` representing an image binary of arbitrary size.
      use_bfloat16: `bool` for whether to use bfloat16.
      image_size: image size.
      interpolation: image interpolation method

    Returns:
      A preprocessed image `Tensor`.
    """
    resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR
    image = _decode_and_random_crop(image_bytes, image_size, resize_method)
    image = _flip(image)
    image = tf.reshape(image, [image_size, image_size, 3])
    image = tf.image.convert_image_dtype(
        image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
    return image


def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'):
    """Preprocesses the given image for evaluation.

    Args:
      image_bytes: `Tensor` representing an image binary of arbitrary size.
      use_bfloat16: `bool` for whether to use bfloat16.
      image_size: image size.
      interpolation: image interpolation method

    Returns:
      A preprocessed image `Tensor`.
    """
    resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR
    image = _decode_and_center_crop(image_bytes, image_size, resize_method)
    image = tf.reshape(image, [image_size, image_size, 3])
    image = tf.image.convert_image_dtype(
        image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
    return image


def preprocess_image(image_bytes,
                     is_training=False,
                     use_bfloat16=False,
                     image_size=IMAGE_SIZE,
                     interpolation='bicubic'):
    """Preprocesses the given image.

    Args:
      image_bytes: `Tensor` representing an image binary of arbitrary size.
      is_training: `bool` for whether the preprocessing is for training.
      use_bfloat16: `bool` for whether to use bfloat16.
      image_size: image size.
      interpolation: image interpolation method

    Returns:
      A preprocessed image `Tensor` with value range of [0, 255].
""" if is_training: return preprocess_for_train(image_bytes, use_bfloat16, image_size, interpolation) else: return preprocess_for_eval(image_bytes, use_bfloat16, image_size, interpolation) class TfPreprocessTransform: def __init__(self, is_training=False, size=224, interpolation='bicubic'): self.is_training = is_training self.size = size[0] if isinstance(size, tuple) else size self.interpolation = interpolation self._image_bytes = None self.process_image = self._build_tf_graph() self.sess = None def _build_tf_graph(self): with tf.device('/cpu:0'): self._image_bytes = tf.placeholder( shape=[], dtype=tf.string, ) img = preprocess_image( self._image_bytes, self.is_training, False, self.size, self.interpolation) return img def __call__(self, image_bytes): if self.sess is None: self.sess = tf.Session() img = self.sess.run(self.process_image, feed_dict={self._image_bytes: image_bytes}) img = img.round().clip(0, 255).astype(np.uint8) if img.ndim < 3: img = np.expand_dims(img, axis=-1) img = np.rollaxis(img, 2) # HWC to CHW return img
Cream/TinyViT/data/augmentation/tf_preprocessing.py/0
{ "file_path": "Cream/TinyViT/data/augmentation/tf_preprocessing.py", "repo_id": "Cream", "token_count": 3751 }
323
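Usage sketch for TfPreprocessTransform, not part of the original file: it expects raw JPEG bytes rather than a decoded PIL image and returns a CHW uint8 numpy array. The image path is hypothetical, and since the class builds a tf.placeholder / tf.Session graph it assumes a TF1-compatible runtime.

transform = TfPreprocessTransform(is_training=False, size=224, interpolation='bicubic')

with open('some_image.jpg', 'rb') as f:  # hypothetical file
    image_bytes = f.read()

chw = transform(image_bytes)
print(chw.shape, chw.dtype)  # (3, 224, 224) uint8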
from .build import build_model
Cream/TinyViT/models/__init__.py/0
{ "file_path": "Cream/TinyViT/models/__init__.py", "repo_id": "Cream", "token_count": 8 }
324
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ COCO dataset which returns image_id for evaluation. Mostly copy-paste from https://github.com/pytorch/vision/blob/13b35ff/references/detection/coco_utils.py """ from pathlib import Path from typing import List, Any import torch import torch.utils.data import torchvision from pycocotools import mask as coco_mask from PIL import Image from io import BytesIO import os import zipfile import datasets.transforms as T ZIPS = dict() def get_zip_handle(fname): global ZIPS if fname not in ZIPS: handle = zipfile.ZipFile(fname, 'r') ZIPS[fname] = handle return ZIPS[fname] READ_IMAGE_IF_EXISTED = True def my_open(root, fname): global READ_IMAGE_IF_EXISTED ''' root: xxx/train2017 fname: file ''' root = str(root) if READ_IMAGE_IF_EXISTED: image_fname = os.path.join(root, fname) try: return open(image_fname, 'rb').read() except: # switch to reading zip file because image file not found READ_IMAGE_IF_EXISTED = False zip_fname = root + '.zip' handle = get_zip_handle(zip_fname) base_name = os.path.basename(root) zname = f'{base_name}/{fname}' return handle.read(zname) def my_Image_open(root, fname): ''' root: xxx/train2017 fname: file ''' iob = BytesIO(my_open(root, fname)) return Image.open(iob) class CocoDetection(torchvision.datasets.CocoDetection): def __init__(self, img_folder, ann_file, transforms, return_masks): super(CocoDetection, self).__init__(img_folder, ann_file) self._transforms = transforms self.prepare = ConvertCocoPolysToMask(return_masks) def __getitem__(self, idx): id = self.ids[idx] img = self._load_image(id) target = self._load_target(id) if self.transforms is not None: img, target = self.transforms(img, target) image_id = self.ids[idx] target = {'image_id': image_id, 'annotations': target} img, target = self.prepare(img, target) if self._transforms is not None: img, target = self._transforms(img, target) return img, target def _load_image(self, id: int) -> Image.Image: path = self.coco.loadImgs(id)[0]["file_name"] return my_Image_open(self.root, path).convert('RGB') def _load_target(self, id) -> List[Any]: return self.coco.loadAnns(self.coco.getAnnIds(id)) def convert_coco_poly_to_mask(segmentations, height, width): masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = torch.as_tensor(mask, dtype=torch.uint8) mask = mask.any(dim=2) masks.append(mask) if masks: masks = torch.stack(masks, dim=0) else: masks = torch.zeros((0, height, width), dtype=torch.uint8) return masks class ConvertCocoPolysToMask(object): def __init__(self, return_masks=False): self.return_masks = return_masks def __call__(self, image, target): w, h = image.size image_id = target["image_id"] image_id = torch.tensor([image_id]) anno = target["annotations"] anno = [obj for obj in anno if 'iscrowd' not in obj or obj['iscrowd'] == 0] boxes = [obj["bbox"] for obj in anno] # guard against no boxes via resizing boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2].clamp_(min=0, max=w) boxes[:, 1::2].clamp_(min=0, max=h) classes = [obj["category_id"] for obj in anno] classes = torch.tensor(classes, dtype=torch.int64) if self.return_masks: segmentations = [obj["segmentation"] for obj in anno] masks = convert_coco_poly_to_mask(segmentations, h, w) keypoints = None if anno and "keypoints" in anno[0]: keypoints = [obj["keypoints"] for obj in anno] keypoints = 
torch.as_tensor(keypoints, dtype=torch.float32) num_keypoints = keypoints.shape[0] if num_keypoints: keypoints = keypoints.view(num_keypoints, -1, 3) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) boxes = boxes[keep] classes = classes[keep] if self.return_masks: masks = masks[keep] if keypoints is not None: keypoints = keypoints[keep] target = {} target["boxes"] = boxes target["labels"] = classes if self.return_masks: target["masks"] = masks target["image_id"] = image_id if keypoints is not None: target["keypoints"] = keypoints # for conversion to coco api area = torch.tensor([obj["area"] for obj in anno]) iscrowd = torch.tensor([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno]) target["area"] = area[keep] target["iscrowd"] = iscrowd[keep] target["orig_size"] = torch.as_tensor([int(h), int(w)]) target["size"] = torch.as_tensor([int(h), int(w)]) return image, target def make_coco_transforms(image_set): normalize = T.Compose([ T.ToTensor(), T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]) scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800] if image_set == 'train': return T.Compose([ T.RandomHorizontalFlip(), T.RandomSelect( T.RandomResize(scales, max_size=1333), T.Compose([ T.RandomResize([400, 500, 600]), T.RandomSizeCrop(384, 600), T.RandomResize(scales, max_size=1333), ]) ), normalize, ]) if image_set == 'val': return T.Compose([ T.RandomResize([800], max_size=1333), normalize, ]) raise ValueError(f'unknown {image_set}') def build(image_set, args): root = Path(args.coco_path) assert root.exists(), f'provided COCO path {root} does not exist' mode = 'instances' PATHS = { "train": (root / "train2017", root / "annotations" / f'{mode}_train2017.json'), "val": (root / "val2017", root / "annotations" / f'{mode}_val2017.json'), } img_folder, ann_file = PATHS[image_set] dataset = CocoDetection(img_folder, ann_file, transforms=make_coco_transforms(image_set), return_masks=args.masks) return dataset
Cream/iRPE/DETR-with-iRPE/datasets/coco.py/0
{ "file_path": "Cream/iRPE/DETR-with-iRPE/datasets/coco.py", "repo_id": "Cream", "token_count": 3082 }
325
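A toy run of ConvertCocoPolysToMask (masks disabled) with a fabricated image and annotation, illustrating the xywh-to-xyxy box conversion performed above; it assumes the class definition above is in scope.

from PIL import Image

image = Image.new('RGB', (640, 480))
target = {
    'image_id': 42,
    'annotations': [{
        'bbox': [10.0, 20.0, 100.0, 50.0],  # COCO-style [x, y, w, h]
        'category_id': 3,
        'area': 5000.0,
        'iscrowd': 0,
    }],
}

prepare = ConvertCocoPolysToMask(return_masks=False)
image, target = prepare(image, target)
print(target['boxes'])   # tensor([[ 10.,  20., 110.,  70.]])  xyxy, clamped to the image
print(target['labels'])  # tensor([3])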
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ This file provides the definition of the convolutional heads used to predict masks, as well as the losses """ import io from collections import defaultdict from typing import List, Optional import torch import torch.nn as nn import torch.nn.functional as F from torch import Tensor from PIL import Image import util.box_ops as box_ops from util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list try: from panopticapi.utils import id2rgb, rgb2id except ImportError: pass class DETRsegm(nn.Module): def __init__(self, detr, freeze_detr=False): super().__init__() self.detr = detr if freeze_detr: for p in self.parameters(): p.requires_grad_(False) hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0) self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim) def forward(self, samples: NestedTensor): if isinstance(samples, (list, torch.Tensor)): samples = nested_tensor_from_tensor_list(samples) features, pos = self.detr.backbone(samples) bs = features[-1].tensors.shape[0] src, mask = features[-1].decompose() assert mask is not None src_proj = self.detr.input_proj(src) hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1]) outputs_class = self.detr.class_embed(hs) outputs_coord = self.detr.bbox_embed(hs).sigmoid() out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]} if self.detr.aux_loss: out['aux_outputs'] = self.detr._set_aux_loss(outputs_class, outputs_coord) # FIXME h_boxes takes the last one computed, keep this in mind bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask) seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors]) outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1]) out["pred_masks"] = outputs_seg_masks return out def _expand(tensor, length: int): return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1) class MaskHeadSmallConv(nn.Module): """ Simple convolutional head, using group norm. 
Upsampling is done using a FPN approach """ def __init__(self, dim, fpn_dims, context_dim): super().__init__() inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64] self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1) self.gn1 = torch.nn.GroupNorm(8, dim) self.lay2 = torch.nn.Conv2d(dim, inter_dims[1], 3, padding=1) self.gn2 = torch.nn.GroupNorm(8, inter_dims[1]) self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1) self.gn3 = torch.nn.GroupNorm(8, inter_dims[2]) self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1) self.gn4 = torch.nn.GroupNorm(8, inter_dims[3]) self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1) self.gn5 = torch.nn.GroupNorm(8, inter_dims[4]) self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1) self.dim = dim self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1) self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1) self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_uniform_(m.weight, a=1) nn.init.constant_(m.bias, 0) def forward(self, x: Tensor, bbox_mask: Tensor, fpns: List[Tensor]): x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1) x = self.lay1(x) x = self.gn1(x) x = F.relu(x) x = self.lay2(x) x = self.gn2(x) x = F.relu(x) cur_fpn = self.adapter1(fpns[0]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") x = self.lay3(x) x = self.gn3(x) x = F.relu(x) cur_fpn = self.adapter2(fpns[1]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") x = self.lay4(x) x = self.gn4(x) x = F.relu(x) cur_fpn = self.adapter3(fpns[2]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") x = self.lay5(x) x = self.gn5(x) x = F.relu(x) x = self.out_lay(x) return x class MHAttentionMap(nn.Module): """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)""" def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True): super().__init__() self.num_heads = num_heads self.hidden_dim = hidden_dim self.dropout = nn.Dropout(dropout) self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias) self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias) nn.init.zeros_(self.k_linear.bias) nn.init.zeros_(self.q_linear.bias) nn.init.xavier_uniform_(self.k_linear.weight) nn.init.xavier_uniform_(self.q_linear.weight) self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5 def forward(self, q, k, mask: Optional[Tensor] = None): q = self.q_linear(q) k = F.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias) qh = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads) kh = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1]) weights = torch.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh) if mask is not None: weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float("-inf")) weights = F.softmax(weights.flatten(2), dim=-1).view(weights.size()) weights = self.dropout(weights) return weights def dice_loss(inputs, targets, num_boxes): """ Compute the DICE loss, similar to 
generalized IOU for masks Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). """ inputs = inputs.sigmoid() inputs = inputs.flatten(1) numerator = 2 * (inputs * targets).sum(1) denominator = inputs.sum(-1) + targets.sum(-1) loss = 1 - (numerator + 1) / (denominator + 1) return loss.sum() / num_boxes def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): """ Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). alpha: (optional) Weighting factor in range (0,1) to balance positive vs negative examples. Default = -1 (no weighting). gamma: Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Returns: Loss tensor """ prob = inputs.sigmoid() ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") p_t = prob * targets + (1 - prob) * (1 - targets) loss = ce_loss * ((1 - p_t) ** gamma) if alpha >= 0: alpha_t = alpha * targets + (1 - alpha) * (1 - targets) loss = alpha_t * loss return loss.mean(1).sum() / num_boxes class PostProcessSegm(nn.Module): def __init__(self, threshold=0.5): super().__init__() self.threshold = threshold @torch.no_grad() def forward(self, results, outputs, orig_target_sizes, max_target_sizes): assert len(orig_target_sizes) == len(max_target_sizes) max_h, max_w = max_target_sizes.max(0)[0].tolist() outputs_masks = outputs["pred_masks"].squeeze(2) outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False) outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu() for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)): img_h, img_w = t[0], t[1] results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1) results[i]["masks"] = F.interpolate( results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest" ).byte() return results class PostProcessPanoptic(nn.Module): """This class converts the output of the model to the final panoptic result, in the format expected by the coco panoptic API """ def __init__(self, is_thing_map, threshold=0.85): """ Parameters: is_thing_map: This is a whose keys are the class ids, and the values a boolean indicating whether the class is a thing (True) or a stuff (False) class threshold: confidence threshold: segments with confidence lower than this will be deleted """ super().__init__() self.threshold = threshold self.is_thing_map = is_thing_map def forward(self, outputs, processed_sizes, target_sizes=None): """ This function computes the panoptic prediction from the model's predictions. Parameters: outputs: This is a dict coming directly from the model. See the model doc for the content. processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the model, ie the size after data augmentation but before batching. target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size of each prediction. 
If left to None, it will default to the processed_sizes """ if target_sizes is None: target_sizes = processed_sizes assert len(processed_sizes) == len(target_sizes) out_logits, raw_masks, raw_boxes = outputs["pred_logits"], outputs["pred_masks"], outputs["pred_boxes"] assert len(out_logits) == len(raw_masks) == len(target_sizes) preds = [] def to_tuple(tup): if isinstance(tup, tuple): return tup return tuple(tup.cpu().tolist()) for cur_logits, cur_masks, cur_boxes, size, target_size in zip( out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes ): # we filter empty queries and detection below threshold scores, labels = cur_logits.softmax(-1).max(-1) keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & (scores > self.threshold) cur_scores, cur_classes = cur_logits.softmax(-1).max(-1) cur_scores = cur_scores[keep] cur_classes = cur_classes[keep] cur_masks = cur_masks[keep] cur_masks = interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1) cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep]) h, w = cur_masks.shape[-2:] assert len(cur_boxes) == len(cur_classes) # It may be that we have several predicted masks for the same stuff class. # In the following, we track the list of masks ids for each stuff class (they are merged later on) cur_masks = cur_masks.flatten(1) stuff_equiv_classes = defaultdict(lambda: []) for k, label in enumerate(cur_classes): if not self.is_thing_map[label.item()]: stuff_equiv_classes[label.item()].append(k) def get_ids_area(masks, scores, dedup=False): # This helper function creates the final panoptic segmentation image # It also returns the area of the masks that appears on the image m_id = masks.transpose(0, 1).softmax(-1) if m_id.shape[-1] == 0: # We didn't detect any mask :( m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device) else: m_id = m_id.argmax(-1).view(h, w) if dedup: # Merge the masks corresponding to the same stuff class for equiv in stuff_equiv_classes.values(): if len(equiv) > 1: for eq_id in equiv: m_id.masked_fill_(m_id.eq(eq_id), equiv[0]) final_h, final_w = to_tuple(target_size) seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy())) seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST) np_seg_img = ( torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy() ) m_id = torch.from_numpy(rgb2id(np_seg_img)) area = [] for i in range(len(scores)): area.append(m_id.eq(i).sum().item()) return area, seg_img area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True) if cur_classes.numel() > 0: # We know filter empty masks as long as we find some while True: filtered_small = torch.as_tensor( [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device ) if filtered_small.any().item(): cur_scores = cur_scores[~filtered_small] cur_classes = cur_classes[~filtered_small] cur_masks = cur_masks[~filtered_small] area, seg_img = get_ids_area(cur_masks, cur_scores) else: break else: cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device) segments_info = [] for i, a in enumerate(area): cat = cur_classes[i].item() segments_info.append({"id": i, "isthing": self.is_thing_map[cat], "category_id": cat, "area": a}) del cur_classes with io.BytesIO() as out: seg_img.save(out, format="PNG") predictions = {"png_string": out.getvalue(), "segments_info": segments_info} preds.append(predictions) return preds
Cream/iRPE/DETR-with-iRPE/models/segmentation.py/0
{ "file_path": "Cream/iRPE/DETR-with-iRPE/models/segmentation.py", "repo_id": "Cream", "token_count": 7422 }
326
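A quick numeric check of the two mask losses defined above, using fabricated logits and targets (one query, four mask "pixels"):

import torch

logits = torch.tensor([[4.0, -4.0, 4.0, -4.0]])
targets = torch.tensor([[1.0, 0.0, 1.0, 0.0]])

print(dice_loss(logits, targets, num_boxes=1))           # ~0.014, near-perfect prediction
print(sigmoid_focal_loss(logits, targets, num_boxes=1))  # small focal loss for the same reason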
from .build import build_dataloader from .imagenet.real_labels import RealLabelsImagenet
CvT/lib/dataset/__init__.py/0
{ "file_path": "CvT/lib/dataset/__init__.py", "repo_id": "CvT", "token_count": 29 }
327
from __future__ import absolute_import from __future__ import division from __future__ import print_function from datetime import timedelta from pathlib import Path import os import logging import shutil import time import tensorwatch as tw import torch import torch.backends.cudnn as cudnn from utils.comm import comm from ptflops import get_model_complexity_info def setup_logger(final_output_dir, rank, phase): time_str = time.strftime('%Y-%m-%d-%H-%M') log_file = '{}_{}_rank{}.txt'.format(phase, time_str, rank) final_log_file = os.path.join(final_output_dir, log_file) head = '%(asctime)-15s:[P:%(process)d]:' + comm.head + ' %(message)s' logging.basicConfig( filename=str(final_log_file), format=head ) logger = logging.getLogger() logger.setLevel(logging.INFO) console = logging.StreamHandler() console.setFormatter( logging.Formatter(head) ) logging.getLogger('').addHandler(console) def create_logger(cfg, cfg_name, phase='train'): root_output_dir = Path(cfg.OUTPUT_DIR) dataset = cfg.DATASET.DATASET cfg_name = cfg.NAME final_output_dir = root_output_dir / dataset / cfg_name print('=> creating {} ...'.format(root_output_dir)) root_output_dir.mkdir(parents=True, exist_ok=True) print('=> creating {} ...'.format(final_output_dir)) final_output_dir.mkdir(parents=True, exist_ok=True) print('=> setup logger ...') setup_logger(final_output_dir, cfg.RANK, phase) return str(final_output_dir) def init_distributed(args): args.num_gpus = int(os.environ["WORLD_SIZE"]) \ if "WORLD_SIZE" in os.environ else 1 args.distributed = args.num_gpus > 1 if args.distributed: print("=> init process group start") torch.cuda.set_device(args.local_rank) torch.distributed.init_process_group( backend="nccl", init_method="env://", timeout=timedelta(minutes=180)) comm.local_rank = args.local_rank print("=> init process group end") def setup_cudnn(config): cudnn.benchmark = config.CUDNN.BENCHMARK torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC torch.backends.cudnn.enabled = config.CUDNN.ENABLED def count_parameters(model): params = sum(p.numel() for p in model.parameters() if p.requires_grad) return params/1000000 def summary_model_on_master(model, config, output_dir, copy): if comm.is_main_process(): this_dir = os.path.dirname(__file__) shutil.copy2( os.path.join(this_dir, '../models', config.MODEL.NAME + '.py'), output_dir ) logging.info('=> {}'.format(model)) try: num_params = count_parameters(model) logging.info("Trainable Model Total Parameter: \t%2.1fM" % num_params) except Exception: logging.error('=> error when counting parameters') if config.MODEL_SUMMARY: try: logging.info('== model_stats by tensorwatch ==') df = tw.model_stats( model, (1, 3, config.TRAIN.IMAGE_SIZE[1], config.TRAIN.IMAGE_SIZE[0]) ) df.to_html(os.path.join(output_dir, 'model_summary.html')) df.to_csv(os.path.join(output_dir, 'model_summary.csv')) msg = '*'*20 + ' Model summary ' + '*'*20 logging.info( '\n{msg}\n{summary}\n{msg}'.format( msg=msg, summary=df.iloc[-1] ) ) logging.info('== model_stats by tensorwatch ==') except Exception: logging.error('=> error when run model_stats') try: logging.info('== get_model_complexity_info by ptflops ==') macs, params = get_model_complexity_info( model, (3, config.TRAIN.IMAGE_SIZE[1], config.TRAIN.IMAGE_SIZE[0]), as_strings=True, print_per_layer_stat=True, verbose=True ) logging.info(f'=> FLOPs: {macs:<8}, params: {params:<8}') logging.info('== get_model_complexity_info by ptflops ==') except Exception: logging.error('=> error when run get_model_complexity_info') def resume_checkpoint(model, 
optimizer, config, output_dir, in_epoch): best_perf = 0.0 begin_epoch_or_step = 0 checkpoint = os.path.join(output_dir, 'checkpoint.pth')\ if not config.TRAIN.CHECKPOINT else config.TRAIN.CHECKPOINT if config.TRAIN.AUTO_RESUME and os.path.exists(checkpoint): logging.info( "=> loading checkpoint '{}'".format(checkpoint) ) checkpoint_dict = torch.load(checkpoint, map_location='cpu') best_perf = checkpoint_dict['perf'] begin_epoch_or_step = checkpoint_dict['epoch' if in_epoch else 'step'] state_dict = checkpoint_dict['state_dict'] model.load_state_dict(state_dict) optimizer.load_state_dict(checkpoint_dict['optimizer']) logging.info( "=> {}: loaded checkpoint '{}' ({}: {})" .format(comm.head, checkpoint, 'epoch' if in_epoch else 'step', begin_epoch_or_step) ) return best_perf, begin_epoch_or_step def save_checkpoint_on_master(model, *, distributed, model_name, optimizer, output_dir, in_epoch, epoch_or_step, best_perf): if not comm.is_main_process(): return states = model.module.state_dict() \ if distributed else model.state_dict() logging.info('=> saving checkpoint to {}'.format(output_dir)) save_dict = { 'epoch' if in_epoch else 'step': epoch_or_step + 1, 'model': model_name, 'state_dict': states, 'perf': best_perf, 'optimizer': optimizer.state_dict(), } try: torch.save(save_dict, os.path.join(output_dir, 'checkpoint.pth')) except Exception: logging.error('=> error when saving checkpoint!') def save_model_on_master(model, distributed, out_dir, fname): if not comm.is_main_process(): return try: fname_full = os.path.join(out_dir, fname) logging.info(f'=> save model to {fname_full}') torch.save( model.module.state_dict() if distributed else model.state_dict(), fname_full ) except Exception: logging.error('=> error when saving checkpoint!') def strip_prefix_if_present(state_dict, prefix): keys = sorted(state_dict.keys()) if not all(key.startswith(prefix) for key in keys): return state_dict from collections import OrderedDict stripped_state_dict = OrderedDict() for key, value in state_dict.items(): stripped_state_dict[key.replace(prefix, "")] = value return stripped_state_dict
CvT/lib/utils/utils.py/0
{ "file_path": "CvT/lib/utils/utils.py", "repo_id": "CvT", "token_count": 3469 }
328
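A minimal usage sketch for the checkpoint helpers above. The DistributedDataParallel-style state dict with a "module." prefix and the bare nn.Linear standing in for a CvT network are illustrative assumptions, not part of the original file.

import torch
from collections import OrderedDict

# Hypothetical DDP-style state dict whose keys carry the usual "module." prefix.
ddp_state = OrderedDict([
    ("module.weight", torch.zeros(2, 2)),
    ("module.bias", torch.zeros(2)),
])

# Keys are rewritten only when *every* key carries the prefix;
# mixed dicts are returned unchanged.
clean_state = strip_prefix_if_present(ddp_state, prefix="module.")

model = torch.nn.Linear(2, 2)
model.load_state_dict(clean_state)
print("trainable params (M):", count_parameters(model))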
import sys sys.path.append('../') import unittest import numpy as np import pandas as pd import shutil import os import invoker class TestErrorInput(unittest.TestCase): def setUp(self): self.__input_path = './functional_test_input_folder' self.__input_csv_file = './functional_test_input_file.csv' self.__input_parquet_file = './functional_test_input_file.parquet' self.__detect_mode = 'AnomalyOnly' self.__timestamp_column = 'timestamp' self.__value_column = 'value' self.__batch_size = 2000 self.__threshold = 0.3 self.__sensitivity = 99 self.__append_mode = True self.__output_path = './functional_test_output_directory' def tearDown(self): self.deleteDataFrameDirectory() def deleteDataFrameDirectory(self): if os.path.exists(self.__input_path): shutil.rmtree(self.__input_path) if os.path.exists(self.__input_csv_file): os.remove(self.__input_csv_file) if os.path.exists(self.__input_parquet_file): os.remove(self.__input_parquet_file) if os.path.exists(self.__output_path): shutil.rmtree(self.__output_path) def generate_input_data_frame(self, start_date: str = '2020-01-01'): df = pd.DataFrame() df['timestamp'] = pd.date_range(start=start_date, periods=200, freq='1D') df['value'] = np.sin(np.linspace(1, 20, 200)) return df def generate_input_folder(self, file_type: str = 'csv'): if not os.path.isdir(self.__input_path): os.mkdir(self.__input_path) start_dates = ['2018-01-01', '2019-01-01', '2020-01-01'] for start_date in start_dates: df = self.generate_input_data_frame(start_date) if file_type == 'csv': df.to_csv(f"{self.__input_path}/{start_date}.csv", index=False) elif file_type == 'parquet': df.to_parquet(f"{self.__input_path}/{start_date}.parquet", index=False) else: raise Exception(f'Unsupported input data type {file_type}, only csv and parquet file are allowed') def testAnomalyOnlyModeCsvFile(self): df = self.generate_input_data_frame() df.to_csv(self.__input_csv_file, index=False) invoker.invoke(self.__input_csv_file, self.__detect_mode, self.__timestamp_column, self.__value_column, self.__batch_size, self.__threshold, self.__sensitivity, self.__append_mode, self.__output_path) result = pd.read_csv(f"{self.__output_path}/output.csv") self.assertEqual(result.shape[0], 200) self.assertTrue('value' in result.columns) self.assertTrue('isAnomaly' in result.columns) self.assertTrue('score' in result.columns) self.assertTrue('expectedValue' not in result.columns) self.assertTrue('upperBoundary' not in result.columns) self.assertTrue('lowerBoundary' not in result.columns) def testAnomalyOnlyModeCsvFolder(self): self.generate_input_folder() invoker.invoke(self.__input_path, self.__detect_mode, self.__timestamp_column, self.__value_column, self.__batch_size, self.__threshold, self.__sensitivity, self.__append_mode, self.__output_path) result = pd.read_csv(f"{self.__output_path}/output.csv") self.assertEqual(result.shape[0], 600) self.assertTrue('value' in result.columns) self.assertTrue('isAnomaly' in result.columns) self.assertTrue('score' in result.columns) self.assertTrue('expectedValue' not in result.columns) self.assertTrue('upperBoundary' not in result.columns) self.assertTrue('lowerBoundary' not in result.columns) def testAnomalyOnlyModeParquetFile(self): df = self.generate_input_data_frame() df.to_parquet(self.__input_parquet_file, index=False) invoker.invoke(self.__input_parquet_file, self.__detect_mode, self.__timestamp_column, self.__value_column, self.__batch_size, self.__threshold, self.__sensitivity, self.__append_mode, self.__output_path) result = 
pd.read_csv(f"{self.__output_path}/output.csv") self.assertEqual(result.shape[0], 200) self.assertTrue('value' in result.columns) self.assertTrue('isAnomaly' in result.columns) self.assertTrue('score' in result.columns) self.assertTrue('expectedValue' not in result.columns) self.assertTrue('upperBoundary' not in result.columns) self.assertTrue('lowerBoundary' not in result.columns) def testAnomalyOnlyModeParquetFolder(self): self.generate_input_folder('parquet') invoker.invoke(self.__input_path, self.__detect_mode, self.__timestamp_column, self.__value_column, self.__batch_size, self.__threshold, self.__sensitivity, self.__append_mode, self.__output_path) result = pd.read_csv(f"{self.__output_path}/output.csv") self.assertEqual(result.shape[0], 600) self.assertTrue('value' in result.columns) self.assertTrue('isAnomaly' in result.columns) self.assertTrue('score' in result.columns) self.assertTrue('expectedValue' not in result.columns) self.assertTrue('upperBoundary' not in result.columns) self.assertTrue('lowerBoundary' not in result.columns) def testAnomalyAndMarginCsvFile(self): df = self.generate_input_data_frame() df.to_csv(self.__input_csv_file, index=False) invoker.invoke(self.__input_csv_file, "AnomalyAndMargin", self.__timestamp_column, self.__value_column, self.__batch_size, self.__threshold, self.__sensitivity, self.__append_mode, self.__output_path) result = pd.read_csv(f"{self.__output_path}/output.csv") self.assertEqual(result.shape[0], 200) self.assertTrue('value' in result.columns) self.assertTrue('isAnomaly' in result.columns) self.assertTrue('score' in result.columns) self.assertTrue('expectedValue' in result.columns) self.assertTrue('upperBoundary' in result.columns) self.assertTrue('lowerBoundary' in result.columns) def testAnomalyAndMarginCsvFolder(self): self.generate_input_folder() invoker.invoke(self.__input_path, "AnomalyAndMargin", self.__timestamp_column, self.__value_column, self.__batch_size, self.__threshold, self.__sensitivity, self.__append_mode, self.__output_path) result = pd.read_csv(f"{self.__output_path}/output.csv") self.assertEqual(result.shape[0], 600) self.assertTrue('value' in result.columns) self.assertTrue('isAnomaly' in result.columns) self.assertTrue('score' in result.columns) self.assertTrue('expectedValue' in result.columns) self.assertTrue('upperBoundary' in result.columns) self.assertTrue('lowerBoundary' in result.columns) def testAnomalyAndMarginParquetFile(self): df = self.generate_input_data_frame() df.to_parquet(self.__input_parquet_file, index=False) invoker.invoke(self.__input_parquet_file, "AnomalyAndMargin", self.__timestamp_column, self.__value_column, self.__batch_size, self.__threshold, self.__sensitivity, self.__append_mode, self.__output_path) result = pd.read_csv(f"{self.__output_path}/output.csv") self.assertEqual(result.shape[0], 200) self.assertTrue('value' in result.columns) self.assertTrue('isAnomaly' in result.columns) self.assertTrue('score' in result.columns) self.assertTrue('expectedValue' in result.columns) self.assertTrue('upperBoundary' in result.columns) self.assertTrue('lowerBoundary' in result.columns) def testAnomalyAndMarginParquetFolder(self): self.generate_input_folder('parquet') invoker.invoke(self.__input_path, "AnomalyAndMargin", self.__timestamp_column, self.__value_column, self.__batch_size, self.__threshold, self.__sensitivity, self.__append_mode, self.__output_path) result = pd.read_csv(f"{self.__output_path}/output.csv") self.assertEqual(result.shape[0], 600) self.assertTrue('value' in result.columns) 
self.assertTrue('isAnomaly' in result.columns) self.assertTrue('score' in result.columns) self.assertTrue('expectedValue' in result.columns) self.assertTrue('upperBoundary' in result.columns) self.assertTrue('lowerBoundary' in result.columns) def testBatchModeCsvFile(self): df = self.generate_input_data_frame() df.to_csv(self.__input_csv_file, index=False) invoker.invoke(self.__input_csv_file, "AnomalyAndMargin", self.__timestamp_column, self.__value_column, 66, self.__threshold, self.__sensitivity, self.__append_mode, self.__output_path) result = pd.read_csv(f"{self.__output_path}/output.csv") self.assertEqual(result.shape[0], 200) self.assertTrue('value' in result.columns) self.assertTrue('isAnomaly' in result.columns) self.assertTrue('score' in result.columns) self.assertTrue('expectedValue' in result.columns) self.assertTrue('upperBoundary' in result.columns) self.assertTrue('lowerBoundary' in result.columns) def testBatchModeCsvFolder(self): self.generate_input_folder() invoker.invoke(self.__input_path, "AnomalyAndMargin", self.__timestamp_column, self.__value_column, 66, self.__threshold, self.__sensitivity, self.__append_mode, self.__output_path) result = pd.read_csv(f"{self.__output_path}/output.csv") self.assertEqual(result.shape[0], 600) self.assertTrue('value' in result.columns) self.assertTrue('isAnomaly' in result.columns) self.assertTrue('score' in result.columns) self.assertTrue('expectedValue' in result.columns) self.assertTrue('upperBoundary' in result.columns) self.assertTrue('lowerBoundary' in result.columns) def testBatchModeParquetFile(self): df = self.generate_input_data_frame() df.to_parquet(self.__input_parquet_file, index=False) invoker.invoke(self.__input_parquet_file, "AnomalyAndMargin", self.__timestamp_column, self.__value_column, 66, self.__threshold, self.__sensitivity, self.__append_mode, self.__output_path) result = pd.read_csv(f"{self.__output_path}/output.csv") self.assertEqual(result.shape[0], 200) self.assertTrue('value' in result.columns) self.assertTrue('isAnomaly' in result.columns) self.assertTrue('score' in result.columns) self.assertTrue('expectedValue' in result.columns) self.assertTrue('upperBoundary' in result.columns) self.assertTrue('lowerBoundary' in result.columns) def testBatchModeParquetFolder(self): self.generate_input_folder('parquet') invoker.invoke(self.__input_path, "AnomalyAndMargin", self.__timestamp_column, self.__value_column, 66, self.__threshold, self.__sensitivity, self.__append_mode, self.__output_path) result = pd.read_csv(f"{self.__output_path}/output.csv") self.assertEqual(result.shape[0], 600) self.assertTrue('value' in result.columns) self.assertTrue('isAnomaly' in result.columns) self.assertTrue('score' in result.columns) self.assertTrue('expectedValue' in result.columns) self.assertTrue('upperBoundary' in result.columns) self.assertTrue('lowerBoundary' in result.columns) if __name__ == '__main__': unittest.main()
anomalydetector/aml_component/tests/test_functionality.py/0
{ "file_path": "anomalydetector/aml_component/tests/test_functionality.py", "repo_id": "anomalydetector", "token_count": 5055 }
329
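The test cases above repeat the same column assertions in every method. A possible helper, not part of the original suite, that factors them out is sketched below; `with_margin` mirrors the AnomalyAndMargin versus AnomalyOnly cases.

def assert_output_columns(test_case, result, expected_rows, with_margin):
    # Shared checks for the invoker output frame.
    test_case.assertEqual(result.shape[0], expected_rows)
    for column in ('value', 'isAnomaly', 'score'):
        test_case.assertTrue(column in result.columns)
    for column in ('expectedValue', 'upperBoundary', 'lowerBoundary'):
        test_case.assertEqual(column in result.columns, with_margin)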
""" Copyright (C) Microsoft Corporation. All rights reserved.​ ​ Microsoft Corporation ("Microsoft") grants you a nonexclusive, perpetual, royalty-free right to use, copy, and modify the software code provided by us ("Software Code"). You may not sublicense the Software Code or any use of it (except to your affiliates and to vendors to perform work on your behalf) through distribution, network access, service agreement, lease, rental, or otherwise. This license does not purport to express any claim of ownership over data you may have shared with Microsoft in the creation of the Software Code. Unless applicable law gives you more rights, Microsoft reserves all other rights not expressly granted herein, whether by implication, estoppel or otherwise. ​ ​ THE SOFTWARE CODE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL MICROSOFT OR ITS LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THE SOFTWARE CODE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import argparse from srcnn.utils import * import numpy as np import os import time def auto(epoch): path_auto = os.getcwd() + '/auto.json' with open(path_auto, 'r+') as f: store = json.load(f) data = store['data'] window = store['window'] store['epoch'] = epoch with open(path_auto, 'w+') as f: json.dump(store, f) return data, window if __name__ == '__main__': parser = argparse.ArgumentParser(description='SRCNN') parser.add_argument('--data', type=str, required=True, help='location of the data file') parser.add_argument('--window', type=int, default=128, help='window size') parser.add_argument('--lr', type=int, default=1e-6, help='learning rate') parser.add_argument('--step', type=int, default=64, help='step') parser.add_argument('--seed', type=int, default=54321, help='random seed') parser.add_argument('--load', type=bool, default=False, help='load the existed model') parser.add_argument('--save', type=str, default='snapshot', help='path to save the model') parser.add_argument('--epoch', type=int, default=10) parser.add_argument('--batch_size', type=int, default=256, help='path to save the model') parser.add_argument('--num_workers', type=int, default=8, help='number of workers of pytorch') parser.add_argument('--model', type=str, default='sr_cnn', help='model') parser.add_argument('--auto', type=bool, default=False, help='Automatic filling parameters') args = parser.parse_args() if args.auto: data, window = auto(args.epoch) else: data, window = args.data, args.window torch.cuda.manual_seed(args.seed) np.random.seed(args.seed) models = { 'sr_cnn': sr_cnn, } model = args.model root_path = os.getcwd() train_data_path = root_path + '/' + data + '_' + str(window) + '_train.json' model_path = root_path + '/' + args.save + '/' if args.load: load_path = root_path + '/' + args.load else: load_path = None total_time = 0 time_start = time.time() models[model](train_data_path, model_path, window, args.lr, args.epoch, args.batch_size, args.num_workers, load_path=load_path) time_end = time.time() total_time += time_end - time_start print('time used 
for training:', total_time, 'seconds')
anomalydetector/srcnn/train.py/0
{ "file_path": "anomalydetector/srcnn/train.py", "repo_id": "anomalydetector", "token_count": 1317 }
330
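When the script above is run with --auto, the auto() function reads "data" and "window" from an auto.json file next to the working directory and writes the current "epoch" back into it. A sketch of such a file, with an illustrative dataset name, could be produced like this before invoking train.py with --data and --auto:

import json

# Illustrative auto.json matching the keys read by auto(); "epoch" is overwritten by the script.
with open('auto.json', 'w') as f:
    json.dump({'data': 'kpi', 'window': 128, 'epoch': 0}, f)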
Archai
archai/.vscode/spellright.dict/0
{ "file_path": "archai/.vscode/spellright.dict", "repo_id": "archai", "token_count": 3 }
331
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

from typing import Dict, Union, Optional, Any
from pathlib import Path

from azure.ai.ml import MLClient
from azure.ai.ml.entities import AmlCompute, Environment
from azure.identity import DefaultAzureCredential


def get_aml_client_from_file(config_path: Union[str, Path]) -> MLClient:
    """Creates an MLClient object from a workspace config file.

    Args:
        config_path (Union[str, Path]): Path to the workspace config file.

    Returns:
        MLClient: MLClient object.
    """
    credential = DefaultAzureCredential()
    config_path = Path(config_path)

    ml_client = MLClient.from_config(
        credential=credential,
        path=config_path
    )

    return ml_client


def create_compute_cluster(
        ml_client: MLClient,
        compute_name: str,
        type: Optional[str] = "amlcompute",
        size: Optional[str] = "Standard_D14_v2",
        min_instances: Optional[int] = 0,
        max_instances: Optional[int] = 4,
        idle_time_before_scale_down: Optional[int] = 180,
        tier: Optional[str] = "Dedicated",
        **kwargs):
    """Creates a compute cluster for the workspace.

    Args:
        ml_client (MLClient): MLClient object.
        compute_name (str): Name of the (CPU/GPU) compute cluster.
        type (str, optional): Type of the compute cluster. Defaults to "amlcompute".
        size (str, optional): VM family of the compute cluster. Defaults to "Standard_D14_v2".
        min_instances (int, optional): Minimum number of running nodes when no job is running.
            Defaults to 0.
        max_instances (int, optional): Maximum number of nodes in the cluster. Defaults to 4.
        idle_time_before_scale_down (int, optional): Number of seconds a node may stay idle after
            job termination before it is scaled down. Defaults to 180.
        tier (str, optional): "Dedicated" or "LowPriority". The latter is cheaper, but jobs may be
            preempted. Defaults to "Dedicated".

    Returns:
        Compute: Compute object.
    """
    try:
        compute_cluster = ml_client.compute.get(compute_name)
        print(f"You already have a cluster named {compute_name}, we'll reuse it as is.")
    except Exception:
        cpu_compute = AmlCompute(
            name=compute_name,
            type=type,
            size=size,
            min_instances=min_instances,
            max_instances=max_instances,
            idle_time_before_scale_down=idle_time_before_scale_down,
            tier=tier,
            **kwargs
        )
        compute_cluster = ml_client.compute.begin_create_or_update(cpu_compute).result()
        print(f"AMLCompute with name {compute_cluster.name} is created, the compute size is {compute_cluster.size}")

    return compute_cluster


def create_environment_from_file(
        ml_client: MLClient,
        custom_env_name: Optional[str] = "aml-archai",
        description: Optional[str] = "Custom environment for Archai",
        tags: Optional[Dict[str, Any]] = None,
        conda_file: Optional[str] = "conda.yaml",
        image: Optional[str] = None,
        version: Optional[str] = "0.1.0",
        **kwargs) -> Environment:
    """Creates an environment from a conda file.

    Args:
        ml_client (MLClient): MLClient object.
        custom_env_name (str, optional): Name of the environment. Defaults to "aml-archai".
        description (str, optional): Description of the environment. Defaults to "Custom environment for Archai".
        tags (Dict[str, Any], optional): Tags for the environment, e.g. {"archai": "1.0.0"}. Defaults to None.
        conda_file (str, optional): Path to the conda file. Defaults to "conda.yaml".
        image (str, optional): Docker image for the environment.
        version (str, optional): Version of the environment. Defaults to "0.1.0".

    Returns:
        Environment: Environment object.
    """
    tags = tags or {"archai": "1.0.0"}

    archai_job_env = Environment(
        name=custom_env_name,
        description=description,
        tags=tags,
        conda_file=conda_file,
        image=image,
        version=version,
        **kwargs
    )
    archai_job_env = ml_client.environments.create_or_update(archai_job_env)

    print(
        f"Environment with name {archai_job_env.name} is registered to workspace, the environment version is {archai_job_env.version}")

    return archai_job_env


# TODO: How can we return the path that the output was downloaded to?
def download_job_output(
        ml_client: MLClient,
        job_name: str,
        output_name: str,
        download_path: Optional[Union[str, Path]] = "output") -> None:
    """Downloads the output of a job.

    Args:
        ml_client (MLClient): MLClient object.
        job_name (str): Name of the job.
        output_name (str): Named output to download.
        download_path (Union[str, Path], optional): Path to download the output to. Defaults to "output".

    Returns:
        None
    """
    try:
        target_job = ml_client.jobs.get(job_name)
    except Exception as e:
        print(f"{e.error}")
        return None

    if target_job.status == "Completed":
        ml_client.jobs.download(name=target_job.name,
                                download_path=Path(download_path),
                                output_name=output_name)
    else:
        print(f"Job {target_job.name} is not completed yet")
archai/archai/common/azureml_helper.py/0
{ "file_path": "archai/archai/common/azureml_helper.py", "repo_id": "archai", "token_count": 2105 }
332
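A hypothetical end-to-end use of the helpers above; the workspace config path, cluster name, conda file, and job name are placeholders, not values from the original file.

# Connect to the workspace described by a downloaded config.json.
ml_client = get_aml_client_from_file("config.json")

# Provision (or reuse) a CPU cluster and register a conda-based environment.
cluster = create_compute_cluster(ml_client, "nas-cpu-cluster", size="Standard_D14_v2")
env = create_environment_from_file(ml_client, custom_env_name="aml-archai", conda_file="conda.yaml")

# After a job named "archai_search_job" with a named output "results" completes:
download_job_output(ml_client, job_name="archai_search_job", output_name="results", download_path="output")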
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

from __future__ import annotations

import gc
import timeit
from types import TracebackType
from typing import Optional


class MeasureBlockTime:
    """Context manager that measures the time elapsed in a block of code."""

    def __init__(self, name: str, disable_gc: Optional[bool] = False, verbose: Optional[bool] = False) -> None:
        """Initialize the timer.

        Args:
            name: Name of the timer.
            disable_gc: Whether to disable the garbage collector during the time measurement.
            verbose: Whether to print the elapsed time when exiting the context manager.

        """
        self.name = name
        self.disable_gc = disable_gc
        self.verbose = verbose

    def __enter__(self) -> MeasureBlockTime:
        self.is_gc_enabled = gc.isenabled()

        if self.disable_gc:
            gc.disable()

        self.start_time = timeit.default_timer()

        return self

    def __exit__(self, exc_type: type[BaseException], exc_val: BaseException, exc_tb: TracebackType) -> bool:
        if self.disable_gc and self.is_gc_enabled:
            gc.enable()

        if self.verbose:
            print(f"{self.name}: {self.elapsed:.4g} secs")

        return False

    @property
    def elapsed(self) -> float:
        """Return the elapsed time in seconds."""

        return timeit.default_timer() - self.start_time
archai/archai/common/timing.py/0
{ "file_path": "archai/archai/common/timing.py", "repo_id": "archai", "token_count": 559 }
333
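A short usage sketch for the context manager above; the block name and the sleep call are illustrative only.

import time

# Time a block verbosely and read the elapsed time afterwards.
with MeasureBlockTime("sleep-demo", verbose=True) as timer:
    time.sleep(0.1)

# elapsed is computed from the start time on every access, so it keeps
# growing after the block exits; read it as soon as the block finishes.
print(f"measured {timer.elapsed:.3f}s")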
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. import glob import os from typing import Optional, Tuple import numpy as np import torch from archai.common.file_utils import get_full_path from archai.common.ordered_dict_logger import OrderedDictLogger from archai.datasets.nlp.tokenizer_utils.bbpe_tokenizer import BbpeTokenizer from archai.datasets.nlp.tokenizer_utils.gpt2_tokenizer import Gpt2Tokenizer from archai.datasets.nlp.tokenizer_utils.tokenizer_base import TokenizerBase from archai.datasets.nlp.tokenizer_utils.word_tokenizer import WordTokenizer logger = OrderedDictLogger(source=__name__) def _delete_file(file_path: str) -> bool: if os.path.isfile(file_path): os.remove(file_path) return True return False class Corpus: """Create and train the vocabulary/tokenizer, load the dataset and encode the data.""" def __init__( self, dataset_name: str, dataset_dir: str, cache_dir: str, vocab_type: str, vocab_size: Optional[int] = None, refresh_cache: Optional[bool] = False, ) -> None: """Initialize the `Corpus` class by defining attributes and creating cache-related paths. Args: dataset_name: Name of the dataset. dataset_dir: Path to the dataset folder. cache_dir: Path to the cache folder. vocab_type: Type of vocabulary/tokenizer. Valid options are `word`, `bbpe`, `gpt2`, or `bpe`. vocab_size: Vocabulary size. refresh_cache: Whether to refresh the cache. """ self.dataset_name = dataset_name self.dataset_dir = dataset_dir self.vocab_type = vocab_type self.vocab_size = vocab_size # Corpus cache is created using dataset/vocab_type/vocab_size path self.corpus_cache_dir = get_full_path( os.path.join(cache_dir, str(dataset_name), str(vocab_type), str(vocab_size)), create_folder=True ) # Encoded dataset (.npy files) cache paths self.train_cache_filepath = os.path.join(self.corpus_cache_dir, "train.npy") self.valid_cache_filepath = os.path.join(self.corpus_cache_dir, "valid.npy") self.test_cache_filepath = os.path.join(self.corpus_cache_dir, "test.npy") # Tokenizer-related files cache paths self.vocab_cache_dir = os.path.join(self.corpus_cache_dir, "vocab") self.refresh_cache = refresh_cache if refresh_cache: logger.info("Refreshing cache ...") self._clear_cache() @staticmethod def _create_vocab( dataset_name: str, vocab_type: str, vocab_cache_dir: str, vocab_size: Optional[int] = None ) -> TokenizerBase: if vocab_type == "word": bos_token, eos_token, lower_case = None, "<eos>", False if dataset_name in ["wt103", "wt2"] or dataset_name.startswith("olx_"): pass elif dataset_name == "ptb": lower_case = True elif dataset_name == "lm1b": bos_token, eos_token = "<S>", "<S>" # `<S>` is added for double EOS elif dataset_name in ["enwik8", "text8"]: eos_token, lower_case = None, True else: raise RuntimeError(f"Dataset: {dataset_name} is not supported yet.") vocab = WordTokenizer( save_path=vocab_cache_dir, vocab_size=vocab_size, bos_token=bos_token, eos_token=eos_token, lower_case=lower_case, ) elif vocab_type == "bbpe": vocab = BbpeTokenizer(save_path=vocab_cache_dir, vocab_size=vocab_size or 50257) elif vocab_type == "gpt2": # Default vocab_size for GPT-2 is 50257 vocab = Gpt2Tokenizer(save_path=vocab_cache_dir, vocab_size=vocab_size or 50257) else: raise RuntimeError(f"Vocabulary: {vocab_type} is not supported yet.") return vocab def _clear_cache(self) -> None: self.train = self.valid = self.test = self.vocab = None def _dataset_filepaths(self) -> Tuple[str, str, str]: train_file_name, valid_file_name, test_file_name = "train.txt", "valid.txt", "test.txt" if self.dataset_name in 
["wt2", "wt103"]: train_file_name, valid_file_name, test_file_name = ( "wiki.train.tokens", "wiki.valid.tokens", "wiki.test.tokens", ) if self.dataset_name == "lm1b": train_path_pattern = os.path.join( self.dataset_dir, "1-billion-word-language-modeling-benchmark-r13output", "training-monolingual.tokenized.shuffled", "news.en-*", ) train_file_name_path = glob.glob(train_path_pattern) else: train_file_name_path = os.path.join(self.dataset_dir, train_file_name) valid_file_name_path = os.path.join(self.dataset_dir, valid_file_name) test_file_name_path = os.path.join(self.dataset_dir, test_file_name) return ( train_file_name_path, valid_file_name_path, test_file_name_path, ) def _train_vocab(self) -> None: # If vocabulary cache does not exist if self.refresh_cache or not self.vocab.is_trained(): logger.info("Training vocabulary ...") train_filepath, _, _ = self._dataset_filepaths() if not isinstance(train_filepath, list): train_filepath = [train_filepath] self.vocab.train(train_filepath) logger.info("Vocabulary trained.") else: self.vocab.load() logger.debug(f"Loading vocabulary ({self.vocab_type}, {self.vocab_size}) from: {self.vocab_cache_dir}") def _create_train_vocab(self) -> TokenizerBase: self.vocab = Corpus._create_vocab( self.dataset_name, self.vocab_type, self.vocab_cache_dir, vocab_size=self.vocab_size ) self._train_vocab() return self.vocab def _encode_files(self) -> None: train_filepath, valid_filepath, test_filepath = self._dataset_filepaths() if self.dataset_name == "lm1b": self.train = train_filepath else: self.train = self.vocab.encode_file(train_filepath) self.valid = self.vocab.encode_file(valid_filepath) self.test = self.vocab.encode_file(test_filepath) def train_and_encode(self) -> None: """Train the vocabulary/tokenizer and encodes the corpus.""" logger.info( f"Corpus: dataset = {self.dataset_name} | vocab_type = {self.vocab_type} | vocab_size = {self.vocab_size}" ) self._create_train_vocab() self._encode_files() train_size = f"{len(self.train)} files" if isinstance(self.train, list) else self.train.size(0) logger.debug(f"Size: train = {train_size} | valid = {self.valid.size(0)} | test = {self.test.size(0)}") def load(self) -> bool: """Load a pre-trained corpus. Returns: Whether pre-trained corpus has been successfully loaded. 
""" # Ensures tokenizer cache is loaded as well self.vocab = Corpus._create_vocab( self.dataset_name, self.vocab_type, self.vocab_cache_dir, vocab_size=self.vocab_size ) cache_exists = ( os.path.exists(self.train_cache_filepath) and os.path.exists(self.valid_cache_filepath) and os.path.exists(self.test_cache_filepath) ) # If .npy files exists, corpus cache is available if not self.refresh_cache and cache_exists and self.vocab is not None and self.vocab.is_trained(): logger.info(f"Loading cache from: {self.train_cache_filepath}") self.vocab.load() self.train = torch.from_numpy(np.load(self.train_cache_filepath)) self.valid = torch.from_numpy(np.load(self.valid_cache_filepath)) self.test = torch.from_numpy(np.load(self.test_cache_filepath)) logger.debug( f"Size: train = {self.train.size(0)} | valid = {self.valid.size(0)} | test = {self.test.size(0)}" ) return True logger.info("Clearing and rebuilding cache ...") self._clear_cache() _delete_file(self.train_cache_filepath) _delete_file(self.valid_cache_filepath) _delete_file(self.test_cache_filepath) return False def save_cache(self) -> None: """Save the cache.""" assert self.vocab is not None and self.vocab.is_trained() np.save(self.train_cache_filepath, self.train.numpy()) np.save(self.valid_cache_filepath, self.valid.numpy()) np.save(self.test_cache_filepath, self.test.numpy())
archai/archai/datasets/nlp/nvidia_dataset_provider_utils.py/0
{ "file_path": "archai/archai/datasets/nlp/nvidia_dataset_provider_utils.py", "repo_id": "archai", "token_count": 4171 }
334
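A minimal sketch of how the Corpus class above could be driven directly, assuming a WikiText-103 layout on disk with a GPT-2 tokenizer; the directory paths are placeholders.

# Build (or reuse) the tokenizer and encoded dataset caches.
corpus = Corpus(
    dataset_name="wt103",
    dataset_dir="data/wikitext-103",
    cache_dir="cache",
    vocab_type="gpt2",
    vocab_size=50257,
)

# Reuse the cached .npy encodings when available, otherwise tokenize and cache them.
if not corpus.load():
    corpus.train_and_encode()
    corpus.save_cache()

train_ids = corpus.train  # encoded token ids (a tensor for non-lm1b datasets)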
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

from typing import Any, Dict, Optional


class ArchaiModel:
    """Model wrapper with an architecture identifier and an optional metadata dictionary."""

    def __init__(self, arch: Any, archid: str, metadata: Optional[Dict[str, Any]] = None):
        """Initialize the Archai-based model.

        Args:
            arch: Model object (e.g., torch.nn.Module).
            archid: String identifier of the `arch` object. It is used to deduplicate models
                of the same architecture, so architecture hashes are preferred. `archid` should
                only identify the neural network architecture, not the model weights.
            metadata: Optional model metadata dictionary.

        """
        self.arch = arch
        self.archid = archid
        self.metadata = metadata or {}

    def __repr__(self) -> str:
        return f"ArchaiModel(\n\tarchid={self.archid}, \n\t" f"metadata={self.metadata}, \n\tarch={self.arch}\n)"

    def __str__(self) -> str:
        return repr(self)

    def clear(self) -> None:
        """Clear the architecture from memory.

        Sometimes, after evaluating an `ArchaiModel`, there is no need to keep its architecture
        instantiated, so clearing it optimizes memory usage.

        """
        self.arch = None
archai/archai/discrete_search/api/archai_model.py/0
{ "file_path": "archai/archai/discrete_search/api/archai_model.py", "repo_id": "archai", "token_count": 489 }
335
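A small usage sketch for the wrapper above; the network, the SHA-1-based archid, and the metadata keys are illustrative choices, not a prescribed convention.

import hashlib
from torch import nn

net = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 1))

# Hash the architecture description (not the weights) to obtain a stable archid.
archid = hashlib.sha1(str(net).encode("utf-8")).hexdigest()[:8]

model = ArchaiModel(arch=net, archid=archid, metadata={"source": "example"})
print(model)
model.clear()  # drop the nn.Module once it is no longer needed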
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import ray
from overrides import overrides

from archai.api.dataset_provider import DatasetProvider
from archai.discrete_search.api.archai_model import ArchaiModel
from archai.discrete_search.api.model_evaluator import (
    AsyncModelEvaluator,
    ModelEvaluator,
)
from archai.discrete_search.api.search_space import DiscreteSearchSpace
from archai.common.file_utils import TemporaryFiles


def _ray_wrap_training_fn(training_fn) -> Callable:
    def _stateful_training_fn(
        arch: ArchaiModel, dataset: DatasetProvider, budget: float, training_state: Optional[Dict[str, Any]] = None
    ) -> Tuple[ArchaiModel, float, Dict[str, Any]]:
        metric_result, training_state = training_fn(arch, dataset, budget, training_state)
        return arch, metric_result, training_state

    return _stateful_training_fn


class ProgressiveTraining(ModelEvaluator):
    """Progressive training evaluator."""

    def __init__(self, search_space: DiscreteSearchSpace, dataset: DatasetProvider, training_fn: Callable) -> None:
        """Initialize the evaluator.

        Args:
            search_space: Search space.
            dataset: Dataset provider.
            training_fn: Training function.

        """
        self.search_space = search_space
        self.training_fn = training_fn
        self.dataset = dataset

        # Training state buffer (e.g., optimizer state) for each architecture id
        self.training_states = {}

    @overrides
    def evaluate(self, arch: ArchaiModel, budget: Optional[float] = None) -> float:
        # Tries to retrieve the previous training state
        tr_state = self.training_states.get(arch.archid, None)

        # Computes the metric and updates the training state
        metric_result, updated_tr_state = self.training_fn(arch, self.dataset, budget, tr_state)
        self.training_states[arch.archid] = updated_tr_state

        return metric_result


class RayProgressiveTraining(AsyncModelEvaluator):
    """Progressive training evaluator using Ray."""

    def __init__(
        self,
        search_space: DiscreteSearchSpace,
        dataset: DatasetProvider,
        training_fn: Callable,
        timeout: Optional[float] = None,
        force_stop: Optional[bool] = False,
        **ray_kwargs
    ) -> None:
        """Initialize the evaluator.

        Args:
            search_space: Search space.
            dataset: Dataset provider.
            training_fn: Training function.
            timeout: Timeout (in seconds) for fetching results.
            force_stop: Whether to force-stop all pending training jobs when fetching results.

        """
        self.search_space = search_space
        self.dataset = dataset

        if ray_kwargs:
            self.compute_fn = ray.remote(**ray_kwargs)(_ray_wrap_training_fn(training_fn))
        else:
            self.compute_fn = ray.remote(_ray_wrap_training_fn(training_fn))

        self.timeout = timeout
        self.force_stop = force_stop

        # Buffer that stores original model references from `send` calls
        # to update weights after training is complete
        self.models = []

        # Ray training job object refs
        self.results_ref = []

        # Training state buffer (e.g., optimizer state) for each architecture id
        self.training_states = {}

    @overrides
    def send(self, arch: ArchaiModel, budget: Optional[float] = None) -> None:
        # Stores the original model reference
        self.models.append(arch)

        current_tr_state = self.training_states.get(arch.archid, None)
        self.results_ref.append(self.compute_fn.remote(arch, self.dataset, budget, current_tr_state))

    @overrides
    def fetch_all(self) -> List[Union[float, None]]:
        results = [None] * len(self.results_ref)

        # Fetches training job results
        if not self.timeout:
            results = ray.get(self.results_ref, timeout=self.timeout)
        else:
            # Maps each object from the object_refs list to its index
            ref2idx = {ref: i for i, ref in enumerate(self.results_ref)}

            # Gets all results available within `self.timeout` seconds
            complete_objs, incomplete_objs = ray.wait(
                self.results_ref, timeout=self.timeout, num_returns=len(self.results_ref)
            )
            partial_results = ray.get(complete_objs)

            for ref, result in zip(complete_objs, partial_results):
                results[ref2idx[ref]] = result

            for incomplete_obj in incomplete_objs:
                ray.cancel(incomplete_obj, force=self.force_stop)

        # Gathers metrics and syncs local references
        metric_results = []

        for job_id, job_results in enumerate(results):
            if job_results:
                trained_model, job_metric, training_state = job_results

                # Syncs model weights.
                # On Windows, a named temporary file cannot be opened a second time,
                # hence the TemporaryFiles helper.
                temp_file_name = None
                with TemporaryFiles() as tmp:
                    temp_file_name = tmp.get_temp_file()
                    self.search_space.save_model_weights(trained_model, temp_file_name)
                    self.search_space.load_model_weights(self.models[job_id], temp_file_name)

                # Syncs the training state
                self.training_states[trained_model.archid] = training_state

                metric_results.append(job_metric)

        # Resets model and job buffers
        self.models = []
        self.results_ref = []

        return metric_results
archai/archai/discrete_search/evaluators/progressive_training.py/0
{ "file_path": "archai/archai/discrete_search/evaluators/progressive_training.py", "repo_id": "archai", "token_count": 2288 }
336
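The evaluators above expect a training function that takes the architecture, the dataset provider, a budget, and the previous training state, and returns a (metric, new_state) pair. A stub sketch of such a function is shown below; the state keys and the placeholder metric are assumptions for illustration only.

def my_training_fn(arch, dataset, budget, training_state=None):
    # Resume from the previous state, if any.
    training_state = training_state or {"steps_done": 0}
    steps = int(budget or 1)

    # ... train `arch.arch` on data from `dataset` for `steps` steps ...
    training_state["steps_done"] += steps

    validation_metric = 0.0  # placeholder value returned by the real training loop
    return validation_metric, training_state

# evaluator = ProgressiveTraining(search_space, dataset_provider, my_training_fn)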
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import math
from random import Random
from numbers import Number
from typing import Any, List, Union, Optional


class DiscreteChoice:
    def __init__(self, choices: List[Union[int, float, str]],
                 probabilities: Optional[List[float]] = None,
                 encode_strategy: str = 'auto') -> None:
        """Stores a discrete choice of numeric or non-numeric values.

        The choice can be encoded as a numeric value or using one-hot encoding,
        depending on the value passed to `encode_strategy`.

        Args:
            choices (List[Union[int, float, str]]): List of choices. Choices can be
                integers, floats or strings.
            probabilities (Optional[List[float]], optional): Probability distribution over
                the choices used during sampling. If `None`, a uniform distribution is used.
            encode_strategy (str, optional): Encoding strategy to use ('one_hot' or 'numeric').
                If 'auto', the encoding strategy is chosen based on the type of the choices.
                Defaults to 'auto'.
        """
        self.choices = choices
        self.probabilities = probabilities

        if encode_strategy == 'auto':
            encode_strategy = (
                'numeric'
                if all(isinstance(choice, Number) for choice in choices)
                else 'one_hot'
            )

        self.encode_strategy = encode_strategy

    def __getitem__(self, idx: int) -> Any:
        return self.choices[idx]

    def __repr__(self) -> str:
        return f"DiscreteChoice({repr(self.choices)})"

    def __str__(self) -> str:
        return self.__repr__()

    def __len__(self) -> int:
        return len(self.choices)

    def encode(self, option: Any) -> List[float]:
        """Encodes the option into a numeric value or a one-hot encoding.

        Args:
            option (Any): Option to encode.

        Returns:
            List[float]: Encoded option.
        """
        if self.encode_strategy == 'one_hot':
            assert option in self.choices, f'Invalid option: {option}. Valid options: {self.choices}'
            return [float(choice == option) for choice in self.choices]

        return [float(option)]

    def random_sample(self, rng: Optional[Random] = None) -> Any:
        """Randomly samples a choice from the discrete set.

        Args:
            rng (Optional[Random], optional): Random number generator.

        Returns:
            Any: Randomly sampled choice.
        """
        rng = rng or Random()
        return rng.choices(self.choices, weights=self.probabilities, k=1)[0]
archai/archai/discrete_search/search_spaces/config/discrete_choice.py/0
{ "file_path": "archai/archai/discrete_search/search_spaces/config/discrete_choice.py", "repo_id": "archai", "token_count": 1106 }
337
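A quick usage sketch for the class above, showing the two encoding strategies; the particular choices are illustrative.

from random import Random

kernel_size = DiscreteChoice([3, 5, 7])         # all numeric -> encoded as a single float
activation = DiscreteChoice(['relu', 'gelu'])   # strings -> one-hot encoded

print(kernel_size.encode(5))                  # [5.0]
print(activation.encode('gelu'))              # [0.0, 1.0]
print(kernel_size.random_sample(Random(1)))   # deterministic given a seeded Random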
import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple, Union, Any

import torch
import torch.utils.checkpoint
from torch import nn
from torch.cuda.amp import autocast
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from transformers.activations import ACT2FN
from transformers.models.gpt2.configuration_gpt2 import GPT2Config

from archai.discrete_search.search_spaces.config import ArchConfig

try:
    from flash_attn.modules.mlp import FusedMLP
except ImportError:
    FusedMLP = None

try:
    from flash_attn.ops.layer_norm import dropout_add_layer_norm
except ImportError:
    dropout_add_layer_norm = None

from ...utils import get_optim_flag
from ...mixed_op import MixedAttentionBlock


class GPT2MLP(nn.Module):
    def __init__(self, hidden_size, intermediate_size, activation=nn.functional.gelu):
        super().__init__()

        self.c_fc = nn.Linear(hidden_size, intermediate_size)
        self.c_proj = nn.Linear(intermediate_size, hidden_size)
        self.act = activation

    def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
        hidden_states = self.c_fc(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.c_proj(hidden_states)
        return hidden_states


class GPT2Block(nn.Module):
    def __init__(self, arch_config: ArchConfig, hf_config: GPT2Config, hidden_size,
                 resid_dropout1: float = 0.0, resid_dropout2: float = 0.0, layer_idx=None):
        super().__init__()

        self.ln1 = nn.LayerNorm(hidden_size, eps=hf_config.layer_norm_epsilon)
        self.ln2 = nn.LayerNorm(hidden_size, eps=hf_config.layer_norm_epsilon)
        self.attn = MixedAttentionBlock(arch_config, hf_config, hidden_size, layer_idx=layer_idx)

        self.inner_dim = arch_config.pick('d_inner')
        self.fused_mlp = get_optim_flag(hf_config, 'fused_mlp')

        if self.fused_mlp:
            assert FusedMLP is not None, 'Need to install fused_mlp'
            self.mlp = FusedMLP(hidden_size, self.inner_dim)
        else:
            self.mlp = GPT2MLP(hidden_size, self.inner_dim)

        self.resid_dropout1 = nn.Dropout(resid_dropout1)
        self.resid_dropout2 = nn.Dropout(resid_dropout2)
        self.residual_in_fp32 = getattr(hf_config, 'residual_in_fp32', False)

        self.fused_dropout_add_ln = get_optim_flag(hf_config, 'fused_dropout_add_ln')
        if self.fused_dropout_add_ln:
            assert dropout_add_layer_norm is not None, 'dropout_add_ln is not installed'
            assert isinstance(self.ln1, nn.LayerNorm) and isinstance(self.resid_dropout1, nn.Dropout)

    def forward(
        self,
        hidden_states: Optional[Tuple[torch.FloatTensor]],
        residual: Optional[Tuple[torch.FloatTensor]],
        mixer_subset=None,
        mixer_kwargs=None,
        **kwargs
    ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
        if not self.fused_dropout_add_ln:
            dropped = self.resid_dropout1(hidden_states)
            residual = (dropped + residual) if residual is not None else dropped
            hidden_states = self.ln1(residual.to(dtype=self.ln1.weight.dtype))

            if self.residual_in_fp32:
                residual = residual.to(torch.float32)
        else:
            hidden_states, residual = dropout_add_layer_norm(
                hidden_states, residual, self.ln1.weight, self.ln1.bias,
                self.resid_dropout1.p if self.training else 0.0, self.ln1.eps,
                rowscale=None, prenorm=True, residual_in_fp32=self.residual_in_fp32
            )

        hidden_states, _ = self.attn(hidden_states, **kwargs)

        if not self.fused_dropout_add_ln:
            dropped = self.resid_dropout2(hidden_states)
            residual = (dropped + residual) if residual is not None else dropped
            hidden_states = self.ln2(residual.to(dtype=self.ln2.weight.dtype))

            if self.residual_in_fp32:
                residual = residual.to(torch.float32)
        else:
            hidden_states, residual = dropout_add_layer_norm(
                hidden_states, residual, self.ln2.weight, self.ln2.bias,
                self.resid_dropout2.p if self.training else 0.0, self.ln2.eps,
                rowscale=None, prenorm=True, residual_in_fp32=self.residual_in_fp32
            )

        return self.mlp(hidden_states), residual
archai/archai/discrete_search/search_spaces/nlp/tfpp/backbones/gpt2/block.py/0
{ "file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/backbones/gpt2/block.py", "repo_id": "archai", "token_count": 2113 }
338
# Modified from S4: https://github.com/HazyResearch/state-spaces/blob/main/src/models/sequence/ss/s4.py from functools import partial import math import torch import torch.nn as nn import torch.nn.functional as F from transformers import PretrainedConfig from einops import rearrange, repeat import opt_einsum as oe from archai.discrete_search.search_spaces.config import ArchConfig from ..utils import get_optim_flag from .sgconv import GConv optimized = True if optimized: contract = oe.contract else: contract = torch.einsum try: from .fftconv_ import fftconv_func except ImportError: fftconv_func = None @torch.jit.script def mul_sum(q, y): return (q * y).sum(dim=1) class GConv3(GConv): requires_length = True def __init__( self, d_model, d_state=64, l_max=1, # Maximum length of sequence. Fine if not provided: the kernel will keep doubling in length until longer than sequence. However, this can be marginally slower if the true length is not a power of 2 head_dim=1, # maps to head dim in H3 channels=1, # maps 1-dim to C-dim bidirectional=False, # Arguments for FF activation='gelu', # activation in between SS and FF ln=False, # Extra normalization postact=None, # activation after FF initializer=None, # initializer on FF weight_norm=False, # weight normalization on FF hyper_act=None, # Use a "hypernetwork" multiplication use_fast_fftconv=False, dropout=0.0, transposed=True, # axis ordering (B, L, D) or (B, D, L) verbose=False, shift=False, linear=False, mode="cat_randn", # SSM Kernel arguments **kernel_args, ): """ d_state: the dimension of the state, also denoted by N l_max: the maximum sequence length, also denoted by L if this is not known at model creation, set l_max=1 channels: can be interpreted as a number of "heads" bidirectional: bidirectional dropout: standard dropout argument transposed: choose backbone axis ordering of (B, L, H) or (B, H, L) [B=batch size, L=sequence length, H=hidden dimension] Other options are all experimental and should not need to be configured """ assert bidirectional == False, 'currently GConv4 does not support bidirectional=True' assert channels == 1, 'channels should be set to 1 for GConv3, select number of heads with the head_dim parameter' super().__init__(d_model=d_model, d_state=d_state, l_max=l_max, channels=channels, bidirectional=bidirectional, activation=activation, ln=ln, postact=postact, initializer=initializer, weight_norm=weight_norm, hyper_act=hyper_act, use_fast_fftconv=use_fast_fftconv, dropout=dropout, transposed=transposed, verbose=verbose, shift=shift, linear=linear, mode=mode, **kernel_args) self.d_model = d_model self.head_dim = head_dim assert d_model % head_dim == 0 self.h = d_model // head_dim # if self.use_fast_fftconv and not self.head_dim in [1,8]: # print('fast fftconv only supported for head_dim of 1 or 8') # self.use_fast_fftconv = False self.q_proj = nn.Linear(self.d_model, self.d_model) self.k_proj = nn.Linear(self.d_model, self.d_model) self.v_proj = nn.Linear(self.d_model, self.d_model) # self.init_scale = kernel_args.get('init_scale', 0) # self.kernel_dim = kernel_args.get('kernel_dim', 64) # self.num_scales = kernel_args.get('n_scales', None) # if self.num_scales is None: # self.num_scales = 1 + math.ceil(math.log2(l_max/self.kernel_dim)) - self.init_scale decay_min = kernel_args.get('decay_min', 2) decay_max = kernel_args.get('decay_max', 2) self.kernel_list_key = self.init_kernels(h=self.d_model, **kernel_args) self.D_key = nn.Parameter(torch.randn(channels, self.d_model)) self.kernel_list = self.init_kernels(h=self.h, 
**kernel_args) self.D = nn.Parameter(torch.randn(channels, self.h)) if 'learnable' in mode: self.decay_key = nn.Parameter(torch.rand(self.d_model) * (decay_max - decay_min) + decay_min) self.decay = nn.Parameter(torch.rand(self.h) * (decay_max - decay_min) + decay_min) if 'fixed' in mode: self.decay_key.requires_grad = False self.decay.requires_grad = False else: self.decay_key._optim = {'lr': kernel_args.get('lr', 0.001),} self.decay._optim = {'lr': kernel_args.get('lr', 0.001),} self.register_buffer('multiplier_key', torch.tensor(1.0)) self.register_buffer('multiplier', torch.tensor(1.0)) else: self.register_buffer('multiplier_key', torch.linspace(decay_min, decay_max, self.d_model).view(1, -1, 1)) self.register_buffer('multiplier', torch.linspace(decay_min, decay_max, self.h).view(1, -1, 1)) self.register_buffer('kernel_norm_key', torch.ones(channels, self.d_model, 1)) self.register_buffer('kernel_norm_initialized_key', torch.tensor(0, dtype=torch.bool)) self.register_buffer('kernel_norm', torch.ones(channels, self.h, 1)) self.register_buffer('kernel_norm_initialized', torch.tensor(0, dtype=torch.bool)) self.pw_linear = nn.Linear(self.d_model, self.d_model) def init_kernels(self, h, **kernel_args): kernel_list = nn.ParameterList() for _ in range(self.num_scales): if 'randn' in self.mode: kernel = nn.Parameter(torch.randn(self.channels, h, self.kernel_dim)) elif 'cos' in self.mode: kernel = nn.Parameter(torch.cat([torch.cos(torch.linspace(0, 2*i*math.pi, self.kernel_dim)).expand( self.channels, 1, self.kernel_dim) for i in range(h)], dim=1)[:, torch.randperm(h), :]) else: raise ValueError(f"Unknown mode {self.mode}") kernel._optim = {'lr': kernel_args.get('lr', 0.001),} kernel_list.append(kernel) return kernel_list def get_kernels_forward(self, multiplier, kernel_list_init): kernel_list = [] interpolate_mode = 'nearest' if 'nearest' in self.mode else 'linear' if 'sum' in self.mode: for i in range(self.num_scales): kernel = F.pad( F.interpolate( kernel_list_init[i], scale_factor=2**(i + self.init_scale), mode=interpolate_mode, ), (0, self.kernel_dim*2**(self.num_scales - 1 + self.init_scale) - self.kernel_dim*2**(i + self.init_scale)), ) * multiplier ** (self.num_scales - i - 1) kernel_list.append(kernel) k = sum(kernel_list) elif 'cat' in self.mode: for i in range(self.num_scales): kernel = F.interpolate( kernel_list_init[i], scale_factor=2**(max(0, i-1) + self.init_scale), mode=interpolate_mode, ) * multiplier ** (self.num_scales - i - 1) kernel_list.append(kernel) k = torch.cat(kernel_list, dim=-1) else: raise ValueError(f"Unknown mode {self.mode}") return k # absorbs return_output and transformer src mask def forward(self, u, return_kernel=False): """ u: (B H L) if self.transposed else (B L H) state: (H N) never needed unless you know what you're doing Returns: same shape as u """ if not self.transposed: u = u.transpose(-1, -2) L = u.size(-1) if self.use_fast_fftconv and L % 2 != 0: u = F.pad(u, (0, 1)) k_key = self.get_kernels_forward(self.multiplier_key, self.kernel_list_key) k = self.get_kernels_forward(self.multiplier, self.kernel_list) if 'learnable' in self.mode: k_key = k_key * torch.exp(-self.decay_key.view(1, -1, 1)*torch.log( torch.arange(k_key.size(-1), device=k_key.device)+1).view(1, 1, -1)) k = k * torch.exp(-self.decay.view(1, -1, 1)*torch.log( torch.arange(k.size(-1), device=k.device)+1).view(1, 1, -1)) if not self.kernel_norm_initialized: self.kernel_norm_key = k_key.norm(dim=-1, keepdim=True).detach() self.kernel_norm_initialized_key = torch.tensor(1, 
dtype=torch.bool, device=k.device) self.kernel_norm = k.norm(dim=-1, keepdim=True).detach() self.kernel_norm_initialized = torch.tensor(1, dtype=torch.bool, device=k.device) if self.verbose: print(f"Key Kernel norm: {self.kernel_norm_key.mean()}, Kernel norm: {self.kernel_norm.mean()}") print(f"Key Kernel size: {k_key.size()}, Kernel size: {k.size()}") k_key = k_key[..., :L] if k_key.size(-1) >= L else F.pad(k_key, (0, L - k_key.size(-1))) k = k[..., :L] if k.size(-1) >= L else F.pad(k, (0, L - k.size(-1))) k_key = k_key / self.kernel_norm_key # * (L / self.l_max) ** 0.5 k = k / self.kernel_norm # * (L / self.l_max) ** 0.5 # Convolution if self.bidirectional: raise NotImplementedError # compute key, query, and value u = rearrange(u, 'b h l -> h (b l)') # (H B*L) dtype = (self.q_proj.weight.dtype if not torch.is_autocast_enabled() else torch.get_autocast_gpu_dtype()) query = self.q_proj.weight @ u + self.q_proj.bias.to(dtype).unsqueeze(-1) key = self.k_proj.weight @ u + self.k_proj.bias.to(dtype).unsqueeze(-1) # (H B*L) value = self.v_proj.weight @ u + self.v_proj.bias.to(dtype).unsqueeze(-1) query, key, value = [rearrange(x, 'h (b l) -> b h l', l=L) for x in [query, key, value]] # first conv k_key = rearrange(k_key, '1 h l -> h l') if self.use_fast_fftconv: dropout_mask = None # No GeLU after the SSM # We want output_hbl=True so that k has the same layout as q and v for the next # fftconv key = fftconv_func(key, k_key, self.D_key.squeeze(0), dropout_mask, False, False, True) # This line below looks like it doesn't do anything, but it gets the stride right # for the case batch_size=1. In that case k has stride (L, L, 1), but q and v has # stride (H * L, L, 1). The two strides are equivalent because batch_size=1, but # the C++ code doesn't like that. key = rearrange(rearrange(key, 'b h l -> h b l'), 'h b l -> b h l') else: fft_size = 2*L k_key_f = torch.fft.rfft(k_key, n=fft_size) # (H L+1) key_f = torch.fft.rfft(key, n=fft_size) # (B H L+1) y_f = contract('bhl,hl->bhl', key_f, k_key_f) y = torch.fft.irfft(y_f, n=fft_size)[..., :L] # (B H L) # Compute D term in state space equation - essentially a skip connection key = y + contract('bhl,1h->bhl', key, self.D_key) # second conv k = rearrange(k, '1 h l -> h l') # (H L) if self.use_fast_fftconv: if self.head_dim in [1,8]: dropout_mask = None # No GeLU after the SSM # Set output_hbl_layout=True since we'll be doing a matmul right after y = fftconv_func(key, k, self.D.squeeze(0), dropout_mask, False, False, True, value, self.head_dim, query) else: kv = (rearrange(key, 'b (h d1) l -> b d1 1 h l', d1=self.head_dim) * rearrange(value, 'b (h d2) l -> b 1 d2 h l', d2=self.head_dim)) # B d1 d2 h L kv = rearrange(kv, 'b d1 d2 h l -> b (d1 d2 h) l') k = repeat(k, 'h l -> d h l', d=self.head_dim**2).clone().contiguous() k = rearrange(k, 'd h l -> (d h) l') D = repeat(self.D, '1 h -> d h', d=self.head_dim**2).clone().contiguous() D = rearrange(D, 'd h -> (d h)') y = fftconv_func(kv, k, D, None, False, False, True) y = rearrange(y, 'b (d1 d2 h) l -> b d1 d2 h l', d1=self.head_dim, d2=self.head_dim) query = rearrange(query, 'b (h d1) l -> b d1 1 h l', d1=self.head_dim) # einsum is way slower than multiply and then sum. 
y = mul_sum(y, query) y = rearrange(y, 'b d h l -> b (d h) l') else: fft_size = 2*L kv = (rearrange(key, 'b (h d1) l -> b d1 1 h l', d1=self.head_dim) * rearrange(value, 'b (h d2) l -> b 1 d2 h l', d2=self.head_dim)) # B d1 d2 h L kv_f = torch.fft.rfft(kv, n=fft_size) / fft_size k_f = torch.fft.rfft(k, n=fft_size) # H L+1 y = torch.fft.irfft(kv_f * k_f, n=fft_size, norm='forward')[..., :L] # B d1 d2 h L y = y + kv * self.D.unsqueeze(-1) # B d1 d2 h L query = rearrange(query, 'b (h d1) l -> b d1 1 h l', d1=self.head_dim) # einsum is way slower than multiply and then sum. if self.head_dim > 1: y = mul_sum(y, query) y = rearrange(y, 'b d h l -> b (d h) l') else: y = rearrange(y * query, 'b 1 1 h l -> b h l') # Reshape to flatten channels # y = rearrange(y, '... c h l -> ... (c h) l') if not self.linear: y = self.dropout(self.activation(y)) if not self.transposed: y = y.transpose(-1, -2) if not self.linear: y = self.norm(y) y = self.output_linear(y) # y = self.pw_linear(y) if return_kernel: return y, k return y, None @property def d_state(self): return self.h * self.n @property def d_output(self): return self.h @property def state_to_tensor(self): return lambda state: rearrange('... h n -> ... (h n)', state) class SGConv3(nn.Module): def __init__(self, arch_config: ArchConfig, hidden_size: int, total_heads: int, op_heads: int, hf_config: PretrainedConfig, **kwargs): super().__init__() assert hidden_size % total_heads == 0 self.hidden_size = hidden_size self.total_heads = total_heads self.op_heads = op_heads # Architecture params self.kernel_size = arch_config.pick('kernel_size') self.use_fast_fftconv = get_optim_flag(hf_config, 'fast_fftconv') self.channels = 1 self.op_size = op_heads * (hidden_size // total_heads) self.in_proj = nn.Sequential( nn.Linear(hidden_size, self.op_size * 2), nn.GLU(dim=-1) ) self.sgconv = GConv3( self.op_size, l_max=hf_config.max_position_embeddings, head_dim=self.op_heads, channels=self.channels, kernel_dim=self.kernel_size, use_fast_fftconv=self.use_fast_fftconv, transposed=False, verbose=False ) self.act = nn.GELU(approximate='none') def forward(self, x: torch.Tensor, **kwargs): output, _ = self.sgconv(self.in_proj(x)) return self.act(output), None if __name__ == '__main__': B = 2 # batch size H = 768 # d_model L = 2048 # sequence length device = 'cuda' import torch.utils.benchmark as benchmark flash_layer = GConv3(d_model=H, l_max=L, head_dim=12, kernel_dim=128, use_fast_fftconv=True, transposed=False).to(device) layer = GConv3(d_model=H, l_max=L, head_dim=8, kernel_dim=128, use_fast_fftconv=False, transposed=False).to(device) u = torch.randn(B, L, H, device=device, dtype=torch.float32, requires_grad=True) t0 = benchmark.Timer( stmt='flash_layer(u)', globals={'flash_layer': flash_layer, 'u': u}) t1 = benchmark.Timer( stmt='layer(u)', globals={'layer': layer, 'u': u}) print(t0.timeit(100)) print(t1.timeit(100))
archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/sgconv3.py/0
{ "file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/sgconv3.py", "repo_id": "archai", "token_count": 8190 }
339
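The unfused branch of the forward pass above relies on the standard trick of computing a long causal convolution with FFTs: zero-padding both operands to 2L makes the circular convolution equal the linear one. A small self-contained check of that identity, independent of the classes in the file (shapes and tolerances are illustrative):

import torch

B, H, L = 2, 4, 128
u = torch.randn(B, H, L)
k = torch.randn(H, L)

# FFT path: pad to 2L so circular convolution equals linear (causal) convolution.
fft_size = 2 * L
y_fft = torch.fft.irfft(
    torch.fft.rfft(u, n=fft_size) * torch.fft.rfft(k, n=fft_size), n=fft_size
)[..., :L]

# Direct path: y[t] = sum_{s <= t} k[s] * u[t - s]
y_direct = torch.zeros_like(u)
for t in range(L):
    for s in range(t + 1):
        y_direct[..., t] += k[:, s] * u[..., t - s]

assert torch.allclose(y_fft, y_direct, atol=1e-3)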
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

from typing import Any, List, Optional

from transformers.models.gpt2.configuration_gpt2 import GPT2Config


def _map_to_list(variable: Any, size: int) -> List[Any]:
    if isinstance(variable, list):
        size_diff = size - len(variable)

        if size_diff < 0:
            return variable[:size]
        elif size_diff == 0:
            return variable
        elif size_diff > 0:
            return variable + [variable[0]] * size_diff

    return [variable] * size


class GPT2FlexConfig(GPT2Config):
    model_type = "gpt2-flex"

    def __init__(self, *args, primer_square: Optional[bool] = False, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        self.primer_square = primer_square
        if primer_square:
            self.activation_function = "relu"

        self.n_inner = self.n_inner if self.n_inner is not None else 4 * self.n_embd
        self.n_inner = _map_to_list(self.n_inner, self.n_layer)
        self.n_head = _map_to_list(self.n_head, self.n_layer)
archai/archai/discrete_search/search_spaces/nlp/transformer_flex/models/configuration_gpt2_flex.py/0
{ "file_path": "archai/archai/discrete_search/search_spaces/nlp/transformer_flex/models/configuration_gpt2_flex.py", "repo_id": "archai", "token_count": 454 }
340
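A minimal sketch of the per-layer broadcasting performed by the config above; the specific sizes are illustrative.

# Head counts and inner dimensions are padded/truncated to n_layer entries.
config = GPT2FlexConfig(n_layer=4, n_head=[8, 8, 12], n_inner=None, n_embd=768)

print(config.n_head)   # [8, 8, 12, 8]  -> padded with the first entry
print(config.n_inner)  # [3072, 3072, 3072, 3072]  -> defaults to 4 * n_embd per layer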
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

from typing import Any, Optional, Tuple

from transformers.configuration_utils import PretrainedConfig

from archai.onnx.config_utils.onnx_config_base import OnnxConfigWithPast


class CodeGenOnnxConfig(OnnxConfigWithPast):
    """CodeGen ONNX configuration (with past key/values support)."""

    def __init__(
        self,
        config: PretrainedConfig,
        task: Optional[str] = "causal-lm",
        use_past: Optional[bool] = False,
    ) -> None:
        super().__init__(config, task=task, use_past=use_past, past_key_values=2)

    @property
    def num_layers(self) -> int:
        return self.config.n_layer

    @property
    def is_ort_graph_optimizable(self) -> bool:
        return False

    @property
    def ort_graph_optimizer_args(self) -> Tuple[Any, ...]:
        return (self.num_attention_heads, self.hidden_size)
archai/archai/onnx/config_utils/codegen_onnx_config.py/0
{ "file_path": "archai/archai/onnx/config_utils/codegen_onnx_config.py", "repo_id": "archai", "token_count": 353 }
341
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

from typing import Tuple

import torch


class OnnxDynamicObserver:
    """DynamicObserver that is compliant with ONNX-based graphs.

    This class can be used to perform symmetric or asymmetric quantization, depending on
    the `dtype` provided. `qint8` is usually used for symmetric quantization, while `quint8`
    is used for asymmetric quantization.

    """

    def __init__(self, dtype: torch.dtype) -> None:
        """Initialize the class by setting appropriate values for the quantization bounds.

        Args:
            dtype: Type of the quantization operators. This should be either `torch.quint8`
                or `torch.qint8`.

        """
        self.dtype = dtype
        self.eps = torch.finfo(torch.float32).eps

        assert dtype in (torch.quint8, torch.qint8)

        if dtype == torch.quint8:
            self.qmin, self.qmax = 0, 255
        else:
            self.qmin, self.qmax = -128, 127

    def __call__(self, x: torch.Tensor) -> None:
        x = x.detach().float()
        self.min_val, self.max_val = x.min().view(-1), x.max().view(-1)

    def calculate_qparams(self) -> Tuple[torch.Tensor, torch.Tensor]:
        """Calculate the quantization parameters (scale and zero point)."""

        if self.dtype == torch.qint8:
            scale = torch.max(self.max_val.clamp(min=0), -self.min_val.clamp(max=0)) / 127
            zero_pointer = torch.zeros_like(scale).to(torch.int64)

            return scale.clamp(min=self.eps), zero_pointer
        else:
            scale = (self.max_val - self.min_val) / float(self.qmax - self.qmin)
            scale = scale.clamp(min=self.eps)

            zero_pointer = self.qmin - torch.round(self.min_val / scale)
            zero_pointer = zero_pointer.clamp(min=self.qmin, max=self.qmax).to(torch.int64)

            return scale, zero_pointer
archai/archai/quantization/observers.py/0
{ "file_path": "archai/archai/quantization/observers.py", "repo_id": "archai", "token_count": 791 }
342
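A short usage sketch of the observer above (pure PyTorch, so it should run as-is): observe one tensor, then read back the scale/zero-point pair.

import torch

from archai.quantization.observers import OnnxDynamicObserver

observer = OnnxDynamicObserver(dtype=torch.qint8)  # symmetric quantization
observer(torch.randn(4, 16))                       # record min/max of the tensor
scale, zero_point = observer.calculate_qparams()
print(scale, zero_point)                           # zero_point is always 0 for qint8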
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. from overrides import overrides from archai.supergraph.algos.darts.darts_model_desc_builder import DartsModelDescBuilder from archai.supergraph.algos.didarts.didarts_arch_trainer import DidartsArchTrainer from archai.supergraph.nas.arch_trainer import TArchTrainer from archai.supergraph.nas.exp_runner import ExperimentRunner class DiDartsExperimentRunner(ExperimentRunner): @overrides def model_desc_builder(self)->DartsModelDescBuilder: return DartsModelDescBuilder() @overrides def trainer_class(self)->TArchTrainer: return DidartsArchTrainer
archai/archai/supergraph/algos/didarts/didarts_exp_runner.py/0
{ "file_path": "archai/archai/supergraph/algos/didarts/didarts_exp_runner.py", "repo_id": "archai", "token_count": 212 }
343
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. from typing import Iterator, Optional, Tuple import torch from overrides import overrides from torch import Tensor, nn from archai.common.utils import zip_eq from archai.supergraph.nas.arch_params import ArchParams from archai.supergraph.nas.model_desc import OpDesc from archai.supergraph.nas.operations import Op # TODO: reduction cell might have output reduced by 2^1=2X due to # stride 2 through input nodes however FactorizedReduce does only # 4X reduction. Is this correct? class GsOp(Op): """The output of GsOp is weighted output of all allowed primitives. """ PRIMITIVES = [ 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', # identity 'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5', 'none' # this must be at the end so top1 doesn't chose it ] def __init__(self, op_desc:OpDesc, arch_params:Optional[ArchParams], affine:bool): super().__init__() # assume last PRIMITIVE is 'none' assert GsOp.PRIMITIVES[-1] == 'none' self._ops = nn.ModuleList() for primitive in GsOp.PRIMITIVES: op = Op.create( OpDesc(primitive, op_desc.params, in_len=1, trainables=None), affine=affine, arch_params=None) self._ops.append(op) # we do this at the end so that we can capture all arch params registered by # any previous child modules self._setup_arch_params(arch_params) def set_op_sampled_weights(self, sampled_weights:Tensor): ''' Sets the weight for each op ''' assert sampled_weights.shape[0] == len(GsOp.PRIMITIVES) self._sampled_weights = sampled_weights @overrides def forward(self, x): assert self._sampled_weights is not None return sum(w * op(x) for w, op in zip_eq(self._sampled_weights, self._ops)) @overrides def finalize(self, sampled_weights) -> Tuple[OpDesc, Optional[float]]: # finalization where each edge gets to keep as many # unique operations that are **sampled at the node level** assert sampled_weights.shape[0] == len(GsOp.PRIMITIVES) # we can't handle empty op assert sampled_weights.bool().any() greater_than_0 = sampled_weights > 0 children = [] children_ins = [] selected_alphas = [] for i in range(greater_than_0.size()[0]): if greater_than_0[i]: children.append(self._ops[i].finalize()[0]) selected_alphas.append(self._alphas[0][i].item()) # all the ops will operate on the single node input children_ins.append(0) final_op_desc = OpDesc(name='multi_op', params={ # copy convolution parameters 'conv': self.desc.params['conv'] }, # number of inputs remains same and in this # case should be 1 in_len=self.desc.in_len, trainables=None, # primitive's finalize call also records its # weights in description. finalize call returns # (desc, rank) where rank for primitive is None children = children, children_ins = children_ins ) max_alpha = 0.0 if selected_alphas: max_alpha = max(selected_alphas) return final_op_desc, max_alpha @overrides def can_drop_path(self) -> bool: return False @overrides def ops(self)->Iterator[Tuple['Op', float]]: # type: ignore return iter(sorted(zip_eq(self._ops, self._alphas[0]), key=lambda t:t[1], reverse=True)) def _setup_arch_params(self, arch_params:Optional[ArchParams])->None: # do we have shared arch params? if arch_params is None: # create our own arch params new_p = nn.Parameter( # TODO: use better init than uniform random? 
1.0e-3*torch.randn(len(GsOp.PRIMITIVES)), requires_grad=True) self.create_arch_params([('alphas', new_p)]) else: assert arch_params.has_kind('alphas') self.set_arch_params(arch_params) # we store alphas in list so Pytorch don't register them self._alphas = list(self.arch_params().param_by_kind('alphas')) assert len(self._alphas)==1
archai/archai/supergraph/algos/gumbelsoftmax/gs_op.py/0
{ "file_path": "archai/archai/supergraph/algos/gumbelsoftmax/gs_op.py", "repo_id": "archai", "token_count": 2271 }
344
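The core idea of GsOp.forward — a weighted sum over candidate primitives under sampled weights — can be illustrated without archai's Op/OpDesc machinery. The sketch below is illustrative only (toy candidate ops and plain Gumbel-softmax sampling), not the project's implementation:

import torch
from torch import nn
from torch.nn import functional as F

class TinyMixedOp(nn.Module):
    def __init__(self, channels: int):
        super().__init__()
        # toy stand-ins for the PRIMITIVES list
        self.ops = nn.ModuleList([
            nn.Conv2d(channels, channels, 3, padding=1),
            nn.MaxPool2d(3, stride=1, padding=1),
            nn.Identity(),
        ])
        self.alphas = nn.Parameter(1.0e-3 * torch.randn(len(self.ops)))

    def forward(self, x):
        # sample per-op weights, then blend outputs exactly like GsOp.forward
        weights = F.gumbel_softmax(self.alphas, tau=1.0, hard=False)
        return sum(w * op(x) for w, op in zip(weights, self.ops))

out = TinyMixedOp(8)(torch.randn(1, 8, 16, 16))
print(out.shape)  # torch.Size([1, 8, 16, 16])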
# Copyright 2019 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import pickle import random import time from typing import List, Optional, OrderedDict import numpy as np from torch import nn from archai.common import utils from archai.supergraph.algos.nasbench101 import config, model_builder from archai.supergraph.algos.nasbench101 import model_spec as _model_spec # Bring ModelSpec to top-level for convenience. See lib/model_spec.py. ModelSpec = _model_spec.ModelSpec class OutOfDomainError(Exception): """Indicates that the requested graph is outside of the search domain.""" class Nasbench101Dataset(object): """User-facing API for accessing the NASBench dataset.""" VALID_EPOCHS = [4, 12, 36, 108] def __init__(self, dataset_file, seed=None): self.config = config.build_config() random.seed(seed) dataset_file = utils.full_path(dataset_file) logging.info(f'Loading dataset from file "{dataset_file}"...') start = time.time() with open(dataset_file, 'rb') as f: self.data:OrderedDict[str, dict] = pickle.load(f) self.module_hashes = list(self.data.keys()) elapsed = time.time() - start logging.info('Loaded dataset in %d seconds' % elapsed) def __len__(self): return len(self.module_hashes) def __getitem__(self, idx): module_hash = self.module_hashes[idx] return self.data[module_hash] def get_data(self, idx, epochs:Optional[int]=108, run_index:Optional[int]=None, step_index:Optional[int]=-1)->dict: module_hash = self.module_hashes[idx] d = self.data[module_hash] return self.filter_data(d, epochs=epochs, run_index=run_index, step_index=step_index) def filter_data(self, d:dict, epochs:Optional[int]=108, run_index:Optional[int]=None, step_index:Optional[int]=-1)->dict: if epochs is not None: d = d['metrics'][epochs] if run_index is not None: d = d[run_index] if step_index is not None: d = d[step_index] return d def get_test_acc(self, idx, epochs=108, step_index=-1)->List[float]: module_hash = self.module_hashes[idx] runs = self.data[module_hash]['metrics'][epochs] return [r[step_index]['test_accuracy'] for r in runs] def create_model_spec(self, desc_matrix:List[List[int]], vertex_ops:List[str])->ModelSpec: return ModelSpec(desc_matrix, vertex_ops) def query(self, desc_matrix:List[List[int]], vertex_ops:List[str], epochs:Optional[int]=108, run_index:Optional[int]=None, step_index:Optional[int]=-1): model_spec = self.create_model_spec(desc_matrix, vertex_ops) d = self.get_metrics_from_spec(model_spec) return self.filter_data(d, epochs=epochs, run_index=run_index, step_index=step_index) def create_model(self, idx:int, device=None, stem_out_channels=128, num_stacks=3, num_modules_per_stack=3, num_labels=10)->nn.Module: module_hash = self.module_hashes[idx] d = self.data[module_hash] adj, ops = d['module_adjacency'], d['module_operations'] return model_builder.build(adj, ops, device=device, stem_out_channels=stem_out_channels, num_stacks=num_stacks, num_modules_per_stack=num_modules_per_stack, num_labels=num_labels) def is_valid(self, desc_matrix:List[List[int]], 
vertex_ops:List[str]): """Checks the validity of the model_spec. For the purposes of benchmarking, this does not increment the budget counters. Returns: True if model is within space. """ model_spec = self.create_model_spec(desc_matrix, vertex_ops) try: self._check_spec(model_spec) except OutOfDomainError: return False return True def get_metrics_from_spec(self, model_spec): self._check_spec(model_spec) module_hash = self._hash_spec(model_spec) return self.data[module_hash] def _check_spec(self, model_spec): """Checks that the model spec is within the dataset.""" if not model_spec.valid_spec: raise OutOfDomainError('invalid spec, provided graph is disconnected.') num_vertices = len(model_spec.ops) num_edges = np.sum(model_spec.matrix) if num_vertices > self.config['module_vertices']: raise OutOfDomainError('too many vertices, got %d (max vertices = %d)' % (num_vertices, self.config['module_vertices'])) if num_edges > self.config['max_edges']: raise OutOfDomainError('too many edges, got %d (max edges = %d)' % (num_edges, self.config['max_edges'])) if model_spec.ops[0] != 'input': raise OutOfDomainError('first operation should be \'input\'') if model_spec.ops[-1] != 'output': raise OutOfDomainError('last operation should be \'output\'') for op in model_spec.ops[1:-1]: if op not in self.config['available_ops']: raise OutOfDomainError('unsupported op %s (available ops = %s)' % (op, self.config['available_ops'])) def _hash_spec(self, model_spec): """Returns the MD5 hash for a provided model_spec.""" return model_spec.hash_spec(self.config['available_ops'])
archai/archai/supergraph/algos/nasbench101/nasbench101_dataset.py/0
{ "file_path": "archai/archai/supergraph/algos/nasbench101/nasbench101_dataset.py", "repo_id": "archai", "token_count": 2179 }
345
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. from overrides import overrides from archai.supergraph.algos.xnas.xnas_arch_trainer import XnasArchTrainer from archai.supergraph.algos.xnas.xnas_model_desc_builder import XnasModelDescBuilder from archai.supergraph.nas.arch_trainer import TArchTrainer from archai.supergraph.nas.exp_runner import ExperimentRunner class XnasExperimentRunner(ExperimentRunner): @overrides def model_desc_builder(self)->XnasModelDescBuilder: return XnasModelDescBuilder() @overrides def trainer_class(self)->TArchTrainer: return XnasArchTrainer
archai/archai/supergraph/algos/xnas/xnas_exp_runner.py/0
{ "file_path": "archai/archai/supergraph/algos/xnas/xnas_exp_runner.py", "repo_id": "archai", "token_count": 211 }
346
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. import os import torchvision from overrides import overrides from torchvision.transforms import transforms from archai.common import utils from archai.common.config import Config from archai.supergraph.datasets.dataset_provider import ( DatasetProvider, ImgSize, TrainTestDatasets, register_dataset_provider, ) class Flower102Provider(DatasetProvider): def __init__(self, conf_dataset:Config): super().__init__(conf_dataset) self._dataroot = utils.full_path(conf_dataset['dataroot']) @overrides def get_datasets(self, load_train:bool, load_test:bool, transform_train, transform_test)->TrainTestDatasets: trainset, testset = None, None if load_train: trainpath = os.path.join(self._dataroot, 'flower102', 'train') trainset = torchvision.datasets.ImageFolder(trainpath, transform=transform_train) if load_test: testpath = os.path.join(self._dataroot, 'flower102', 'test') testset = torchvision.datasets.ImageFolder(testpath, transform=transform_test) return trainset, testset @overrides def get_transforms(self, img_size:ImgSize)->tuple: print(f'IMG SIZE: {img_size}') if isinstance(img_size, int): img_size = (img_size, img_size) # MEAN, STD computed for flower102 MEAN = [0.5190, 0.4101, 0.3274] STD = [0.2972, 0.2488, 0.2847] # transformations match that in # https://github.com/antoyang/NAS-Benchmark/blob/master/DARTS/preproc.py train_transf = [ transforms.RandomResizedCrop(img_size, scale=(0.75, 1)), transforms.RandomHorizontalFlip(), transforms.ColorJitter( brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2) ] margin_size = (int(img_size[0] + img_size[0]*0.1), int(img_size[1] + img_size[1]*0.1)) test_transf = [transforms.Resize(margin_size), transforms.CenterCrop(img_size)] normalize = [ transforms.ToTensor(), transforms.Normalize(MEAN, STD) ] train_transform = transforms.Compose(train_transf + normalize) test_transform = transforms.Compose(test_transf + normalize) return train_transform, test_transform register_dataset_provider('flower102', Flower102Provider)
archai/archai/supergraph/datasets/providers/flower102_provider.py/0
{ "file_path": "archai/archai/supergraph/datasets/providers/flower102_provider.py", "repo_id": "archai", "token_count": 1099 }
347
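The MEAN/STD constants above are dataset statistics; if the provider is adapted to a new ImageFolder dataset, per-channel values can be recomputed with a sketch like the one below (the dataroot path and resize size are assumptions):

import os

import torch
import torchvision
from torchvision import transforms

root = os.path.expanduser("~/dataroot/flower102/train")  # hypothetical path
dataset = torchvision.datasets.ImageFolder(
    root, transform=transforms.Compose([transforms.Resize((64, 64)), transforms.ToTensor()])
)
loader = torch.utils.data.DataLoader(dataset, batch_size=256, num_workers=4)

count, total, total_sq = 0, torch.zeros(3), torch.zeros(3)
for x, _ in loader:
    count += x.numel() / 3
    total += x.sum(dim=(0, 2, 3))
    total_sq += (x ** 2).sum(dim=(0, 2, 3))

mean = total / count
std = (total_sq / count - mean ** 2).sqrt()
print(mean, std)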
import math import torch import torch.nn as nn from archai.supergraph.models.shakedrop import ShakeDrop def conv3x3(in_planes, out_planes, stride=1): """ 3x3 convolution with padding """ return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): outchannel_ratio = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, p_shakedrop=1.0): super(BasicBlock, self).__init__() self.bn1 = nn.BatchNorm2d(inplanes) self.conv1 = conv3x3(inplanes, planes, stride) self.bn2 = nn.BatchNorm2d(planes) self.conv2 = conv3x3(planes, planes) self.bn3 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride self.shake_drop = ShakeDrop(p_shakedrop) def forward(self, x): out = self.bn1(x) out = self.conv1(out) out = self.bn2(out) out = self.relu(out) out = self.conv2(out) out = self.bn3(out) out = self.shake_drop(out) if self.downsample is not None: shortcut = self.downsample(x) featuremap_size = shortcut.size()[2:4] else: shortcut = x featuremap_size = out.size()[2:4] batch_size = out.size()[0] residual_channel = out.size()[1] shortcut_channel = shortcut.size()[1] if residual_channel != shortcut_channel: padding = torch.autograd.Variable( torch.cuda.FloatTensor(batch_size, residual_channel - shortcut_channel, featuremap_size[0], featuremap_size[1]).fill_(0)) out += torch.cat((shortcut, padding), dim=1) else: out += shortcut return out class Bottleneck(nn.Module): outchannel_ratio = 4 def __init__(self, inplanes, planes, stride=1, downsample=None, p_shakedrop=1.0): super(Bottleneck, self).__init__() self.bn1 = nn.BatchNorm2d(inplanes) self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, (planes * 1), kernel_size=3, stride=stride, padding=1, bias=False) self.bn3 = nn.BatchNorm2d((planes * 1)) self.conv3 = nn.Conv2d((planes * 1), planes * Bottleneck.outchannel_ratio, kernel_size=1, bias=False) self.bn4 = nn.BatchNorm2d(planes * Bottleneck.outchannel_ratio) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride self.shake_drop = ShakeDrop(p_shakedrop) def forward(self, x): out = self.bn1(x) out = self.conv1(out) out = self.bn2(out) out = self.relu(out) out = self.conv2(out) out = self.bn3(out) out = self.relu(out) out = self.conv3(out) out = self.bn4(out) out = self.shake_drop(out) if self.downsample is not None: shortcut = self.downsample(x) featuremap_size = shortcut.size()[2:4] else: shortcut = x featuremap_size = out.size()[2:4] batch_size = out.size()[0] residual_channel = out.size()[1] shortcut_channel = shortcut.size()[1] if residual_channel != shortcut_channel: padding = torch.autograd.Variable( torch.cuda.FloatTensor(batch_size, residual_channel - shortcut_channel, featuremap_size[0], featuremap_size[1]).fill_(0)) out += torch.cat((shortcut, padding), dim=1) else: out += shortcut return out class PyramidNet(nn.Module): def __init__(self, dataset, depth, alpha, n_classes, bottleneck=True): super(PyramidNet, self).__init__() self.dataset = dataset if self.dataset.startswith('cifar'): self.inplanes = 16 if bottleneck: n = int((depth - 2) / 9) block = Bottleneck else: n = int((depth - 2) / 6) block = BasicBlock self.addrate = alpha / (3 * n * 1.0) self.ps_shakedrop = [1. 
- (1.0 - (0.5 / (3 * n)) * (i + 1)) for i in range(3 * n)] self.input_featuremap_dim = self.inplanes self.conv1 = nn.Conv2d(3, self.input_featuremap_dim, kernel_size=3, stride=1, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(self.input_featuremap_dim) self.featuremap_dim = self.input_featuremap_dim self.layer1 = self.pyramidal_make_layer(block, n) self.layer2 = self.pyramidal_make_layer(block, n, stride=2) self.layer3 = self.pyramidal_make_layer(block, n, stride=2) self.final_featuremap_dim = self.input_featuremap_dim self.bn_final = nn.BatchNorm2d(self.final_featuremap_dim) self.relu_final = nn.ReLU(inplace=True) self.avgpool = nn.AvgPool2d(8) self.fc = nn.Linear(self.final_featuremap_dim, n_classes) elif dataset == 'imagenet': blocks = {18: BasicBlock, 34: BasicBlock, 50: Bottleneck, 101: Bottleneck, 152: Bottleneck, 200: Bottleneck} layers = {18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], 200: [3, 24, 36, 3]} if layers.get(depth) is None: if bottleneck == True: blocks[depth] = Bottleneck temp_cfg = int((depth - 2) / 12) else: blocks[depth] = BasicBlock temp_cfg = int((depth - 2) / 8) layers[depth] = [temp_cfg, temp_cfg, temp_cfg, temp_cfg] self.inplanes = 64 self.addrate = alpha / (sum(layers[depth]) * 1.0) self.input_featuremap_dim = self.inplanes self.conv1 = nn.Conv2d(3, self.input_featuremap_dim, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(self.input_featuremap_dim) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.featuremap_dim = self.input_featuremap_dim self.layer1 = self.pyramidal_make_layer(blocks[depth], layers[depth][0]) self.layer2 = self.pyramidal_make_layer(blocks[depth], layers[depth][1], stride=2) self.layer3 = self.pyramidal_make_layer(blocks[depth], layers[depth][2], stride=2) self.layer4 = self.pyramidal_make_layer(blocks[depth], layers[depth][3], stride=2) self.final_featuremap_dim = self.input_featuremap_dim self.bn_final = nn.BatchNorm2d(self.final_featuremap_dim) self.relu_final = nn.ReLU(inplace=True) self.avgpool = nn.AvgPool2d(7) self.fc = nn.Linear(self.final_featuremap_dim, n_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() assert len(self.ps_shakedrop) == 0, self.ps_shakedrop def pyramidal_make_layer(self, block, block_depth, stride=1): downsample = None if stride != 1: # or self.inplanes != int(round(featuremap_dim_1st)) * block.outchannel_ratio: downsample = nn.AvgPool2d((2, 2), stride=(2, 2), ceil_mode=True) layers = [] self.featuremap_dim = self.featuremap_dim + self.addrate layers.append(block(self.input_featuremap_dim, int(round(self.featuremap_dim)), stride, downsample, p_shakedrop=self.ps_shakedrop.pop(0))) for i in range(1, block_depth): temp_featuremap_dim = self.featuremap_dim + self.addrate layers.append( block(int(round(self.featuremap_dim)) * block.outchannel_ratio, int(round(temp_featuremap_dim)), 1, p_shakedrop=self.ps_shakedrop.pop(0))) self.featuremap_dim = temp_featuremap_dim self.input_featuremap_dim = int(round(self.featuremap_dim)) * block.outchannel_ratio return nn.Sequential(*layers) def forward(self, x): if self.dataset == 'cifar10' or self.dataset == 'cifar100': x = self.conv1(x) x = self.bn1(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.bn_final(x) x = self.relu_final(x) x = self.avgpool(x) x = x.view(x.size(0), -1) x = self.fc(x) elif self.dataset == 'imagenet': x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.bn_final(x) x = self.relu_final(x) x = self.avgpool(x) x = x.view(x.size(0), -1) x = self.fc(x) return x
archai/archai/supergraph/models/pyramidnet.py/0
{ "file_path": "archai/archai/supergraph/models/pyramidnet.py", "repo_id": "archai", "token_count": 4758 }
348
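The widening schedule in pyramidal_make_layer adds a constant `addrate` to the (float) feature dimension at every block and rounds when a block is built. The snippet below recomputes the per-block widths for a CIFAR PyramidNet-110 (alpha=84, basic blocks) purely for illustration:

depth, alpha, bottleneck = 110, 84, False
n = (depth - 2) // (9 if bottleneck else 6)
addrate = alpha / (3 * n)

dim, widths = 16.0, []  # the CIFAR stem outputs 16 channels
for _ in range(3 * n):
    dim += addrate
    widths.append(int(round(dim)))

print(len(widths), widths[:4], widths[-1])  # 54 blocks, widths rise from ~18 to 100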
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

from typing import List, Optional

import torch
from overrides import overrides
from torch import Tensor, nn

from archai.supergraph.nas.arch_module import ArchModule
from archai.supergraph.nas.model_desc import EdgeDesc
from archai.supergraph.nas.operations import DropPath_, Op


class DagEdge(ArchModule):
    def __init__(self, desc: EdgeDesc, affine: bool, droppath: bool,
                 template_edge: Optional['DagEdge']) -> None:
        super().__init__()
        # we may need to wrap the op if droppath is needed
        self._wrapped = self._op = Op.create(
            desc.op_desc, affine,
            template_edge.op().arch_params() if template_edge is not None else None)
        if droppath and self._op.can_drop_path():
            assert self.training
            self._wrapped = nn.Sequential(self._op, DropPath_())
        self.input_ids = desc.input_ids
        self.desc = desc

    @overrides
    def forward(self, inputs: List[torch.Tensor]):
        if len(self.input_ids) == 1:
            return self._wrapped(inputs[self.input_ids[0]])
        elif len(self.input_ids) == len(inputs):  # for perf
            return self._wrapped(inputs)
        else:
            return self._wrapped([inputs[i] for i in self.input_ids])

    def op(self) -> Op:
        return self._op
archai/archai/supergraph/nas/dag_edge.py/0
{ "file_path": "archai/archai/supergraph/nas/dag_edge.py", "repo_id": "archai", "token_count": 563 }
349
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import os
import weakref
from collections import UserDict
from typing import Callable

import torch

from archai.common import utils
from archai.common.config import Config

_CallbackType = Callable  # [['CheckPoint', *kargs: Any, **kwargs: Any], None]


class CheckPoint(UserDict):
    """Callback-based checkpoint model.

    Start a new checkpoint by calling new() and save it by calling commit().
    This class is also a dictionary: items that need to be saved can be added
    as key, value pairs after new(). As soon as any dictionary key is set, the
    checkpoint becomes dirty. On commit(), the dictionary is saved and emptied.
    Invariant: checkpoint remains dirty until commit() is called.
    """

    def __init__(self, conf_checkpoint: Config, load_existing: bool) -> None:
        super().__init__()

        # region config vars
        self.filepath = utils.full_path(conf_checkpoint['filename'])
        self.freq = conf_checkpoint['freq']
        # endregion

        self._callbacks = []

        if load_existing:
            self.load_existing()

    def load_existing(self) -> bool:
        assert self.is_empty()
        if self.filepath and os.path.exists(self.filepath):
            d = torch.load(self.filepath, map_location=torch.device('cpu'))
            self.clear()
            self.update(d)
            return True
        return False

    def new(self, *kargs, **kvargs) -> None:
        self.clear()
        for func, obj in self._callbacks:
            func = func()  # get actual reference from weakref
            if obj is not None:
                obj = obj()  # get actual reference from weakref
                if obj is None:
                    continue  # instance is gone
                func(obj, self, *kargs, **kvargs)
            elif func is not None:
                func(self, *kargs, **kvargs)
            # else func is garbage collected

    def commit(self) -> None:
        assert self.filepath and not self.is_empty()
        torch.save(self.data, self.filepath)
        # clean up after commit so we don't hold up references

    def is_empty(self) -> bool:
        return len(self) == 0

    # TODO: this is no longer used, should we remove it?
    def subscribe(self, callback: _CallbackType) -> None:
        obj = getattr(callback, '__self__', None)
        callback_ref = weakref.ref(callback.__func__), \
            None if obj is None else weakref.ref(obj)
        self._callbacks.append(callback_ref)
archai/archai/supergraph/utils/checkpoint.py/0
{ "file_path": "archai/archai/supergraph/utils/checkpoint.py", "repo_id": "archai", "token_count": 1015 }
350
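A hedged usage sketch of the checkpoint class above: since __init__ only indexes conf_checkpoint['filename'] and ['freq'], a plain dict stands in for the Config object here, and a bound method is used because subscribe() stores __func__/__self__ weakrefs:

import torch

from archai.supergraph.utils.checkpoint import CheckPoint

ckpt = CheckPoint({"filename": "checkpoint.pth", "freq": 1}, load_existing=False)

class Trainer:
    def on_checkpoint(self, checkpoint, *args, **kwargs):
        checkpoint["step"] = 42  # anything torch.save can serialize

trainer = Trainer()
ckpt.subscribe(trainer.on_checkpoint)  # held via weakrefs
ckpt.new()      # clears the dict and lets subscribers repopulate it
ckpt.commit()   # torch.save(self.data, self.filepath)
print(torch.load("checkpoint.pth"))  # {'step': 42}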
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. import json import os from dataclasses import asdict, dataclass, field from typing import Any, Dict, Union import deepspeed import torch from archai.common.file_utils import get_full_path @dataclass class DsTrainingArguments: """Define arguments used in the DeepSpeed training pipeline. Args: output_dir: Output folder. ds_config: DeepSpeed configuration (dictionary or path to JSON file). do_eval: Whether to enable evaluation. max_steps: Maximum number of training steps. logging_steps: Number of steps between logs. save_steps: Number of steps between checkpoints. seed: Random seed. local_rank: Rank of process. backend: Distributed training backend. eval_steps: Number of steps between evaluations. pipe_parallel: Whether to use pipeline parallelism. pipe_parallel_size: Size of pipeline parallelism. pipe_parallel_loss_fn: Loss function for pipeline parallelism. pipe_parallel_partition_method: Partition method for pipeline parallelism. pipe_parallel_activation_checkpoint_steps: Number of steps between pipeline parallelism activation checkpoins. """ output_dir: str = field(metadata={"help": "Output folder."}) ds_config: Union[dict, str] = field( default_factory=dict, metadata={"help": "DeepSpeed configuration (dictionary or path to JSON file)."} ) do_eval: bool = field(default=True, metadata={"help": "Whether to enable evaluation."}) max_steps: int = field(default=1, metadata={"help": "Maximum number of training steps."}) logging_steps: int = field(default=10, metadata={"help": "Number of steps between logs."}) save_steps: int = field(default=500, metadata={"help": "Number of steps between checkpoints."}) seed: int = field(default=42, metadata={"help": "Random seed."}) local_rank: int = field(default=os.getenv("LOCAL_RANK", -1), metadata={"help": "Rank of process."}) backend: int = field(default="nccl", metadata={"help": "Distributed training backend."}) eval_steps: int = field(default=500, metadata={"help": "Number of steps between evaluations."}) eval_max_steps: int = field(default=None, metadata={"help": "Number of maximum steps during evaluation."}) pipe_parallel_size: int = field(default=1, metadata={"help": "Size of pipeline parallelism."}) pipe_parallel_loss_fn: callable = field(default=None, metadata={"help": "Loss function for pipeline parallelism."}) pipe_parallel_partition_method: str = field( default="parameters", metadata={"help": "Partition method for pipeline parallelism."} ) pipe_parallel_activation_checkpoint_steps: int = field( default=0, metadata={"help": "Number of steps between pipeline parallelism activation checkpoins."} ) dataloader_pin_memory: bool = field(default=True, metadata={"help": "Whether to pin the data loader memory."}) dataloader_num_workers: int = field(default=0, metadata={"help": "Number of subprocesses to use for data loading."}) def __post_init__(self) -> None: """Override post-initialization with custom instructions.""" self.output_dir = get_full_path(self.output_dir) if isinstance(self.ds_config, str): with open(self.ds_config, "r") as f: self.ds_config = json.load(f) torch.manual_seed(self.seed) deepspeed.runtime.utils.set_random_seed(self.seed) self.local_rank = int(self.local_rank) torch.cuda.set_device(self.local_rank) def to_dict(self) -> Dict[str, Any]: """Convert attributes into a dictionary representation. Returns: Attributes encoded as a dictionary. """ return asdict(self)
archai/archai/trainers/nlp/ds_training_args.py/0
{ "file_path": "archai/archai/trainers/nlp/ds_training_args.py", "repo_id": "archai", "token_count": 1320 }
351
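For reference, `ds_config` follows DeepSpeed's standard configuration schema. The dictionary below is a hypothetical minimal example (key names are DeepSpeed's, values are made up), and constructing the dataclass itself requires deepspeed and a visible CUDA device because __post_init__ calls torch.cuda.set_device:

ds_config = {
    "train_batch_size": 256,
    "train_micro_batch_size_per_gpu": 8,
    "optimizer": {"type": "AdamW", "params": {"lr": 6e-4}},
    "fp16": {"enabled": True},
    "zero_optimization": {"stage": 1},
}

from archai.trainers.nlp.ds_training_args import DsTrainingArguments

args = DsTrainingArguments("out_ds", ds_config=ds_config, max_steps=1000)
print(args.to_dict()["max_steps"])  # 1000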
__include__: "darts.yaml" # just use darts defaults common: log_level: 20 # logging.INFO nas: eval: model_desc: params: { 'cell_matrix' : [[0, 1, 1, 1, 0, 1, 0], [0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0]], 'vertex_ops': ['input', 'conv1x1-bn-relu', 'conv3x3-bn-relu', 'conv3x3-bn-relu', 'conv3x3-bn-relu', 'maxpool3x3', 'output'], 'num_stacks': 3 # Number of stacks, each contains identical cells } model_stems: ops: ['stem_conv3x3Relu', 'stem_conv3x3Relu'] stem_multiplier: 1 # output channels for stem = 128 init_node_ch: 128 # num of input/output channels for nodes in 1st cell model_post_op: 'pool_mean_tensor' n_cells: 9 # 3 stacks, each stack with 3 cells loader: aug: '' # additional augmentations to use, for ex, fa_reduced_cifar10, arsaug, autoaug_cifar10, autoaug_extend cutout: 0 # cutout length, use cutout augmentation when > 0 train_batch: 128 # 96 is too aggressive for 1080Ti, better set it to 68 trainer: aux_weight: 0.0 drop_path_prob: 0.0 # probability that given edge will be dropped grad_clip: 5.0 # grads above this value is clipped epochs: 108 optimizer: type: 'sgd' lr: 0.025 # init learning rate decay: 1.0e-4 # pytorch default is 0.0 momentum: 0.9 # pytorch default is 0.0 nesterov: False # pytorch default is False lr_schedule: type: 'cosine' min_lr: 0.0 # min learning rate to se bet in eta_min param of scheduler
archai/confs/algos/nasbench101.yaml/0
{ "file_path": "archai/confs/algos/nasbench101.yaml", "repo_id": "archai", "token_count": 810 }
352
__include__: './dataroot.yaml' # default dataset settings are for cifar dataset: name: 'flower102' n_classes: 102 channels: 3 # number of channels in image max_batches: -1 # if >= 0 then only these many batches are generated (useful for debugging) storage_name: 'flower102' # name of folder or tar file to copy from cloud storage
archai/confs/datasets/flower102.yaml/0
{ "file_path": "archai/confs/datasets/flower102.yaml", "repo_id": "archai", "token_count": 102 }
353
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license # Root image to be based # Available images: https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch/tags FROM nvcr.io/nvidia/pytorch:23.02-py3 # Labels for the docker LABEL description="NVIDIA Docker with Archai, DeepSpeed and Flash-Attention" \ repository="archai" \ tag="latest" \ creator="microsoft" \ tooltype="archai" \ createtime="03/20/2023" # Exports environment variables ENV PATH="/root/.local/bin:$PATH" # Installs basic utilities RUN apt-get update && \ apt-get install --no-install-recommends --no-install-suggests -yq && \ rm -rf /var/lib/apt/lists/* && \ apt-get purge --auto-remove && \ apt-get clean # Installs DeepSpeed and OpenMPI RUN pip install --upgrade pip && \ pip uninstall -y xgboost && \ DS_BUILD_UTILS=1 DS_BUILD_FUSED_LAMB=1 pip install deepspeed==0.8.1 && \ CC=mpicc MPICC=mpicc pip install mpi4py --no-binary mpi4py # Installs Flash-Attention and CUDA extensions for cross-entropy, fused dense, layer norm RUN pip install flash-attn==0.2.8 RUN git clone https://github.com/HazyResearch/flash-attention \ && cd flash-attention && git checkout v0.2.8 \ && cd csrc/fused_softmax && pip install . && cd ../../ \ && cd csrc/rotary && pip install . && cd ../../ \ && cd csrc/xentropy && pip install . && cd ../../ \ && cd csrc/layer_norm && pip install . && cd ../../ \ && cd csrc/fused_dense_lib && pip install . && cd ../../ \ # && cd csrc/ft_attention && pip install . && cd ../../ \ && cd .. && rm -rf flash-attention # Installs Archai RUN git clone -b main --single-branch https://github.com/microsoft/archai.git WORKDIR /workspace/archai RUN pip install --user --no-cache-dir .[dev]
archai/docker/Dockerfile.flash/0
{ "file_path": "archai/docker/Dockerfile.flash", "repo_id": "archai", "token_count": 666 }
354
Discrete Search =============== .. toctree:: :maxdepth: 2 Search Spaces <discrete_search/search_space.ipynb> Evaluators <discrete_search/evaluators.ipynb> Algorithms <discrete_search/algos.ipynb> Configuration-based Search <discrete_search/config_search.ipynb>
archai/docs/getting_started/notebooks/discrete_search.rst/0
{ "file_path": "archai/docs/getting_started/notebooks/discrete_search.rst", "repo_id": "archai", "token_count": 101 }
355
<jupyter_start><jupyter_text>Exporting Models to ONNXExporting a pre-trained model to ONNX involves converting the model into a common format that can be easily integrated and deployed across different platforms. The conversion can be done using a tool or library, which converts the model's architecture, weights, and configurations. This allows the model to be used in various applications, such as edge devices, cloud services, and web-based systems, with improved compatibility and performance. Loading the ModelThe first step is to load any NLP-related model. In this notebook, we will be using a pre-trained GPT-2 model from the Hugging Face's Hub.<jupyter_code>from transformers import GPT2LMHeadModel model = GPT2LMHeadModel.from_pretrained("gpt2")<jupyter_output><empty_output><jupyter_text>Exporting to ONNXAfter the model has been loaded, we call Archai's `export_to_onnx()` method, which wraps all the inner computation of an ONNX export. Additionally, it supports a set of arguments that can be defined according to the input model and task, such as:* `task`: Task identifier to use proper inputs/outputs.* `use_past`: Whether to include past key/values in the model.* `validate`: Whether to validate the exported model.* `share_weights`: Whether to share the embedding and softmax weights.* `opset`: Set of operations to use with ONNX.* `atol`: Tolerance between input and exported model.<jupyter_code>from archai.common.file_utils import calculate_onnx_model_size from archai.onnx.export import export_to_onnx onnx_model_path = "model.onnx" onnx_config = export_to_onnx( model, onnx_model_path, task="causal-lm", use_past=True, share_weights=True, opset=11, atol=1e-4, ) print(f"Model: {calculate_onnx_model_size(onnx_model_path)}MB")<jupyter_output>2023-03-21 15:16:14,303 - archai.onnx.export — INFO — Exporting model: model.onnx<jupyter_text>Post-Export OptimizationFor Transformer-based models, ONNX Runtime offers a set of post-optimization tools that enables node fusion and hence, a more optimized graph. Thus, we can call `optimize_onnx()` passing the path of the previously exported ONNX model.*The prints compares the models' sizes, but is highly recommended to use an external graph inspection tool, such as Netron.*<jupyter_code>from archai.onnx.optimization import optimize_onnx ort_model_path = optimize_onnx(onnx_model_path, onnx_config, opt_level=1) print(f"Model-OPT: {calculate_onnx_model_size(ort_model_path)}MB")<jupyter_output>2023-03-21 15:16:32,958 - archai.onnx.optimization — INFO — Optimizing model: model.onnx Model-OPT: 498.940725MB<jupyter_text>Post-Training Quantization (PTQ)Finally, either the exported or post-optimized models can be dynamically quantized using the `dynamic_quantization_onnx()` method.Nevertheless, please note that if the model has not been pre-trained with Quantization Aware Training (QAT), it might produce different logits and have its performance diminished.<jupyter_code>from archai.quantization.ptq import dynamic_quantization_onnx qnt_model_path = dynamic_quantization_onnx(ort_model_path) print(f"Model-QNT: {calculate_onnx_model_size(qnt_model_path)}MB")<jupyter_output>2023-03-21 15:16:49,535 - archai.quantization.ptq — INFO — Quantizing model: model-opt.onnx
archai/docs/getting_started/notebooks/nlp/onnx_export.ipynb/0
{ "file_path": "archai/docs/getting_started/notebooks/nlp/onnx_export.ipynb", "repo_id": "archai", "token_count": 1065 }
356
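A small follow-up that is useful after the export notebook above (assuming `onnxruntime` is installed): load the exported file and inspect its input/output signature before wiring it into a serving stack.

import onnxruntime as ort

session = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
print([i.name for i in session.get_inputs()])   # e.g. input_ids plus past key/values
print([o.name for o in session.get_outputs()])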
Search Algorithms ================= BANANAS ------- .. automodule:: archai.discrete_search.algos.bananas :members: :undoc-members: Evolution Pareto ---------------- .. automodule:: archai.discrete_search.algos.evolution_pareto :members: :undoc-members: Local Search ------------ .. automodule:: archai.discrete_search.algos.local_search :members: :undoc-members: Random Search ------------- .. automodule:: archai.discrete_search.algos.random_search :members: :undoc-members: Regularized Evolution --------------------- .. automodule:: archai.discrete_search.algos.regularized_evolution :members: :undoc-members: Successive Halving ------------------ .. automodule:: archai.discrete_search.algos.successive_halving :members: :undoc-members:
archai/docs/reference/api/archai.discrete_search.algos.rst/0
{ "file_path": "archai/docs/reference/api/archai.discrete_search.algos.rst", "repo_id": "archai", "token_count": 276 }
357
Utilities ========= Multi-Objective --------------- .. automodule:: archai.discrete_search.utils.multi_objective :members: :undoc-members:
archai/docs/reference/api/archai.discrete_search.utils.rst/0
{ "file_path": "archai/docs/reference/api/archai.discrete_search.utils.rst", "repo_id": "archai", "token_count": 50 }
358
Providers
=========

Aircraft
--------

.. automodule:: archai.supergraph.datasets.providers.aircraft_provider
    :members:
    :undoc-members:

CIFAR-100
---------

.. automodule:: archai.supergraph.datasets.providers.cifar100_provider
    :members:
    :undoc-members:

CIFAR-10
--------

.. automodule:: archai.supergraph.datasets.providers.cifar10_provider
    :members:
    :undoc-members:

Fashion-MNIST
-------------

.. automodule:: archai.supergraph.datasets.providers.fashion_mnist_provider
    :members:
    :undoc-members:

Flower-102
----------

.. automodule:: archai.supergraph.datasets.providers.flower102_provider
    :members:
    :undoc-members:

Food-101
--------

.. automodule:: archai.supergraph.datasets.providers.food101_provider
    :members:
    :undoc-members:

ImageNet Folder
---------------

.. automodule:: archai.supergraph.datasets.providers.imagenet_folder
    :members:
    :undoc-members:

ImageNet
--------

.. automodule:: archai.supergraph.datasets.providers.imagenet_provider
    :members:
    :undoc-members:

MIT-67
------

.. automodule:: archai.supergraph.datasets.providers.mit67_provider
    :members:
    :undoc-members:

MNIST
-----

.. automodule:: archai.supergraph.datasets.providers.mnist_provider
    :members:
    :undoc-members:

Person-COCO
-----------

.. automodule:: archai.supergraph.datasets.providers.person_coco_provider
    :members:
    :undoc-members:

Sport-8
-------

.. automodule:: archai.supergraph.datasets.providers.sport8_provider
    :members:
    :undoc-members:

StanfordCars
------------

.. automodule:: archai.supergraph.datasets.providers.stanfordcars_provider
    :members:
    :undoc-members:

SVHN
----

.. automodule:: archai.supergraph.datasets.providers.svhn_provider
    :members:
    :undoc-members:
archai/docs/reference/api/archai.supergraph.datasets.providers.rst/0
{ "file_path": "archai/docs/reference/api/archai.supergraph.datasets.providers.rst", "repo_id": "archai", "token_count": 684 }
359
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. [pytest] addopts=-vv --durations=10 # Do not run tests in the build folder norecursedirs=build # Warnings to be ignored filterwarnings= ignore::DeprecationWarning ignore::UserWarning ignore::torch.jit.TracerWarning
archai/pytest.ini/0
{ "file_path": "archai/pytest.ini", "repo_id": "archai", "token_count": 98 }
360
<jupyter_start><jupyter_text>How-To Evaluate Models with LM-Eval HarnessThe `lm_eval_harness` research project implements a wrapper over the `lm_eval` framework, provided by EleutherAI. It is designed to make it easy to evaluate NLP models and compare their performance. In this tutorial, we will walk through the process of evaluating NLP models with `lm_eval_harness`, including how to set up the framework, how to use it to evaluate models, and how to interpret the results. InstallationThe `lm_eval_harness` project is designed to be an installable module, which allow users to call it from outside its package. Thus, one can install it as follows:<jupyter_code>try: import lm_eval_harness except ModuleNotFoundError: !pip install git+https://github.com/microsoft/archai.git@pre-release#subdirectory=research/lm_eval_harness<jupyter_output>Collecting git+https://github.com/microsoft/archai.git@pre-release#subdirectory=research/lm_eval_harness Cloning https://github.com/microsoft/archai.git (to revision pre-release) to c:\users\gderosa\appdata\local\temp\pip-req-build-2q9113pq Resolved https://github.com/microsoft/archai.git to commit 81ffdd907b9485e3663f1ddbf32e2f862a65f4fe<jupyter_text>Wrap Requirements (Model and Tokenizer)The first step is to load a model (instances of `torch.nn.Module`) and the tokenizer (instances of `transformers.AutoTokenizer`).In this example, we will load the pre-trained `gpt2` from the Hugging Face Hub and its tokenizer:<jupyter_code>from transformers import AutoModelForCausalLM, AutoTokenizer from lm_eval_harness.lm_eval_hf_model import HFEvalModel model = AutoModelForCausalLM.from_pretrained("gpt2") tokenizer = AutoTokenizer.from_pretrained("gpt2") hf_model = HFEvalModel(model, tokenizer)<jupyter_output><empty_output><jupyter_text>Start the EvaluationAfter the model and tokenizer have been loaded, evaluating a model is simple as calling the `evaluate_wrapper()` function.Note that we opted to create a wrapper over the `lm_eval.evaluator.evaluate()` method to supply our research demands, which consists in easily prototyping new models based of Hugging Face. Nevertheless, one is always allowed to bring own models and additional functionalities that might be needed. Required Arguments* `hf_model`: An instance of a model and tokenizer that have been wrapped with the `HFEvalModel` class.* `tasks`: A list of string-based tasks identifiers. Optional Arguments* `num_fewshot`: Number of few-shot samples, defaults to `0`.* `no_cache`: Disables the caching mechanism and re-computes the predictions, defaults to `False`.<jupyter_code>from lm_eval.tasks import ALL_TASKS from lm_eval_harness.lm_eval_evaluator import evaluate_wrapper print(f"List of tasks: {ALL_TASKS}") outputs = evaluate_wrapper( hf_model, ["copa"], num_fewshot=0, no_cache=True, )<jupyter_output>Could not import signal.SIGPIPE (this is expected on Windows machines)<jupyter_text>Formatting the OutputsAfter the predictions have been computed, they are saved in an `outputs` variable (dictionary). However, `lm_eval` provides an additional function, denoted as `make_table()` that formats the outputs into a readable table:<jupyter_code>from lm_eval.evaluator import make_table print(make_table(outputs))<jupyter_output>|Task|Version|Metric|Value| |Stderr| |----|------:|------|----:|---|-----:| |copa| 0|acc | 0.64|± |0.0482|
archai/research/lm_eval_harness/tutorials/simple_evaluation.ipynb/0
{ "file_path": "archai/research/lm_eval_harness/tutorials/simple_evaluation.ipynb", "repo_id": "archai", "token_count": 1082 }
361
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. """ Script to prepare food101 dataset for pytorch dataloader. This script assumes that one has downloaded and extracted the full food101 dataset from ETHZ. Invoke the script as $ python food101.py --rootdir /path/to/food101. It will create 'train' and 'test' folders inside the root folder filled with the official train and test splits. The folder structure in 'train' and 'test' respect that needed for pytorch torchvision.datasets.ImageFolder to work. """ import argparse import os import pathlib import tempfile from torch.utils.model_zoo import tqdm from torchvision.datasets.utils import download_and_extract_archive from archai.common import utils def copy_file_list(file_list, src_dir, dest_dir): with tqdm(total=len(file_list)) as pbar: for i, filename in enumerate(file_list): filename = filename.strip() if filename: # convert / to os-specific dir separator filename_parts = (filename + ".jpg").split("/") target = os.path.join(dest_dir, *filename_parts) if not os.path.isfile(target): utils.copy_file(os.path.join(src_dir, *filename_parts), target) pbar.update(1) def prepare_data(dataroot: str) -> None: meta_path = os.path.join(dataroot, "food-101", "meta") images_path = os.path.join(dataroot, "food-101", "images") train_path = os.path.join(dataroot, "food-101", "train") test_path = os.path.join(dataroot, "food-101", "test") train_list = pathlib.Path(os.path.join(meta_path, "train.txt")).read_text().splitlines() test_list = pathlib.Path(os.path.join(meta_path, "test.txt")).read_text().splitlines() class_list = pathlib.Path(os.path.join(meta_path, "classes.txt")).read_text().splitlines() for class_name in class_list: class_name = class_name.strip() if class_name: os.makedirs(os.path.join(train_path, class_name), exist_ok=True) os.makedirs(os.path.join(test_path, class_name), exist_ok=True) copy_file_list(train_list, images_path, train_path) copy_file_list(test_list, images_path, test_path) def download(dataroot: str): DOWNLOAD_URL = "https://data.vision.ee.ethz.ch/cvl/food-101.tar.gz" download_and_extract_archive(DOWNLOAD_URL, tempfile.tempdir, extract_root=dataroot, remove_finished=True) if __name__ == "__main__": # download() parser = argparse.ArgumentParser() parser.add_argument( "--dataroot", type=str, default="d:\\datasets", help="root directory where food-101 folder exist (downloaded and extracted from ETHZ)", ) args = parser.parse_args() prepare_data(args.dataroot)
archai/scripts/supergraph/download_datasets/food101_install.py/0
{ "file_path": "archai/scripts/supergraph/download_datasets/food101_install.py", "repo_id": "archai", "token_count": 1092 }
362
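After the script above runs (e.g. `python food101_install.py --dataroot /data`, path assumed), the resulting folders follow the torchvision ImageFolder layout and can be sanity-checked like this:

import torchvision

train = torchvision.datasets.ImageFolder("/data/food-101/train")
test = torchvision.datasets.ImageFolder("/data/food-101/test")
print(len(train.classes), len(train), len(test))  # 101 classes, 75750 / 25250 images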
import logging import random from archai.supergraph.algos.nasbench101 import model_builder from archai.supergraph.algos.nasbench101.nasbench101_dataset import Nasbench101Dataset def main(): logging.getLogger().setLevel(logging.DEBUG) # create dataset nsds = Nasbench101Dataset("~/dataroot/nasbench_ds/nasbench_full.pkl") # create model by index model = nsds.create_model(42) print(model) model4 = nsds[4] print("model4", model4) # query for specific model data = nsds.query(model_builder.EXAMPLE_DESC_MATRIX, model_builder.EXAMPLE_VERTEX_OPS) print("queried", data) # sample model # nsds is list type object of model statistics num_models = len(nsds) data = nsds[random.randint(0, num_models - 1)] print("random", data) # nsds is pre-sorted by avg test accuracy print("worst acc", nsds.get_test_acc(0)) print("worst acc uninit", nsds.get_test_acc(0, step_index=0)) print("best acc", nsds.get_test_acc(len(nsds) - 1)) print("best acc uninit", nsds.get_test_acc(len(nsds) - 1, step_index=0)) print("best acc epoch 4", nsds.get_test_acc(len(nsds) - 1, epochs=4)) print("best acc epoch 36", nsds.get_test_acc(len(nsds) - 1, epochs=36)) if __name__ == "__main__": main()
archai/scripts/supergraph/nasbench101/query_test.py/0
{ "file_path": "archai/scripts/supergraph/nasbench101/query_test.py", "repo_id": "archai", "token_count": 506 }
363
import argparse import csv def read_status(filename): header = None with open(filename, 'r') as f: reader = csv.reader(f, delimiter=',') for row in reader: header = [x.strip() for x in row] break result = {} with open(filename, 'r') as f: reader = csv.DictReader(f, fieldnames=header, delimiter=',') next(reader) # skip the header row. for row in reader: if 'name' in row: key = row['name'] result[key] = row return result def compare(file1, file2): m1 = read_status(file1) m2 = read_status(file2) for key in m1: if key in m2: r1 = m1[key] r2 = m2[key] if 'mean' not in r1: print(f'model {key} in {file1} is missing: mean') elif 'mean' not in r2: print(f'model {key} is {file2} missing: mean') elif 'f1_1k' not in r1: print(f'model {key} in {file1} is missing: f1_1k') elif 'f1_1k' not in r2: print(f'model {key} is {file2} missing: f1_1k') else: print(f"{key}, {r1['mean']}, {r2['mean']}, {r1['f1_1k']}, {r2['f1_1k']}") else: print(f'model {key} NOT FOUND in {file2}') if __name__ == '__main__': parser = argparse.ArgumentParser( description='Compare the results from 2 status files in .csv format.') parser.add_argument('file1', help='The first .csv file name.') parser.add_argument('file2', help='The second .csv file name.') args = parser.parse_args() compare(args.file1, args.file2)
archai/tasks/face_segmentation/aml/azure/compare_status.py/0
{ "file_path": "archai/tasks/face_segmentation/aml/azure/compare_status.py", "repo_id": "archai", "token_count": 847 }
364
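A minimal sketch of the .csv shape the script expects, with column names inferred from the reader code above ('name', 'mean', 'f1_1k') and made-up values:

import csv

rows = [
    {"name": "model_a", "mean": 4.2, "f1_1k": 0.81},
    {"name": "model_b", "mean": 3.9, "f1_1k": 0.79},
]
for path in ("status1.csv", "status2.csv"):
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["name", "mean", "f1_1k"])
        writer.writeheader()
        writer.writerows(rows)

# then run: python compare_status.py status1.csv status2.csv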
apiVersion: apps/v1 kind: Deployment metadata: name: snpe-quantizer namespace: snpe spec: replicas: 1 selector: matchLabels: app: snpe-quantizer template: metadata: labels: app: snpe-quantizer spec: affinity: # See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#more-practical-use-cases # The quantizer is processor intensive, so we do not want more than one per node. podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: matchExpressions: - key: app operator: In values: - snpe-quantizer topologyKey: "kubernetes.io/hostname" containers: - name: snpe-quantizer image: snpecontainerregistry001.azurecr.io/quantizer:1.0 resources: limits: cpu: 4 env: - name: MODEL_STORAGE_CONNECTION_STRING value: $MSCS$ --- apiVersion: autoscaling/v1 kind: HorizontalPodAutoscaler metadata: name: snpe-quantizer namespace: snpe spec: maxReplicas: 100 # define max replica count minReplicas: 1 # define min replica count scaleTargetRef: apiVersion: apps/v1 kind: Deployment name: snpe-quantizer targetCPUUtilizationPercentage: 40 # target CPU utilization
archai/tasks/face_segmentation/aml/docker/quantizer/quantizer.template.yaml/0
{ "file_path": "archai/tasks/face_segmentation/aml/docker/quantizer/quantizer.template.yaml", "repo_id": "archai", "token_count": 615 }
365
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. from typing import Any, Dict, List, Optional, Tuple, Union from overrides import overrides from archai.discrete_search.evaluators import AvgOnnxLatency from archai.discrete_search.api.archai_model import ArchaiModel from archai.common.store import ArchaiStore class AvgOnnxLatencyEvaluator(AvgOnnxLatency): """Evaluate the average ONNX Latency (in seconds) of an architecture and store the result. The latency is measured by running the model on random inputs and averaging the latency over `num_trials` trials. """ def __init__( self, input_shape: Union[Tuple[int, ...], List[Tuple[int, ...]]], num_trials: Optional[int] = 1, input_dtype: Optional[str] = "torch.FloatTensor", rand_range: Optional[Tuple[float, float]] = (0.0, 1.0), export_kwargs: Optional[Dict[str, Any]] = None, device: Optional[str] = 'cpu', inf_session_kwargs: Optional[Dict[str, Any]] = None, store: ArchaiStore = None, metric_key: str = 'onnx_latency' ) -> None: super(AvgOnnxLatencyEvaluator, self).__init__( input_shape, num_trials, input_dtype, rand_range, export_kwargs, device, inf_session_kwargs) self.store = store self.metric_key = metric_key self.iteration = 1 @overrides def evaluate(self, model: ArchaiModel, budget: Optional[float] = None) -> float: archid = f'id_{model.archid}' if self.store is not None: e = self.store.get_status(archid) if 'iteration' not in e or e['iteration'] != self.iteration: e['iteration'] = self.iteration self.store.merge_status_entity(e) if self.metric_key in e: # Use the cached value and skip the more expensive Onnx evaluation. # This also ensures maximum re-use of previous training jobs. return e[self.metric_key] result = super(AvgOnnxLatencyEvaluator, self).evaluate(model, budget) if self.store is not None: e = self.store.get_status(archid) e['status'] = 'complete' e[self.metric_key] = result self.store.merge_status_entity(e) return result def on_start_iteration(self, iteration: int): self.iteration = iteration + 1
archai/tasks/face_segmentation/aml/training/onnx_latency.py/0
{ "file_path": "archai/tasks/face_segmentation/aml/training/onnx_latency.py", "repo_id": "archai", "token_count": 1085 }
366
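A hedged usage sketch of the evaluator above (the import path mirrors this file's location within the face_segmentation task and is an assumption, as is the toy ArchaiModel wrapping; with store=None nothing is cached in Azure tables):

import torch

from archai.discrete_search.api.archai_model import ArchaiModel
from aml.training.onnx_latency import AvgOnnxLatencyEvaluator  # path assumed

model = ArchaiModel(torch.nn.Linear(3 * 64 * 64, 10), archid="toy_linear")
evaluator = AvgOnnxLatencyEvaluator(input_shape=(1, 3 * 64 * 64), num_trials=3)
print(f"avg onnx latency: {evaluator.evaluate(model):.4f}s")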
import os import numpy as np import tempfile import uuid from archai.common.store import ArchaiStore CONNECTION_NAME = 'MODEL_STORAGE_CONNECTION_STRING' def test_store(): con_str = os.getenv(CONNECTION_NAME) if not con_str: print(f"Skipping test_store because you have no {CONNECTION_NAME} environment variable.") return storage_account_name, storage_account_key = ArchaiStore.parse_connection_string(con_str) store = ArchaiStore(storage_account_name, storage_account_key, table_name='unittest') name = str(uuid.uuid4()) try: entities = store.get_all_status_entities() assert len([x for x in entities if x['name'] == name]) == 0 e = store.get_existing_status(name) assert e is None e = store.get_status(name) assert e['status'] == 'new' e['status'] = 'running' e['accuracy'] = np.array([1.234])[0] # test np.float e['params'] = 9223372036854775800 store.update_status_entity(e) e = store.get_status(name) assert e['status'] == 'running' assert e['accuracy'] == 1.234 assert e['params'] == 9223372036854775800 entities = store.get_all_status_entities('status', 'running') assert len([x for x in entities if x['name'] == name]) == 1 store.delete_status_entity(e) entities = store.get_all_status_entities() assert len([x for x in entities if x['name'] == name]) == 0 store.lock(name, 'uploading') assert store.is_locked(name) assert store.is_locked_by_self(name) e = store.get_status(name) assert e['status'] == 'uploading' path = os.path.realpath(__file__) filename = os.path.basename(path) store.upload_blob(name, path) store.unlock(name) assert not store.is_locked(name) e = store.get_existing_status(name) e['node'] = 'fake' store.merge_status_entity(e) f = store.lock_entity(e, 'busy') assert f is None with tempfile.TemporaryDirectory() as tmpdir: store.download(name, tmpdir) assert os.path.exists(os.path.join(tmpdir, filename)) finally: store.delete_blobs(name) store.delete_status_entity(e)
archai/tests/common/test_store.py/0
{ "file_path": "archai/tests/common/test_store.py", "repo_id": "archai", "token_count": 990 }
367
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. import os import pytest from archai.discrete_search.algos.bananas import MoBananasSearch @pytest.fixture(scope="session") def output_dir(tmp_path_factory): return tmp_path_factory.mktemp("out_bananas") def test_bananas(output_dir, search_space, search_objectives, surrogate_model): algo = MoBananasSearch( search_space=search_space, search_objectives=search_objectives, output_dir=output_dir, surrogate_model=surrogate_model, num_iters=2, init_num_models=5, mutations_per_parent=2, num_parents=4, num_candidates=8, ) search_results = algo.search() assert len(os.listdir(output_dir)) > 0 df = search_results.get_search_state_df() assert all(0 <= x <= 0.4 for x in df["Random1"].tolist()) all_models = [m for iter_r in search_results.results for m in iter_r["models"]] # Checks if all registered models satisfy constraints _, valid_models = search_objectives.validate_constraints(all_models) assert len(valid_models) == len(all_models)
archai/tests/discrete_search/algos/test_bananas.py/0
{ "file_path": "archai/tests/discrete_search/algos/test_bananas.py", "repo_id": "archai", "token_count": 441 }
368
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. from typing import Callable from archai.discrete_search.evaluators.functional import EvaluationFunction def test_evaluation_function(): evaluator = EvaluationFunction(lambda a, b: 1) # Assert that evaluator can evaluate the argument function value = evaluator.evaluate(None, None) assert isinstance(evaluator.evaluation_fn, Callable) assert value == 1
archai/tests/discrete_search/evaluators/test_functional.py/0
{ "file_path": "archai/tests/discrete_search/evaluators/test_functional.py", "repo_id": "archai", "token_count": 131 }
369
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. import os from transformers import GPT2Config, GPT2LMHeadModel from archai.onnx.config_utils.onnx_config_base import OnnxConfig from archai.onnx.export import export_to_onnx def test_export_to_onnx(): model = GPT2LMHeadModel(config=GPT2Config(vocab_size=1, n_layer=1)) onnx_model_path = "temp_model.onnx" # Assert that the `onnx_config` is returned onnx_config = export_to_onnx(model, onnx_model_path) assert isinstance(onnx_config, OnnxConfig) # Assert that the `onnx_config` is returned when `use_past` is set to `False` onnx_config = export_to_onnx(model, onnx_model_path, use_past=False) assert isinstance(onnx_config, OnnxConfig) # Assert that the `onnx_config` is returned when `share_weights` is set to `False` onnx_config = export_to_onnx(model, onnx_model_path, share_weights=False) assert isinstance(onnx_config, OnnxConfig) os.remove(onnx_model_path)
archai/tests/onnx/test_export.py/0
{ "file_path": "archai/tests/onnx/test_export.py", "repo_id": "archai", "token_count": 396 }
370
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. from typing import Callable import torch from archai.common.common import common_init from archai.supergraph.nas.model import Model from archai.supergraph.nas.model_desc_builder import ModelDescBuilder def requires_gpu(test_fn: Callable): if torch.cuda.is_available() and torch.cuda.device_count() > 0: return test_fn() return @requires_gpu def test_darts_zero_model(): conf = common_init(config_filepath="confs/algos/darts.yaml") conf_search = conf["nas"]["search"] model_desc = conf_search["model_desc"] model_desc_builder = ModelDescBuilder() model_desc = model_desc_builder.build(model_desc) m = Model(model_desc, False, True) y, aux = m(torch.rand((1, 3, 32, 32))) assert isinstance(y, torch.Tensor) and y.shape == (1, 10) and aux is None @requires_gpu def test_petridish_zero_model(): conf = common_init(config_filepath="confs/algos/petridish_toy.yaml") conf_search = conf["nas"]["search"] model_desc = conf_search["model_desc"] model_desc_builder = ModelDescBuilder() model_desc = model_desc_builder.build(model_desc) m = Model(model_desc, False, True) y, aux = m(torch.rand((1, 3, 32, 32))) assert isinstance(y, torch.Tensor) and y.shape == (1, 10) and aux is None
archai/tests/supergraph/test_zero_model.py/0
{ "file_path": "archai/tests/supergraph/test_zero_model.py", "repo_id": "archai", "token_count": 493 }
371
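A compact sketch of the pattern shared by the two tests above: build a model description from a config file, instantiate Model, and push a dummy CIFAR-sized batch through it. The helper below is hypothetical and simply factors out the config path; like the tests, it assumes a GPU-capable environment and the referenced YAML configs.

import torch

from archai.common.common import common_init
from archai.supergraph.nas.model import Model
from archai.supergraph.nas.model_desc_builder import ModelDescBuilder


def build_and_check_zero_model(config_filepath: str):
    conf = common_init(config_filepath=config_filepath)
    model_desc = conf["nas"]["search"]["model_desc"]

    model_desc = ModelDescBuilder().build(model_desc)
    m = Model(model_desc, False, True)

    # One fake CIFAR-10 image: expect 10 logits and no auxiliary-head output.
    y, aux = m(torch.rand((1, 3, 32, 32)))
    assert y.shape == (1, 10) and aux is None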
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # Generated file, DO NOT EDIT # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------------------------- from msrest import Serializer, Deserializer from ...client import Client from ...v7_0.member_entitlement_management import models class MemberEntitlementManagementClient(Client): """MemberEntitlementManagement :param str base_url: Service URL :param Authentication creds: Authenticated credentials. """ def __init__(self, base_url=None, creds=None): super(MemberEntitlementManagementClient, self).__init__(base_url, creds) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) resource_area_identifier = '68ddce18-2501-45f1-a17b-7931a9922690' def add_group_entitlement(self, group_entitlement, rule_option=None): """AddGroupEntitlement. Create a group entitlement with license rule, extension rule. :param :class:`<GroupEntitlement> <azure.devops.v7_0.member_entitlement_management.models.GroupEntitlement>` group_entitlement: GroupEntitlement object specifying License Rule, Extensions Rule for the group. Based on the rules the members of the group will be given licenses and extensions. The Group Entitlement can be used to add the group to another project level groups :param str rule_option: RuleOption [ApplyGroupRule/TestApplyGroupRule] - specifies if the rules defined in group entitlement should be created and applied to it’s members (default option) or just be tested :rtype: :class:`<GroupEntitlementOperationReference> <azure.devops.v7_0.member_entitlement_management.models.GroupEntitlementOperationReference>` """ query_parameters = {} if rule_option is not None: query_parameters['ruleOption'] = self._serialize.query('rule_option', rule_option, 'str') content = self._serialize.body(group_entitlement, 'GroupEntitlement') response = self._send(http_method='POST', location_id='2280bffa-58a2-49da-822e-0764a1bb44f7', version='7.0', query_parameters=query_parameters, content=content) return self._deserialize('GroupEntitlementOperationReference', response) def delete_group_entitlement(self, group_id, rule_option=None, remove_group_membership=None): """DeleteGroupEntitlement. Delete a group entitlement. :param str group_id: ID of the group to delete. 
:param str rule_option: RuleOption [ApplyGroupRule/TestApplyGroupRule] - specifies if the rules defined in group entitlement should be deleted and the changes are applied to it’s members (default option) or just be tested :param bool remove_group_membership: Optional parameter that specifies whether the group with the given ID should be removed from all other groups :rtype: :class:`<GroupEntitlementOperationReference> <azure.devops.v7_0.member_entitlement_management.models.GroupEntitlementOperationReference>` """ route_values = {} if group_id is not None: route_values['groupId'] = self._serialize.url('group_id', group_id, 'str') query_parameters = {} if rule_option is not None: query_parameters['ruleOption'] = self._serialize.query('rule_option', rule_option, 'str') if remove_group_membership is not None: query_parameters['removeGroupMembership'] = self._serialize.query('remove_group_membership', remove_group_membership, 'bool') response = self._send(http_method='DELETE', location_id='2280bffa-58a2-49da-822e-0764a1bb44f7', version='7.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('GroupEntitlementOperationReference', response) def get_group_entitlement(self, group_id): """GetGroupEntitlement. Get a group entitlement. :param str group_id: ID of the group. :rtype: :class:`<GroupEntitlement> <azure.devops.v7_0.member_entitlement_management.models.GroupEntitlement>` """ route_values = {} if group_id is not None: route_values['groupId'] = self._serialize.url('group_id', group_id, 'str') response = self._send(http_method='GET', location_id='2280bffa-58a2-49da-822e-0764a1bb44f7', version='7.0', route_values=route_values) return self._deserialize('GroupEntitlement', response) def update_group_entitlement(self, document, group_id, rule_option=None): """UpdateGroupEntitlement. Update entitlements (License Rule, Extensions Rule, Project memberships etc.) for a group. :param :class:`<[JsonPatchOperation]> <azure.devops.v7_0.member_entitlement_management.models.[JsonPatchOperation]>` document: JsonPatchDocument containing the operations to perform on the group. :param str group_id: ID of the group. :param str rule_option: RuleOption [ApplyGroupRule/TestApplyGroupRule] - specifies if the rules defined in group entitlement should be updated and the changes are applied to it’s members (default option) or just be tested :rtype: :class:`<GroupEntitlementOperationReference> <azure.devops.v7_0.member_entitlement_management.models.GroupEntitlementOperationReference>` """ route_values = {} if group_id is not None: route_values['groupId'] = self._serialize.url('group_id', group_id, 'str') query_parameters = {} if rule_option is not None: query_parameters['ruleOption'] = self._serialize.query('rule_option', rule_option, 'str') content = self._serialize.body(document, '[JsonPatchOperation]') response = self._send(http_method='PATCH', location_id='2280bffa-58a2-49da-822e-0764a1bb44f7', version='7.0', route_values=route_values, query_parameters=query_parameters, content=content, media_type='application/json-patch+json') return self._deserialize('GroupEntitlementOperationReference', response) def get_group_entitlements(self): """GetGroupEntitlements. Get the group entitlements for an account. 
:rtype: [GroupEntitlement] """ response = self._send(http_method='GET', location_id='9bce1f43-2629-419f-8f6c-7503be58a4f3', version='7.0') return self._deserialize('[GroupEntitlement]', self._unwrap_collection(response)) def add_member_to_group(self, group_id, member_id): """AddMemberToGroup. Add a member to a Group. :param str group_id: Id of the Group. :param str member_id: Id of the member to add. """ route_values = {} if group_id is not None: route_values['groupId'] = self._serialize.url('group_id', group_id, 'str') if member_id is not None: route_values['memberId'] = self._serialize.url('member_id', member_id, 'str') self._send(http_method='PUT', location_id='45a36e53-5286-4518-aa72-2d29f7acc5d8', version='7.0', route_values=route_values) def get_group_members(self, group_id, max_results=None, paging_token=None): """GetGroupMembers. Get direct members of a Group. :param str group_id: Id of the Group. :param int max_results: Maximum number of results to retrieve. :param str paging_token: Paging Token from the previous page fetched. If the 'pagingToken' is null, the results would be fetched from the beginning of the Members List. :rtype: :class:`<PagedGraphMemberList> <azure.devops.v7_0.member_entitlement_management.models.PagedGraphMemberList>` """ route_values = {} if group_id is not None: route_values['groupId'] = self._serialize.url('group_id', group_id, 'str') query_parameters = {} if max_results is not None: query_parameters['maxResults'] = self._serialize.query('max_results', max_results, 'int') if paging_token is not None: query_parameters['pagingToken'] = self._serialize.query('paging_token', paging_token, 'str') response = self._send(http_method='GET', location_id='45a36e53-5286-4518-aa72-2d29f7acc5d8', version='7.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('PagedGraphMemberList', response) def remove_member_from_group(self, group_id, member_id): """RemoveMemberFromGroup. Remove a member from a Group. :param str group_id: Id of the group. :param str member_id: Id of the member to remove. """ route_values = {} if group_id is not None: route_values['groupId'] = self._serialize.url('group_id', group_id, 'str') if member_id is not None: route_values['memberId'] = self._serialize.url('member_id', member_id, 'str') self._send(http_method='DELETE', location_id='45a36e53-5286-4518-aa72-2d29f7acc5d8', version='7.0', route_values=route_values) def add_user_entitlement(self, user_entitlement): """AddUserEntitlement. Add a user, assign license and extensions and make them a member of a project group in an account. :param :class:`<UserEntitlement> <azure.devops.v7_0.member_entitlement_management.models.UserEntitlement>` user_entitlement: UserEntitlement object specifying License, Extensions and Project/Team groups the user should be added to. :rtype: :class:`<UserEntitlementsPostResponse> <azure.devops.v7_0.member_entitlement_management.models.UserEntitlementsPostResponse>` """ content = self._serialize.body(user_entitlement, 'UserEntitlement') response = self._send(http_method='POST', location_id='387f832c-dbf2-4643-88e9-c1aa94dbb737', version='7.0', content=content) return self._deserialize('UserEntitlementsPostResponse', response) def search_user_entitlements(self, continuation_token=None, select=None, filter=None, order_by=None): """SearchUserEntitlements. Get a paged set of user entitlements matching the filter and sort criteria built with properties that match the select input. 
:param str continuation_token: Continuation token for getting the next page of data set. If null is passed, gets the first page. :param str select: Comma (",") separated list of properties to select in the result entitlements. names of the properties are - 'Projects, 'Extensions' and 'Grouprules'. :param str filter: Equality operators relating to searching user entitlements seperated by and clauses. Valid filters include: licenseId, licenseStatus, userType, and name. licenseId: filters based on license assignment using license names. i.e. licenseId eq 'Account-Stakeholder' or licenseId eq 'Account-Express'. licenseStatus: filters based on license status. currently only supports disabled. i.e. licenseStatus eq 'Disabled'. To get disabled basic licenses, you would pass (licenseId eq 'Account-Express' and licenseStatus eq 'Disabled') userType: filters off identity type. Suppored types are member or guest i.e. userType eq 'member'. name: filters on if the user's display name or email contians given input. i.e. get all users with "test" in email or displayname is "name eq 'test'". A valid query could be: (licenseId eq 'Account-Stakeholder' or (licenseId eq 'Account-Express' and licenseStatus eq 'Disabled')) and name eq 'test' and userType eq 'guest'. :param str order_by: PropertyName and Order (separated by a space ( )) to sort on (e.g. lastAccessed desc). Order defaults to ascending. valid properties to order by are dateCreated, lastAccessed, and name :rtype: :class:`<PagedGraphMemberList> <azure.devops.v7_0.member_entitlement_management.models.PagedGraphMemberList>` """ query_parameters = {} if continuation_token is not None: query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str') if select is not None: query_parameters['select'] = self._serialize.query('select', select, 'str') if filter is not None: query_parameters['$filter'] = self._serialize.query('filter', filter, 'str') if order_by is not None: query_parameters['$orderBy'] = self._serialize.query('order_by', order_by, 'str') response = self._send(http_method='GET', location_id='387f832c-dbf2-4643-88e9-c1aa94dbb737', version='7.0', query_parameters=query_parameters) return self._deserialize('PagedGraphMemberList', response) def update_user_entitlements(self, document, do_not_send_invite_for_new_users=None): """UpdateUserEntitlements. Edit the entitlements (License, Extensions, Projects, Teams etc) for one or more users. :param :class:`<[JsonPatchOperation]> <azure.devops.v7_0.member_entitlement_management.models.[JsonPatchOperation]>` document: JsonPatchDocument containing the operations to perform. :param bool do_not_send_invite_for_new_users: Whether to send email invites to new users or not :rtype: :class:`<UserEntitlementOperationReference> <azure.devops.v7_0.member_entitlement_management.models.UserEntitlementOperationReference>` """ query_parameters = {} if do_not_send_invite_for_new_users is not None: query_parameters['doNotSendInviteForNewUsers'] = self._serialize.query('do_not_send_invite_for_new_users', do_not_send_invite_for_new_users, 'bool') content = self._serialize.body(document, '[JsonPatchOperation]') response = self._send(http_method='PATCH', location_id='387f832c-dbf2-4643-88e9-c1aa94dbb737', version='7.0', query_parameters=query_parameters, content=content, media_type='application/json-patch+json') return self._deserialize('UserEntitlementOperationReference', response) def delete_user_entitlement(self, user_id): """DeleteUserEntitlement. 
Delete a user from the account. :param str user_id: ID of the user. """ route_values = {} if user_id is not None: route_values['userId'] = self._serialize.url('user_id', user_id, 'str') self._send(http_method='DELETE', location_id='8480c6eb-ce60-47e9-88df-eca3c801638b', version='7.0', route_values=route_values) def get_user_entitlement(self, user_id): """GetUserEntitlement. Get User Entitlement for a user. :param str user_id: ID of the user. :rtype: :class:`<UserEntitlement> <azure.devops.v7_0.member_entitlement_management.models.UserEntitlement>` """ route_values = {} if user_id is not None: route_values['userId'] = self._serialize.url('user_id', user_id, 'str') response = self._send(http_method='GET', location_id='8480c6eb-ce60-47e9-88df-eca3c801638b', version='7.0', route_values=route_values) return self._deserialize('UserEntitlement', response) def update_user_entitlement(self, document, user_id): """UpdateUserEntitlement. Edit the entitlements (License, Extensions, Projects, Teams etc) for a user. :param :class:`<[JsonPatchOperation]> <azure.devops.v7_0.member_entitlement_management.models.[JsonPatchOperation]>` document: JsonPatchDocument containing the operations to perform on the user. :param str user_id: ID of the user. :rtype: :class:`<UserEntitlementsPatchResponse> <azure.devops.v7_0.member_entitlement_management.models.UserEntitlementsPatchResponse>` """ route_values = {} if user_id is not None: route_values['userId'] = self._serialize.url('user_id', user_id, 'str') content = self._serialize.body(document, '[JsonPatchOperation]') response = self._send(http_method='PATCH', location_id='8480c6eb-ce60-47e9-88df-eca3c801638b', version='7.0', route_values=route_values, content=content, media_type='application/json-patch+json') return self._deserialize('UserEntitlementsPatchResponse', response) def get_users_summary(self, select=None): """GetUsersSummary. Get summary of Licenses, Extension, Projects, Groups and their assignments in the collection. :param str select: Comma (",") separated list of properties to select. Supported property names are {AccessLevels, Licenses, Projects, Groups}. :rtype: :class:`<UsersSummary> <azure.devops.v7_0.member_entitlement_management.models.UsersSummary>` """ query_parameters = {} if select is not None: query_parameters['select'] = self._serialize.query('select', select, 'str') response = self._send(http_method='GET', location_id='5ae55b13-c9dd-49d1-957e-6e76c152e3d9', version='7.0', query_parameters=query_parameters) return self._deserialize('UsersSummary', response)
azure-devops-python-api/azure-devops/azure/devops/released/member_entitlement_management/member_entitlement_management_client.py/0
{ "file_path": "azure-devops-python-api/azure-devops/azure/devops/released/member_entitlement_management/member_entitlement_management_client.py", "repo_id": "azure-devops-python-api", "token_count": 7781 }
372
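A hedged usage sketch for the MemberEntitlementManagementClient above. The Connection/BasicAuthentication setup is the package's standard pattern; the organization URL and personal access token are placeholders, and the client-factory accessor name is an assumption inferred from the package's get_<area>_client convention (the factory itself is not shown in this record).

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

connection = Connection(
    base_url="https://dev.azure.com/your-organization",           # placeholder
    creds=BasicAuthentication("", "your-personal-access-token"),  # placeholder PAT
)
# Accessor name assumed; not shown in this record.
client = connection.clients.get_member_entitlement_management_client()

# Both calls correspond to methods defined on the client above.
summary = client.get_users_summary(select="Licenses,Projects")
group_entitlements = client.get_group_entitlements()
print(summary, len(group_entitlements))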
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------

from msrest import Serializer, Deserializer
from ...client import Client
from ...v7_0.project_analysis import models


class ProjectAnalysisClient(Client):
    """ProjectAnalysis
    :param str base_url: Service URL
    :param Authentication creds: Authenticated credentials.
    """

    def __init__(self, base_url=None, creds=None):
        super(ProjectAnalysisClient, self).__init__(base_url, creds)
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

    resource_area_identifier = '7658fa33-b1bf-4580-990f-fac5896773d3'

    def get_project_language_analytics(self, project):
        """GetProjectLanguageAnalytics.
        :param str project: Project ID or project name
        :rtype: :class:`<ProjectLanguageAnalytics> <azure.devops.v7_0.project_analysis.models.ProjectLanguageAnalytics>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        response = self._send(http_method='GET',
                              location_id='5b02a779-1867-433f-90b7-d23ed5e33e57',
                              version='7.0',
                              route_values=route_values)
        return self._deserialize('ProjectLanguageAnalytics', response)

    def get_project_activity_metrics(self, project, from_date, aggregation_type):
        """GetProjectActivityMetrics.
        :param str project: Project ID or project name
        :param datetime from_date:
        :param str aggregation_type:
        :rtype: :class:`<ProjectActivityMetrics> <azure.devops.v7_0.project_analysis.models.ProjectActivityMetrics>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        query_parameters = {}
        if from_date is not None:
            query_parameters['fromDate'] = self._serialize.query('from_date', from_date, 'iso-8601')
        if aggregation_type is not None:
            query_parameters['aggregationType'] = self._serialize.query('aggregation_type', aggregation_type, 'str')
        response = self._send(http_method='GET',
                              location_id='e40ae584-9ea6-4f06-a7c7-6284651b466b',
                              version='7.0',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('ProjectActivityMetrics', response)

    def get_git_repositories_activity_metrics(self, project, from_date, aggregation_type, skip, top):
        """GetGitRepositoriesActivityMetrics.
        Retrieves git activity metrics for repositories matching a specified criteria.
        :param str project: Project ID or project name
        :param datetime from_date: Date from which, the trends are to be fetched.
        :param str aggregation_type: Bucket size on which, trends are to be aggregated.
        :param int skip: The number of repositories to ignore.
        :param int top: The number of repositories for which activity metrics are to be retrieved.
        :rtype: [RepositoryActivityMetrics]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        query_parameters = {}
        if from_date is not None:
            query_parameters['fromDate'] = self._serialize.query('from_date', from_date, 'iso-8601')
        if aggregation_type is not None:
            query_parameters['aggregationType'] = self._serialize.query('aggregation_type', aggregation_type, 'str')
        if skip is not None:
            query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
        if top is not None:
            query_parameters['$top'] = self._serialize.query('top', top, 'int')
        response = self._send(http_method='GET',
                              location_id='df7fbbca-630a-40e3-8aa3-7a3faf66947e',
                              version='7.0',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('[RepositoryActivityMetrics]', self._unwrap_collection(response))

    def get_repository_activity_metrics(self, project, repository_id, from_date, aggregation_type):
        """GetRepositoryActivityMetrics.
        :param str project: Project ID or project name
        :param str repository_id:
        :param datetime from_date:
        :param str aggregation_type:
        :rtype: :class:`<RepositoryActivityMetrics> <azure.devops.v7_0.project_analysis.models.RepositoryActivityMetrics>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if repository_id is not None:
            route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
        query_parameters = {}
        if from_date is not None:
            query_parameters['fromDate'] = self._serialize.query('from_date', from_date, 'iso-8601')
        if aggregation_type is not None:
            query_parameters['aggregationType'] = self._serialize.query('aggregation_type', aggregation_type, 'str')
        response = self._send(http_method='GET',
                              location_id='df7fbbca-630a-40e3-8aa3-7a3faf66947e',
                              version='7.0',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('RepositoryActivityMetrics', response)
azure-devops-python-api/azure-devops/azure/devops/released/project_analysis/project_analysis_client.py/0
{ "file_path": "azure-devops-python-api/azure-devops/azure/devops/released/project_analysis/project_analysis_client.py", "repo_id": "azure-devops-python-api", "token_count": 2576 }
373
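A similar hedged sketch for ProjectAnalysisClient; the connection setup is the same standard pattern, and the factory accessor name is again an assumption based on the get_<area>_client convention rather than something shown in this record.

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

connection = Connection(
    base_url="https://dev.azure.com/your-organization",           # placeholder
    creds=BasicAuthentication("", "your-personal-access-token"),  # placeholder PAT
)
# Accessor name assumed; not shown in this record.
client = connection.clients.get_project_analysis_client()

# Language analytics for a project, via get_project_language_analytics above.
analytics = client.get_project_language_analytics(project="MyProject")
print(analytics)  # ProjectLanguageAnalytics model; inspect its attributes as needed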
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # Generated file, DO NOT EDIT # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------------------------- from msrest import Serializer, Deserializer from ...client import Client from . import models class TestClient(Client): """Test :param str base_url: Service URL :param Authentication creds: Authenticated credentials. """ def __init__(self, base_url=None, creds=None): super(TestClient, self).__init__(base_url, creds) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) resource_area_identifier = 'c2aa639c-3ccc-4740-b3b6-ce2a1e1d984e' def create_test_result_attachment(self, attachment_request_model, project, run_id, test_case_result_id): """CreateTestResultAttachment. Attach a file to a test result. :param :class:`<TestAttachmentRequestModel> <azure.devops.v7_0.test.models.TestAttachmentRequestModel>` attachment_request_model: Attachment details TestAttachmentRequestModel :param str project: Project ID or project name :param int run_id: ID of the test run that contains the result. :param int test_case_result_id: ID of the test result against which attachment has to be uploaded. :rtype: :class:`<TestAttachmentReference> <azure.devops.v7_0.test.models.TestAttachmentReference>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') if test_case_result_id is not None: route_values['testCaseResultId'] = self._serialize.url('test_case_result_id', test_case_result_id, 'int') content = self._serialize.body(attachment_request_model, 'TestAttachmentRequestModel') response = self._send(http_method='POST', location_id='2bffebe9-2f0f-4639-9af8-56129e9fed2d', version='7.0', route_values=route_values, content=content) return self._deserialize('TestAttachmentReference', response) def create_test_sub_result_attachment(self, attachment_request_model, project, run_id, test_case_result_id, test_sub_result_id): """CreateTestSubResultAttachment. Attach a file to a test result :param :class:`<TestAttachmentRequestModel> <azure.devops.v7_0.test.models.TestAttachmentRequestModel>` attachment_request_model: Attachment Request Model. :param str project: Project ID or project name :param int run_id: ID of the test run that contains the result. :param int test_case_result_id: ID of the test results that contains sub result. :param int test_sub_result_id: ID of the test sub results against which attachment has to be uploaded. 
:rtype: :class:`<TestAttachmentReference> <azure.devops.v7_0.test.models.TestAttachmentReference>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') if test_case_result_id is not None: route_values['testCaseResultId'] = self._serialize.url('test_case_result_id', test_case_result_id, 'int') query_parameters = {} if test_sub_result_id is not None: query_parameters['testSubResultId'] = self._serialize.query('test_sub_result_id', test_sub_result_id, 'int') content = self._serialize.body(attachment_request_model, 'TestAttachmentRequestModel') response = self._send(http_method='POST', location_id='2bffebe9-2f0f-4639-9af8-56129e9fed2d', version='7.0', route_values=route_values, query_parameters=query_parameters, content=content) return self._deserialize('TestAttachmentReference', response) def get_test_result_attachment_content(self, project, run_id, test_case_result_id, attachment_id, **kwargs): """GetTestResultAttachmentContent. Download a test result attachment by its ID. :param str project: Project ID or project name :param int run_id: ID of the test run that contains the testCaseResultId. :param int test_case_result_id: ID of the test result whose attachment has to be downloaded. :param int attachment_id: ID of the test result attachment to be downloaded. :rtype: object """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') if test_case_result_id is not None: route_values['testCaseResultId'] = self._serialize.url('test_case_result_id', test_case_result_id, 'int') if attachment_id is not None: route_values['attachmentId'] = self._serialize.url('attachment_id', attachment_id, 'int') response = self._send(http_method='GET', location_id='2bffebe9-2f0f-4639-9af8-56129e9fed2d', version='7.0', route_values=route_values, accept_media_type='application/octet-stream') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_test_result_attachments(self, project, run_id, test_case_result_id): """GetTestResultAttachments. Get list of test result attachments reference. :param str project: Project ID or project name :param int run_id: ID of the test run that contains the result. :param int test_case_result_id: ID of the test result. :rtype: [TestAttachment] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') if test_case_result_id is not None: route_values['testCaseResultId'] = self._serialize.url('test_case_result_id', test_case_result_id, 'int') response = self._send(http_method='GET', location_id='2bffebe9-2f0f-4639-9af8-56129e9fed2d', version='7.0', route_values=route_values) return self._deserialize('[TestAttachment]', self._unwrap_collection(response)) def get_test_result_attachment_zip(self, project, run_id, test_case_result_id, attachment_id, **kwargs): """GetTestResultAttachmentZip. Download a test result attachment by its ID. :param str project: Project ID or project name :param int run_id: ID of the test run that contains the testCaseResultId. :param int test_case_result_id: ID of the test result whose attachment has to be downloaded. 
:param int attachment_id: ID of the test result attachment to be downloaded. :rtype: object """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') if test_case_result_id is not None: route_values['testCaseResultId'] = self._serialize.url('test_case_result_id', test_case_result_id, 'int') if attachment_id is not None: route_values['attachmentId'] = self._serialize.url('attachment_id', attachment_id, 'int') response = self._send(http_method='GET', location_id='2bffebe9-2f0f-4639-9af8-56129e9fed2d', version='7.0', route_values=route_values, accept_media_type='application/zip') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_test_sub_result_attachment_content(self, project, run_id, test_case_result_id, attachment_id, test_sub_result_id, **kwargs): """GetTestSubResultAttachmentContent. Download a test sub result attachment :param str project: Project ID or project name :param int run_id: ID of the test run that contains the result. :param int test_case_result_id: ID of the test results that contains sub result. :param int attachment_id: ID of the test result attachment to be downloaded :param int test_sub_result_id: ID of the test sub result whose attachment has to be downloaded :rtype: object """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') if test_case_result_id is not None: route_values['testCaseResultId'] = self._serialize.url('test_case_result_id', test_case_result_id, 'int') if attachment_id is not None: route_values['attachmentId'] = self._serialize.url('attachment_id', attachment_id, 'int') query_parameters = {} if test_sub_result_id is not None: query_parameters['testSubResultId'] = self._serialize.query('test_sub_result_id', test_sub_result_id, 'int') response = self._send(http_method='GET', location_id='2bffebe9-2f0f-4639-9af8-56129e9fed2d', version='7.0', route_values=route_values, query_parameters=query_parameters, accept_media_type='application/octet-stream') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_test_sub_result_attachments(self, project, run_id, test_case_result_id, test_sub_result_id): """GetTestSubResultAttachments. Get list of test sub result attachments :param str project: Project ID or project name :param int run_id: ID of the test run that contains the result. :param int test_case_result_id: ID of the test results that contains sub result. 
:param int test_sub_result_id: ID of the test sub result whose attachment has to be downloaded :rtype: [TestAttachment] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') if test_case_result_id is not None: route_values['testCaseResultId'] = self._serialize.url('test_case_result_id', test_case_result_id, 'int') query_parameters = {} if test_sub_result_id is not None: query_parameters['testSubResultId'] = self._serialize.query('test_sub_result_id', test_sub_result_id, 'int') response = self._send(http_method='GET', location_id='2bffebe9-2f0f-4639-9af8-56129e9fed2d', version='7.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TestAttachment]', self._unwrap_collection(response)) def get_test_sub_result_attachment_zip(self, project, run_id, test_case_result_id, attachment_id, test_sub_result_id, **kwargs): """GetTestSubResultAttachmentZip. Download a test sub result attachment :param str project: Project ID or project name :param int run_id: ID of the test run that contains the result. :param int test_case_result_id: ID of the test results that contains sub result. :param int attachment_id: ID of the test result attachment to be downloaded :param int test_sub_result_id: ID of the test sub result whose attachment has to be downloaded :rtype: object """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') if test_case_result_id is not None: route_values['testCaseResultId'] = self._serialize.url('test_case_result_id', test_case_result_id, 'int') if attachment_id is not None: route_values['attachmentId'] = self._serialize.url('attachment_id', attachment_id, 'int') query_parameters = {} if test_sub_result_id is not None: query_parameters['testSubResultId'] = self._serialize.query('test_sub_result_id', test_sub_result_id, 'int') response = self._send(http_method='GET', location_id='2bffebe9-2f0f-4639-9af8-56129e9fed2d', version='7.0', route_values=route_values, query_parameters=query_parameters, accept_media_type='application/zip') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def create_test_run_attachment(self, attachment_request_model, project, run_id): """CreateTestRunAttachment. Attach a file to a test run. :param :class:`<TestAttachmentRequestModel> <azure.devops.v7_0.test.models.TestAttachmentRequestModel>` attachment_request_model: Attachment details TestAttachmentRequestModel :param str project: Project ID or project name :param int run_id: ID of the test run against which attachment has to be uploaded. 
:rtype: :class:`<TestAttachmentReference> <azure.devops.v7_0.test.models.TestAttachmentReference>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') content = self._serialize.body(attachment_request_model, 'TestAttachmentRequestModel') response = self._send(http_method='POST', location_id='4f004af4-a507-489c-9b13-cb62060beb11', version='7.0', route_values=route_values, content=content) return self._deserialize('TestAttachmentReference', response) def get_test_run_attachment_content(self, project, run_id, attachment_id, **kwargs): """GetTestRunAttachmentContent. Download a test run attachment by its ID. :param str project: Project ID or project name :param int run_id: ID of the test run whose attachment has to be downloaded. :param int attachment_id: ID of the test run attachment to be downloaded. :rtype: object """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') if attachment_id is not None: route_values['attachmentId'] = self._serialize.url('attachment_id', attachment_id, 'int') response = self._send(http_method='GET', location_id='4f004af4-a507-489c-9b13-cb62060beb11', version='7.0', route_values=route_values, accept_media_type='application/octet-stream') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_test_run_attachments(self, project, run_id): """GetTestRunAttachments. Get list of test run attachments reference. :param str project: Project ID or project name :param int run_id: ID of the test run. :rtype: [TestAttachment] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') response = self._send(http_method='GET', location_id='4f004af4-a507-489c-9b13-cb62060beb11', version='7.0', route_values=route_values) return self._deserialize('[TestAttachment]', self._unwrap_collection(response)) def get_test_run_attachment_zip(self, project, run_id, attachment_id, **kwargs): """GetTestRunAttachmentZip. Download a test run attachment by its ID. :param str project: Project ID or project name :param int run_id: ID of the test run whose attachment has to be downloaded. :param int attachment_id: ID of the test run attachment to be downloaded. :rtype: object """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') if attachment_id is not None: route_values['attachmentId'] = self._serialize.url('attachment_id', attachment_id, 'int') response = self._send(http_method='GET', location_id='4f004af4-a507-489c-9b13-cb62060beb11', version='7.0', route_values=route_values, accept_media_type='application/zip') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_build_code_coverage(self, project, build_id, flags): """GetBuildCodeCoverage. Get code coverage data for a build. 
:param str project: Project ID or project name :param int build_id: ID of the build for which code coverage data needs to be fetched. :param int flags: Value of flags determine the level of code coverage details to be fetched. Flags are additive. Expected Values are 1 for Modules, 2 for Functions, 4 for BlockData. :rtype: [BuildCoverage] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if build_id is not None: query_parameters['buildId'] = self._serialize.query('build_id', build_id, 'int') if flags is not None: query_parameters['flags'] = self._serialize.query('flags', flags, 'int') response = self._send(http_method='GET', location_id='77560e8a-4e8c-4d59-894e-a5f264c24444', version='7.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[BuildCoverage]', self._unwrap_collection(response)) def get_test_run_code_coverage(self, project, run_id, flags): """GetTestRunCodeCoverage. Get code coverage data for a test run :param str project: Project ID or project name :param int run_id: ID of the test run for which code coverage data needs to be fetched. :param int flags: Value of flags determine the level of code coverage details to be fetched. Flags are additive. Expected Values are 1 for Modules, 2 for Functions, 4 for BlockData. :rtype: [TestRunCoverage] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') query_parameters = {} if flags is not None: query_parameters['flags'] = self._serialize.query('flags', flags, 'int') response = self._send(http_method='GET', location_id='9629116f-3b89-4ed8-b358-d4694efda160', version='7.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TestRunCoverage]', self._unwrap_collection(response)) def get_test_iteration(self, project, run_id, test_case_result_id, iteration_id, include_action_results=None): """GetTestIteration. Get iteration for a result :param str project: Project ID or project name :param int run_id: ID of the test run that contains the result. :param int test_case_result_id: ID of the test result that contains the iterations. :param int iteration_id: Id of the test results Iteration. :param bool include_action_results: Include result details for each action performed in the test iteration. ActionResults refer to outcome (pass/fail) of test steps that are executed as part of a running a manual test. Including the ActionResults flag gets the outcome of test steps in the actionResults section and test parameters in the parameters section for each test iteration. 
:rtype: :class:`<TestIterationDetailsModel> <azure.devops.v7_0.test.models.TestIterationDetailsModel>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') if test_case_result_id is not None: route_values['testCaseResultId'] = self._serialize.url('test_case_result_id', test_case_result_id, 'int') if iteration_id is not None: route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'int') query_parameters = {} if include_action_results is not None: query_parameters['includeActionResults'] = self._serialize.query('include_action_results', include_action_results, 'bool') response = self._send(http_method='GET', location_id='73eb9074-3446-4c44-8296-2f811950ff8d', version='7.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('TestIterationDetailsModel', response) def get_test_iterations(self, project, run_id, test_case_result_id, include_action_results=None): """GetTestIterations. Get iterations for a result :param str project: Project ID or project name :param int run_id: ID of the test run that contains the result. :param int test_case_result_id: ID of the test result that contains the iterations. :param bool include_action_results: Include result details for each action performed in the test iteration. ActionResults refer to outcome (pass/fail) of test steps that are executed as part of a running a manual test. Including the ActionResults flag gets the outcome of test steps in the actionResults section and test parameters in the parameters section for each test iteration. :rtype: [TestIterationDetailsModel] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') if test_case_result_id is not None: route_values['testCaseResultId'] = self._serialize.url('test_case_result_id', test_case_result_id, 'int') query_parameters = {} if include_action_results is not None: query_parameters['includeActionResults'] = self._serialize.query('include_action_results', include_action_results, 'bool') response = self._send(http_method='GET', location_id='73eb9074-3446-4c44-8296-2f811950ff8d', version='7.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TestIterationDetailsModel]', self._unwrap_collection(response)) def get_point(self, project, plan_id, suite_id, point_ids, wit_fields=None): """GetPoint. Get a test point. :param str project: Project ID or project name :param int plan_id: ID of the test plan. :param int suite_id: ID of the suite that contains the point. :param int point_ids: ID of the test point to get. :param str wit_fields: Comma-separated list of work item field names. 
:rtype: :class:`<TestPoint> <azure.devops.v7_0.test.models.TestPoint>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') if point_ids is not None: route_values['pointIds'] = self._serialize.url('point_ids', point_ids, 'int') query_parameters = {} if wit_fields is not None: query_parameters['witFields'] = self._serialize.query('wit_fields', wit_fields, 'str') response = self._send(http_method='GET', location_id='3bcfd5c8-be62-488e-b1da-b8289ce9299c', version='7.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('TestPoint', response) def get_points(self, project, plan_id, suite_id, wit_fields=None, configuration_id=None, test_case_id=None, test_point_ids=None, include_point_details=None, skip=None, top=None): """GetPoints. Get a list of test points. :param str project: Project ID or project name :param int plan_id: ID of the test plan. :param int suite_id: ID of the suite that contains the points. :param str wit_fields: Comma-separated list of work item field names. :param str configuration_id: Get test points for specific configuration. :param str test_case_id: Get test points for a specific test case, valid when configurationId is not set. :param str test_point_ids: Get test points for comma-separated list of test point IDs, valid only when configurationId and testCaseId are not set. :param bool include_point_details: Include all properties for the test point. :param int skip: Number of test points to skip.. :param int top: Number of test points to return. :rtype: [TestPoint] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') query_parameters = {} if wit_fields is not None: query_parameters['witFields'] = self._serialize.query('wit_fields', wit_fields, 'str') if configuration_id is not None: query_parameters['configurationId'] = self._serialize.query('configuration_id', configuration_id, 'str') if test_case_id is not None: query_parameters['testCaseId'] = self._serialize.query('test_case_id', test_case_id, 'str') if test_point_ids is not None: query_parameters['testPointIds'] = self._serialize.query('test_point_ids', test_point_ids, 'str') if include_point_details is not None: query_parameters['includePointDetails'] = self._serialize.query('include_point_details', include_point_details, 'bool') if skip is not None: query_parameters['$skip'] = self._serialize.query('skip', skip, 'int') if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') response = self._send(http_method='GET', location_id='3bcfd5c8-be62-488e-b1da-b8289ce9299c', version='7.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TestPoint]', self._unwrap_collection(response)) def update_test_points(self, point_update_model, project, plan_id, suite_id, point_ids): """UpdateTestPoints. Update test points. :param :class:`<PointUpdateModel> <azure.devops.v7_0.test.models.PointUpdateModel>` point_update_model: Data to update. 
:param str project: Project ID or project name :param int plan_id: ID of the test plan. :param int suite_id: ID of the suite that contains the points. :param str point_ids: ID of the test point to get. Use a comma-separated list of IDs to update multiple test points. :rtype: [TestPoint] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') if point_ids is not None: route_values['pointIds'] = self._serialize.url('point_ids', point_ids, 'str') content = self._serialize.body(point_update_model, 'PointUpdateModel') response = self._send(http_method='PATCH', location_id='3bcfd5c8-be62-488e-b1da-b8289ce9299c', version='7.0', route_values=route_values, content=content) return self._deserialize('[TestPoint]', self._unwrap_collection(response)) def get_points_by_query(self, query, project, skip=None, top=None): """GetPointsByQuery. Get test points using query. :param :class:`<TestPointsQuery> <azure.devops.v7_0.test.models.TestPointsQuery>` query: TestPointsQuery to get test points. :param str project: Project ID or project name :param int skip: Number of test points to skip.. :param int top: Number of test points to return. :rtype: :class:`<TestPointsQuery> <azure.devops.v7_0.test.models.TestPointsQuery>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if skip is not None: query_parameters['$skip'] = self._serialize.query('skip', skip, 'int') if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') content = self._serialize.body(query, 'TestPointsQuery') response = self._send(http_method='POST', location_id='b4264fd0-a5d1-43e2-82a5-b9c46b7da9ce', version='7.0', route_values=route_values, query_parameters=query_parameters, content=content) return self._deserialize('TestPointsQuery', response) def get_result_retention_settings(self, project): """GetResultRetentionSettings. Get test result retention settings :param str project: Project ID or project name :rtype: :class:`<ResultRetentionSettings> <azure.devops.v7_0.test.models.ResultRetentionSettings>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') response = self._send(http_method='GET', location_id='a3206d9e-fa8d-42d3-88cb-f75c51e69cde', version='7.0', route_values=route_values) return self._deserialize('ResultRetentionSettings', response) def update_result_retention_settings(self, retention_settings, project): """UpdateResultRetentionSettings. 
Update test result retention settings :param :class:`<ResultRetentionSettings> <azure.devops.v7_0.test.models.ResultRetentionSettings>` retention_settings: Test result retention settings details to be updated :param str project: Project ID or project name :rtype: :class:`<ResultRetentionSettings> <azure.devops.v7_0.test.models.ResultRetentionSettings>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') content = self._serialize.body(retention_settings, 'ResultRetentionSettings') response = self._send(http_method='PATCH', location_id='a3206d9e-fa8d-42d3-88cb-f75c51e69cde', version='7.0', route_values=route_values, content=content) return self._deserialize('ResultRetentionSettings', response) def add_test_results_to_test_run(self, results, project, run_id): """AddTestResultsToTestRun. Add test results to a test run. :param [TestCaseResult] results: List of test results to add. :param str project: Project ID or project name :param int run_id: Test run ID into which test results to add. :rtype: [TestCaseResult] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') content = self._serialize.body(results, '[TestCaseResult]') response = self._send(http_method='POST', location_id='4637d869-3a76-4468-8057-0bb02aa385cf', version='7.0', route_values=route_values, content=content) return self._deserialize('[TestCaseResult]', self._unwrap_collection(response)) def get_test_result_by_id(self, project, run_id, test_case_result_id, details_to_include=None): """GetTestResultById. Get a test result for a test run. :param str project: Project ID or project name :param int run_id: Test run ID of a test result to fetch. :param int test_case_result_id: Test result ID. :param str details_to_include: Details to include with test results. Default is None. Other values are Iterations, WorkItems and SubResults. :rtype: :class:`<TestCaseResult> <azure.devops.v7_0.test.models.TestCaseResult>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') if test_case_result_id is not None: route_values['testCaseResultId'] = self._serialize.url('test_case_result_id', test_case_result_id, 'int') query_parameters = {} if details_to_include is not None: query_parameters['detailsToInclude'] = self._serialize.query('details_to_include', details_to_include, 'str') response = self._send(http_method='GET', location_id='4637d869-3a76-4468-8057-0bb02aa385cf', version='7.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('TestCaseResult', response) def get_test_results(self, project, run_id, details_to_include=None, skip=None, top=None, outcomes=None): """GetTestResults. Get test results for a test run. :param str project: Project ID or project name :param int run_id: Test run ID of test results to fetch. :param str details_to_include: Details to include with test results. Default is None. Other values are Iterations and WorkItems. :param int skip: Number of test results to skip from beginning. :param int top: Number of test results to return. Maximum is 1000 when detailsToInclude is None and 200 otherwise. :param [TestOutcome] outcomes: Comma separated list of test outcomes to filter test results. 
:rtype: [TestCaseResult] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') query_parameters = {} if details_to_include is not None: query_parameters['detailsToInclude'] = self._serialize.query('details_to_include', details_to_include, 'str') if skip is not None: query_parameters['$skip'] = self._serialize.query('skip', skip, 'int') if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') if outcomes is not None: outcomes = ",".join(map(str, outcomes)) query_parameters['outcomes'] = self._serialize.query('outcomes', outcomes, 'str') response = self._send(http_method='GET', location_id='4637d869-3a76-4468-8057-0bb02aa385cf', version='7.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TestCaseResult]', self._unwrap_collection(response)) def update_test_results(self, results, project, run_id): """UpdateTestResults. Update test results in a test run. :param [TestCaseResult] results: List of test results to update. :param str project: Project ID or project name :param int run_id: Test run ID whose test results to update. :rtype: [TestCaseResult] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') content = self._serialize.body(results, '[TestCaseResult]') response = self._send(http_method='PATCH', location_id='4637d869-3a76-4468-8057-0bb02aa385cf', version='7.0', route_values=route_values, content=content) return self._deserialize('[TestCaseResult]', self._unwrap_collection(response)) def get_test_run_statistics(self, project, run_id): """GetTestRunStatistics. Get test run statistics , used when we want to get summary of a run by outcome. :param str project: Project ID or project name :param int run_id: ID of the run to get. :rtype: :class:`<TestRunStatistic> <azure.devops.v7_0.test.models.TestRunStatistic>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') response = self._send(http_method='GET', location_id='0a42c424-d764-4a16-a2d5-5c85f87d0ae8', version='7.0', route_values=route_values) return self._deserialize('TestRunStatistic', response) def create_test_run(self, test_run, project): """CreateTestRun. Create new test run. :param :class:`<RunCreateModel> <azure.devops.v7_0.test.models.RunCreateModel>` test_run: Run details RunCreateModel :param str project: Project ID or project name :rtype: :class:`<TestRun> <azure.devops.v7_0.test.models.TestRun>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') content = self._serialize.body(test_run, 'RunCreateModel') response = self._send(http_method='POST', location_id='cadb3810-d47d-4a3c-a234-fe5f3be50138', version='7.0', route_values=route_values, content=content) return self._deserialize('TestRun', response) def delete_test_run(self, project, run_id): """DeleteTestRun. Delete a test run by its ID. :param str project: Project ID or project name :param int run_id: ID of the run to delete. 
""" route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') self._send(http_method='DELETE', location_id='cadb3810-d47d-4a3c-a234-fe5f3be50138', version='7.0', route_values=route_values) def get_test_run_by_id(self, project, run_id, include_details=None): """GetTestRunById. Get a test run by its ID. :param str project: Project ID or project name :param int run_id: ID of the run to get. :param bool include_details: Default value is true. It includes details like run statistics, release, build, test environment, post process state, and more. :rtype: :class:`<TestRun> <azure.devops.v7_0.test.models.TestRun>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') query_parameters = {} if include_details is not None: query_parameters['includeDetails'] = self._serialize.query('include_details', include_details, 'bool') response = self._send(http_method='GET', location_id='cadb3810-d47d-4a3c-a234-fe5f3be50138', version='7.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('TestRun', response) def get_test_runs(self, project, build_uri=None, owner=None, tmi_run_id=None, plan_id=None, include_run_details=None, automated=None, skip=None, top=None): """GetTestRuns. Get a list of test runs. :param str project: Project ID or project name :param str build_uri: URI of the build that the runs used. :param str owner: Team foundation ID of the owner of the runs. :param str tmi_run_id: :param int plan_id: ID of the test plan that the runs are a part of. :param bool include_run_details: If true, include all the properties of the runs. :param bool automated: If true, only returns automated runs. :param int skip: Number of test runs to skip. :param int top: Number of test runs to return. 
:rtype: [TestRun] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if build_uri is not None: query_parameters['buildUri'] = self._serialize.query('build_uri', build_uri, 'str') if owner is not None: query_parameters['owner'] = self._serialize.query('owner', owner, 'str') if tmi_run_id is not None: query_parameters['tmiRunId'] = self._serialize.query('tmi_run_id', tmi_run_id, 'str') if plan_id is not None: query_parameters['planId'] = self._serialize.query('plan_id', plan_id, 'int') if include_run_details is not None: query_parameters['includeRunDetails'] = self._serialize.query('include_run_details', include_run_details, 'bool') if automated is not None: query_parameters['automated'] = self._serialize.query('automated', automated, 'bool') if skip is not None: query_parameters['$skip'] = self._serialize.query('skip', skip, 'int') if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') response = self._send(http_method='GET', location_id='cadb3810-d47d-4a3c-a234-fe5f3be50138', version='7.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TestRun]', self._unwrap_collection(response)) def query_test_runs(self, project, min_last_updated_date, max_last_updated_date, state=None, plan_ids=None, is_automated=None, publish_context=None, build_ids=None, build_def_ids=None, branch_name=None, release_ids=None, release_def_ids=None, release_env_ids=None, release_env_def_ids=None, run_title=None, top=None, continuation_token=None): """QueryTestRuns. Query Test Runs based on filters. Mandatory fields are minLastUpdatedDate and maxLastUpdatedDate. :param str project: Project ID or project name :param datetime min_last_updated_date: Minimum Last Modified Date of run to be queried (Mandatory). :param datetime max_last_updated_date: Maximum Last Modified Date of run to be queried (Mandatory, difference between min and max date can be atmost 7 days). :param str state: Current state of the Runs to be queried. :param [int] plan_ids: Plan Ids of the Runs to be queried, comma separated list of valid ids (limit no. of ids 10). :param bool is_automated: Automation type of the Runs to be queried. :param str publish_context: PublishContext of the Runs to be queried. :param [int] build_ids: Build Ids of the Runs to be queried, comma separated list of valid ids (limit no. of ids 10). :param [int] build_def_ids: Build Definition Ids of the Runs to be queried, comma separated list of valid ids (limit no. of ids 10). :param str branch_name: Source Branch name of the Runs to be queried. :param [int] release_ids: Release Ids of the Runs to be queried, comma separated list of valid ids (limit no. of ids 10). :param [int] release_def_ids: Release Definition Ids of the Runs to be queried, comma separated list of valid ids (limit no. of ids 10). :param [int] release_env_ids: Release Environment Ids of the Runs to be queried, comma separated list of valid ids (limit no. of ids 10). :param [int] release_env_def_ids: Release Environment Definition Ids of the Runs to be queried, comma separated list of valid ids (limit no. of ids 10). :param str run_title: Run Title of the Runs to be queried. :param int top: Number of runs to be queried. Limit is 100 :param str continuation_token: continuationToken received from previous batch or null for first batch. It is not supposed to be created (or altered, if received from last batch) by user. 
:rtype: :class:`<[TestRun]> <azure.devops.v7_0.test.models.[TestRun]>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if min_last_updated_date is not None: query_parameters['minLastUpdatedDate'] = self._serialize.query('min_last_updated_date', min_last_updated_date, 'iso-8601') if max_last_updated_date is not None: query_parameters['maxLastUpdatedDate'] = self._serialize.query('max_last_updated_date', max_last_updated_date, 'iso-8601') if state is not None: query_parameters['state'] = self._serialize.query('state', state, 'str') if plan_ids is not None: plan_ids = ",".join(map(str, plan_ids)) query_parameters['planIds'] = self._serialize.query('plan_ids', plan_ids, 'str') if is_automated is not None: query_parameters['isAutomated'] = self._serialize.query('is_automated', is_automated, 'bool') if publish_context is not None: query_parameters['publishContext'] = self._serialize.query('publish_context', publish_context, 'str') if build_ids is not None: build_ids = ",".join(map(str, build_ids)) query_parameters['buildIds'] = self._serialize.query('build_ids', build_ids, 'str') if build_def_ids is not None: build_def_ids = ",".join(map(str, build_def_ids)) query_parameters['buildDefIds'] = self._serialize.query('build_def_ids', build_def_ids, 'str') if branch_name is not None: query_parameters['branchName'] = self._serialize.query('branch_name', branch_name, 'str') if release_ids is not None: release_ids = ",".join(map(str, release_ids)) query_parameters['releaseIds'] = self._serialize.query('release_ids', release_ids, 'str') if release_def_ids is not None: release_def_ids = ",".join(map(str, release_def_ids)) query_parameters['releaseDefIds'] = self._serialize.query('release_def_ids', release_def_ids, 'str') if release_env_ids is not None: release_env_ids = ",".join(map(str, release_env_ids)) query_parameters['releaseEnvIds'] = self._serialize.query('release_env_ids', release_env_ids, 'str') if release_env_def_ids is not None: release_env_def_ids = ",".join(map(str, release_env_def_ids)) query_parameters['releaseEnvDefIds'] = self._serialize.query('release_env_def_ids', release_env_def_ids, 'str') if run_title is not None: query_parameters['runTitle'] = self._serialize.query('run_title', run_title, 'str') if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') if continuation_token is not None: query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str') response = self._send(http_method='GET', location_id='cadb3810-d47d-4a3c-a234-fe5f3be50138', version='7.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TestRun]', self._unwrap_collection(response)) def update_test_run(self, run_update_model, project, run_id): """UpdateTestRun. Update test run by its ID. :param :class:`<RunUpdateModel> <azure.devops.v7_0.test.models.RunUpdateModel>` run_update_model: Run details RunUpdateModel :param str project: Project ID or project name :param int run_id: ID of the run to update. 
:rtype: :class:`<TestRun> <azure.devops.v7_0.test.models.TestRun>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if run_id is not None: route_values['runId'] = self._serialize.url('run_id', run_id, 'int') content = self._serialize.body(run_update_model, 'RunUpdateModel') response = self._send(http_method='PATCH', location_id='cadb3810-d47d-4a3c-a234-fe5f3be50138', version='7.0', route_values=route_values, content=content) return self._deserialize('TestRun', response) def create_test_session(self, test_session, team_context): """CreateTestSession. Create a test session :param :class:`<TestSession> <azure.devops.v7_0.test.models.TestSession>` test_session: Test session details for creation :param :class:`<TeamContext> <azure.devops.v7_0.test.models.TeamContext>` team_context: The team context for the operation :rtype: :class:`<TestSession> <azure.devops.v7_0.test.models.TestSession>` """ project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') content = self._serialize.body(test_session, 'TestSession') response = self._send(http_method='POST', location_id='1500b4b4-6c69-4ca6-9b18-35e9e97fe2ac', version='7.0', route_values=route_values, content=content) return self._deserialize('TestSession', response) def get_test_sessions(self, team_context, period=None, all_sessions=None, include_all_properties=None, source=None, include_only_completed_sessions=None): """GetTestSessions. Get a list of test sessions :param :class:`<TeamContext> <azure.devops.v7_0.test.models.TeamContext>` team_context: The team context for the operation :param int period: Period in days from now, for which test sessions are fetched. :param bool all_sessions: If false, returns test sessions for current user. Otherwise, it returns test sessions for all users :param bool include_all_properties: If true, it returns all properties of the test sessions. Otherwise, it returns the skinny version. :param str source: Source of the test session. :param bool include_only_completed_sessions: If true, it returns test sessions in completed state. 
Otherwise, it returns test sessions for all states :rtype: [TestSession] """ project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') query_parameters = {} if period is not None: query_parameters['period'] = self._serialize.query('period', period, 'int') if all_sessions is not None: query_parameters['allSessions'] = self._serialize.query('all_sessions', all_sessions, 'bool') if include_all_properties is not None: query_parameters['includeAllProperties'] = self._serialize.query('include_all_properties', include_all_properties, 'bool') if source is not None: query_parameters['source'] = self._serialize.query('source', source, 'str') if include_only_completed_sessions is not None: query_parameters['includeOnlyCompletedSessions'] = self._serialize.query('include_only_completed_sessions', include_only_completed_sessions, 'bool') response = self._send(http_method='GET', location_id='1500b4b4-6c69-4ca6-9b18-35e9e97fe2ac', version='7.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TestSession]', self._unwrap_collection(response)) def update_test_session(self, test_session, team_context): """UpdateTestSession. Update a test session :param :class:`<TestSession> <azure.devops.v7_0.test.models.TestSession>` test_session: Test session details for update :param :class:`<TeamContext> <azure.devops.v7_0.test.models.TeamContext>` team_context: The team context for the operation :rtype: :class:`<TestSession> <azure.devops.v7_0.test.models.TestSession>` """ project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') content = self._serialize.body(test_session, 'TestSession') response = self._send(http_method='PATCH', location_id='1500b4b4-6c69-4ca6-9b18-35e9e97fe2ac', version='7.0', route_values=route_values, content=content) return self._deserialize('TestSession', response) def add_test_cases_to_suite(self, project, plan_id, suite_id, test_case_ids): """AddTestCasesToSuite. Add test cases to suite. :param str project: Project ID or project name :param int plan_id: ID of the test plan that contains the suite. :param int suite_id: ID of the test suite to which the test cases must be added. :param str test_case_ids: IDs of the test cases to add to the suite. Ids are specified in comma separated format. 
:rtype: [SuiteTestCase] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') if test_case_ids is not None: route_values['testCaseIds'] = self._serialize.url('test_case_ids', test_case_ids, 'str') route_values['action'] = 'TestCases' response = self._send(http_method='POST', location_id='a4a1ec1c-b03f-41ca-8857-704594ecf58e', version='7.0', route_values=route_values) return self._deserialize('[SuiteTestCase]', self._unwrap_collection(response)) def get_test_case_by_id(self, project, plan_id, suite_id, test_case_ids): """GetTestCaseById. Get a specific test case in a test suite with test case id. :param str project: Project ID or project name :param int plan_id: ID of the test plan that contains the suites. :param int suite_id: ID of the suite that contains the test case. :param int test_case_ids: ID of the test case to get. :rtype: :class:`<SuiteTestCase> <azure.devops.v7_0.test.models.SuiteTestCase>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') if test_case_ids is not None: route_values['testCaseIds'] = self._serialize.url('test_case_ids', test_case_ids, 'int') route_values['action'] = 'TestCases' response = self._send(http_method='GET', location_id='a4a1ec1c-b03f-41ca-8857-704594ecf58e', version='7.0', route_values=route_values) return self._deserialize('SuiteTestCase', response) def get_test_cases(self, project, plan_id, suite_id): """GetTestCases. Get all test cases in a suite. :param str project: Project ID or project name :param int plan_id: ID of the test plan that contains the suites. :param int suite_id: ID of the suite to get. :rtype: [SuiteTestCase] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') route_values['action'] = 'TestCases' response = self._send(http_method='GET', location_id='a4a1ec1c-b03f-41ca-8857-704594ecf58e', version='7.0', route_values=route_values) return self._deserialize('[SuiteTestCase]', self._unwrap_collection(response)) def remove_test_cases_from_suite_url(self, project, plan_id, suite_id, test_case_ids): """RemoveTestCasesFromSuiteUrl. The test points associated with the test cases are removed from the test suite. The test case work item is not deleted from the system. See test cases resource to delete a test case permanently. :param str project: Project ID or project name :param int plan_id: ID of the test plan that contains the suite. :param int suite_id: ID of the suite to get. :param str test_case_ids: IDs of the test cases to remove from the suite. 
""" route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') if test_case_ids is not None: route_values['testCaseIds'] = self._serialize.url('test_case_ids', test_case_ids, 'str') route_values['action'] = 'TestCases' self._send(http_method='DELETE', location_id='a4a1ec1c-b03f-41ca-8857-704594ecf58e', version='7.0', route_values=route_values) def update_suite_test_cases(self, suite_test_case_update_model, project, plan_id, suite_id, test_case_ids): """UpdateSuiteTestCases. Updates the properties of the test case association in a suite. :param :class:`<SuiteTestCaseUpdateModel> <azure.devops.v7_0.test.models.SuiteTestCaseUpdateModel>` suite_test_case_update_model: Model for updation of the properties of test case suite association. :param str project: Project ID or project name :param int plan_id: ID of the test plan that contains the suite. :param int suite_id: ID of the test suite to which the test cases must be added. :param str test_case_ids: IDs of the test cases to add to the suite. Ids are specified in comma separated format. :rtype: [SuiteTestCase] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') if test_case_ids is not None: route_values['testCaseIds'] = self._serialize.url('test_case_ids', test_case_ids, 'str') route_values['action'] = 'TestCases' content = self._serialize.body(suite_test_case_update_model, 'SuiteTestCaseUpdateModel') response = self._send(http_method='PATCH', location_id='a4a1ec1c-b03f-41ca-8857-704594ecf58e', version='7.0', route_values=route_values, content=content) return self._deserialize('[SuiteTestCase]', self._unwrap_collection(response)) def delete_test_case(self, project, test_case_id): """DeleteTestCase. Delete a test case. :param str project: Project ID or project name :param int test_case_id: Id of test case to delete. """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if test_case_id is not None: route_values['testCaseId'] = self._serialize.url('test_case_id', test_case_id, 'int') self._send(http_method='DELETE', location_id='4d472e0f-e32c-4ef8-adf4-a4078772889c', version='7.0', route_values=route_values) def query_test_history(self, filter, project): """QueryTestHistory. Get history of a test method using TestHistoryQuery :param :class:`<TestHistoryQuery> <azure.devops.v7_0.test.models.TestHistoryQuery>` filter: TestHistoryQuery to get history :param str project: Project ID or project name :rtype: :class:`<TestHistoryQuery> <azure.devops.v7_0.test.models.TestHistoryQuery>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') content = self._serialize.body(filter, 'TestHistoryQuery') response = self._send(http_method='POST', location_id='929fd86c-3e38-4d8c-b4b6-90df256e5971', version='7.0', route_values=route_values, content=content) return self._deserialize('TestHistoryQuery', response)
azure-devops-python-api/azure-devops/azure/devops/v7_0/test/test_client.py/0
{ "file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_0/test/test_client.py", "repo_id": "azure-devops-python-api", "token_count": 30090 }
374
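The generated TestClient above is normally reached through the package's Connection and client-factory layer rather than instantiated directly. The sketch below is illustrative only and is not part of the generated sources: the organization URL, personal access token, and project name 'MyProject' are placeholders, and it assumes the released client factory exposes get_test_client() returning this 7.0 client.

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

# Placeholder values; replace with a real organization URL and PAT.
ORGANIZATION_URL = 'https://dev.azure.com/your-organization'
PERSONAL_ACCESS_TOKEN = '<personal-access-token>'

credentials = BasicAuthentication('', PERSONAL_ACCESS_TOKEN)
connection = Connection(base_url=ORGANIZATION_URL, creds=credentials)

# Assumed factory method for the released (7.0) test area shown above.
test_client = connection.clients.get_test_client()

# List recent automated runs in a project, then fetch results for the first one.
runs = test_client.get_test_runs('MyProject', automated=True, top=5)
for run in runs:
    print(run.id, run.name, run.state)

if runs:
    results = test_client.get_test_results('MyProject', runs[0].id, top=100)
    for result in results:
        print(result.test_case_title, result.outcome)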
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # Generated file, DO NOT EDIT # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------------------------- from msrest import Serializer, Deserializer from ...client import Client from . import models class ElasticClient(Client): """Elastic :param str base_url: Service URL :param Authentication creds: Authenticated credentials. """ def __init__(self, base_url=None, creds=None): super(ElasticClient, self).__init__(base_url, creds) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) resource_area_identifier = None def get_elastic_pool_logs(self, pool_id, top=None): """GetElasticPoolLogs. [Preview API] Get elastic pool diagnostics logs for a specified Elastic Pool. :param int pool_id: Pool Id of the Elastic Pool :param int top: Number of elastic pool logs to retrieve :rtype: [ElasticPoolLog] """ route_values = {} if pool_id is not None: route_values['poolId'] = self._serialize.url('pool_id', pool_id, 'int') query_parameters = {} if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') response = self._send(http_method='GET', location_id='595b1769-61d5-4076-a72a-98a02105ca9a', version='7.1-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[ElasticPoolLog]', self._unwrap_collection(response)) def create_elastic_pool(self, elastic_pool, pool_name, authorize_all_pipelines=None, auto_provision_project_pools=None, project_id=None): """CreateElasticPool. [Preview API] Create a new elastic pool. This will create a new TaskAgentPool at the organization level. If a project id is provided, this will create a new TaskAgentQueue in the specified project. :param :class:`<ElasticPool> <azure.devops.v7_1.elastic.models.ElasticPool>` elastic_pool: Elastic pool to create. Contains the properties necessary for configuring a new ElasticPool. :param str pool_name: Name to use for the new TaskAgentPool :param bool authorize_all_pipelines: Setting to determine if all pipelines are authorized to use this TaskAgentPool by default. :param bool auto_provision_project_pools: Setting to automatically provision TaskAgentQueues in every project for the new pool. :param str project_id: Optional: If provided, a new TaskAgentQueue will be created in the specified project. 
:rtype: :class:`<ElasticPoolCreationResult> <azure.devops.v7_1.elastic.models.ElasticPoolCreationResult>` """ query_parameters = {} if pool_name is not None: query_parameters['poolName'] = self._serialize.query('pool_name', pool_name, 'str') if authorize_all_pipelines is not None: query_parameters['authorizeAllPipelines'] = self._serialize.query('authorize_all_pipelines', authorize_all_pipelines, 'bool') if auto_provision_project_pools is not None: query_parameters['autoProvisionProjectPools'] = self._serialize.query('auto_provision_project_pools', auto_provision_project_pools, 'bool') if project_id is not None: query_parameters['projectId'] = self._serialize.query('project_id', project_id, 'str') content = self._serialize.body(elastic_pool, 'ElasticPool') response = self._send(http_method='POST', location_id='dd3c938f-835b-4971-b99a-db75a47aad43', version='7.1-preview.1', query_parameters=query_parameters, content=content) return self._deserialize('ElasticPoolCreationResult', response) def get_elastic_pool(self, pool_id): """GetElasticPool. [Preview API] Returns the Elastic Pool with the specified Pool Id. :param int pool_id: Pool Id of the associated TaskAgentPool :rtype: :class:`<ElasticPool> <azure.devops.v7_1.elastic.models.ElasticPool>` """ route_values = {} if pool_id is not None: route_values['poolId'] = self._serialize.url('pool_id', pool_id, 'int') response = self._send(http_method='GET', location_id='dd3c938f-835b-4971-b99a-db75a47aad43', version='7.1-preview.1', route_values=route_values) return self._deserialize('ElasticPool', response) def get_elastic_pools(self): """GetElasticPools. [Preview API] Get a list of all Elastic Pools. :rtype: [ElasticPool] """ response = self._send(http_method='GET', location_id='dd3c938f-835b-4971-b99a-db75a47aad43', version='7.1-preview.1') return self._deserialize('[ElasticPool]', self._unwrap_collection(response)) def update_elastic_pool(self, elastic_pool_settings, pool_id): """UpdateElasticPool. [Preview API] Update settings on a specified Elastic Pool. :param :class:`<ElasticPoolSettings> <azure.devops.v7_1.elastic.models.ElasticPoolSettings>` elastic_pool_settings: New Elastic Pool settings data :param int pool_id: :rtype: :class:`<ElasticPool> <azure.devops.v7_1.elastic.models.ElasticPool>` """ route_values = {} if pool_id is not None: route_values['poolId'] = self._serialize.url('pool_id', pool_id, 'int') content = self._serialize.body(elastic_pool_settings, 'ElasticPoolSettings') response = self._send(http_method='PATCH', location_id='dd3c938f-835b-4971-b99a-db75a47aad43', version='7.1-preview.1', route_values=route_values, content=content) return self._deserialize('ElasticPool', response) def get_elastic_nodes(self, pool_id, state=None): """GetElasticNodes. 
[Preview API] Get a list of ElasticNodes currently in the ElasticPool :param int pool_id: Pool id of the ElasticPool :param str state: Optional: Filter to only retrieve ElasticNodes in the given ElasticNodeState :rtype: [ElasticNode] """ route_values = {} if pool_id is not None: route_values['poolId'] = self._serialize.url('pool_id', pool_id, 'int') query_parameters = {} if state is not None: query_parameters['$state'] = self._serialize.query('state', state, 'str') response = self._send(http_method='GET', location_id='1b232402-5ff0-42ad-9703-d76497835eb6', version='7.1-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[ElasticNode]', self._unwrap_collection(response)) def update_elastic_node(self, elastic_node_settings, pool_id, elastic_node_id): """UpdateElasticNode. [Preview API] Update properties on a specified ElasticNode :param :class:`<ElasticNodeSettings> <azure.devops.v7_1.elastic.models.ElasticNodeSettings>` elastic_node_settings: :param int pool_id: :param int elastic_node_id: :rtype: :class:`<ElasticNode> <azure.devops.v7_1.elastic.models.ElasticNode>` """ route_values = {} if pool_id is not None: route_values['poolId'] = self._serialize.url('pool_id', pool_id, 'int') if elastic_node_id is not None: route_values['elasticNodeId'] = self._serialize.url('elastic_node_id', elastic_node_id, 'int') content = self._serialize.body(elastic_node_settings, 'ElasticNodeSettings') response = self._send(http_method='PATCH', location_id='1b232402-5ff0-42ad-9703-d76497835eb6', version='7.1-preview.1', route_values=route_values, content=content) return self._deserialize('ElasticNode', response)
azure-devops-python-api/azure-devops/azure/devops/v7_1/elastic/elastic_client.py/0
{ "file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/elastic/elastic_client.py", "repo_id": "azure-devops-python-api", "token_count": 3821 }
375
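As a quick orientation for the preview ElasticClient above, here is an illustrative sketch (not part of the generated sources) that lists elastic pools, their recent diagnostics logs, and their nodes. The organization URL and token are placeholders, and it assumes the 7.1 preview clients are exposed as connection.clients_v7_1 with a get_elastic_client() factory method and that ElasticPool and ElasticNode carry pool_id and state fields; verify these names against the 7.1 models before relying on them.

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

connection = Connection(base_url='https://dev.azure.com/your-organization',
                        creds=BasicAuthentication('', '<personal-access-token>'))

# Assumed factory method for the 7.1 preview elastic area shown above.
elastic_client = connection.clients_v7_1.get_elastic_client()

for pool in elastic_client.get_elastic_pools():
    print('pool', pool.pool_id)
    # Recent diagnostics entries and current nodes for each pool.
    logs = elastic_client.get_elastic_pool_logs(pool.pool_id, top=5)
    print('  recent log entries:', len(logs))
    for node in elastic_client.get_elastic_nodes(pool.pool_id):
        print('  node state:', node.state)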
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # Generated file, DO NOT EDIT # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------------------------- from msrest.serialization import Model class ContainerItemBlobReference(Model): """ Represents an reference to a file in Blobstore :param artifact_hash: :type artifact_hash: str :param artifact_id: :type artifact_id: long :param compression_type: :type compression_type: object :param scope_identifier: :type scope_identifier: str :param session_id: :type session_id: str """ _attribute_map = { 'artifact_hash': {'key': 'artifactHash', 'type': 'str'}, 'artifact_id': {'key': 'artifactId', 'type': 'long'}, 'compression_type': {'key': 'compressionType', 'type': 'object'}, 'scope_identifier': {'key': 'scopeIdentifier', 'type': 'str'}, 'session_id': {'key': 'sessionId', 'type': 'str'} } def __init__(self, artifact_hash=None, artifact_id=None, compression_type=None, scope_identifier=None, session_id=None): super(ContainerItemBlobReference, self).__init__() self.artifact_hash = artifact_hash self.artifact_id = artifact_id self.compression_type = compression_type self.scope_identifier = scope_identifier self.session_id = session_id class FileContainer(Model): """ Represents a container that encapsulates a hierarchical file system. :param artifact_uri: Uri of the artifact associated with the container. :type artifact_uri: str :param content_location: Download Url for the content of this item. :type content_location: str :param created_by: Owner. :type created_by: str :param date_created: Creation date. :type date_created: datetime :param description: Description. :type description: str :param id: Id. :type id: long :param item_location: Location of the item resource. :type item_location: str :param locator_path: ItemStore Locator for this container. :type locator_path: str :param name: Name. :type name: str :param options: Options the container can have. :type options: object :param scope_identifier: Project Id. :type scope_identifier: str :param security_token: Security token of the artifact associated with the container. :type security_token: str :param signing_key_id: Identifier of the optional encryption key. :type signing_key_id: str :param size: Total size of the files in bytes. 
:type size: long """ _attribute_map = { 'artifact_uri': {'key': 'artifactUri', 'type': 'str'}, 'content_location': {'key': 'contentLocation', 'type': 'str'}, 'created_by': {'key': 'createdBy', 'type': 'str'}, 'date_created': {'key': 'dateCreated', 'type': 'iso-8601'}, 'description': {'key': 'description', 'type': 'str'}, 'id': {'key': 'id', 'type': 'long'}, 'item_location': {'key': 'itemLocation', 'type': 'str'}, 'locator_path': {'key': 'locatorPath', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'options': {'key': 'options', 'type': 'object'}, 'scope_identifier': {'key': 'scopeIdentifier', 'type': 'str'}, 'security_token': {'key': 'securityToken', 'type': 'str'}, 'signing_key_id': {'key': 'signingKeyId', 'type': 'str'}, 'size': {'key': 'size', 'type': 'long'} } def __init__(self, artifact_uri=None, content_location=None, created_by=None, date_created=None, description=None, id=None, item_location=None, locator_path=None, name=None, options=None, scope_identifier=None, security_token=None, signing_key_id=None, size=None): super(FileContainer, self).__init__() self.artifact_uri = artifact_uri self.content_location = content_location self.created_by = created_by self.date_created = date_created self.description = description self.id = id self.item_location = item_location self.locator_path = locator_path self.name = name self.options = options self.scope_identifier = scope_identifier self.security_token = security_token self.signing_key_id = signing_key_id self.size = size class FileContainerItem(Model): """ Represents an item in a container. :param artifact_id: Id for Blobstore reference :type artifact_id: long :param blob_metadata: :type blob_metadata: :class:`ContainerItemBlobReference <azure.devops.v7_1.file_container.models.ContainerItemBlobReference>` :param container_id: Container Id. :type container_id: long :param content_id: :type content_id: str :param content_location: Download Url for the content of this item. :type content_location: str :param created_by: Creator. :type created_by: str :param date_created: Creation date. :type date_created: datetime :param date_last_modified: Last modified date. :type date_last_modified: datetime :param file_encoding: Encoding of the file. Zero if not a file. :type file_encoding: int :param file_hash: Hash value of the file. Null if not a file. :type file_hash: str :param file_id: Id of the file content. :type file_id: int :param file_length: Length of the file. Zero if not of a file. :type file_length: long :param file_type: Type of the file. Zero if not a file. :type file_type: int :param item_location: Location of the item resource. :type item_location: str :param item_type: Type of the item: Folder, File or String. :type item_type: object :param last_modified_by: Modifier. :type last_modified_by: str :param path: Unique path that identifies the item. :type path: str :param scope_identifier: Project Id. :type scope_identifier: str :param status: Status of the item: Created or Pending Upload. 
:type status: object :param ticket: :type ticket: str """ _attribute_map = { 'artifact_id': {'key': 'artifactId', 'type': 'long'}, 'blob_metadata': {'key': 'blobMetadata', 'type': 'ContainerItemBlobReference'}, 'container_id': {'key': 'containerId', 'type': 'long'}, 'content_id': {'key': 'contentId', 'type': 'str'}, 'content_location': {'key': 'contentLocation', 'type': 'str'}, 'created_by': {'key': 'createdBy', 'type': 'str'}, 'date_created': {'key': 'dateCreated', 'type': 'iso-8601'}, 'date_last_modified': {'key': 'dateLastModified', 'type': 'iso-8601'}, 'file_encoding': {'key': 'fileEncoding', 'type': 'int'}, 'file_hash': {'key': 'fileHash', 'type': 'str'}, 'file_id': {'key': 'fileId', 'type': 'int'}, 'file_length': {'key': 'fileLength', 'type': 'long'}, 'file_type': {'key': 'fileType', 'type': 'int'}, 'item_location': {'key': 'itemLocation', 'type': 'str'}, 'item_type': {'key': 'itemType', 'type': 'object'}, 'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'}, 'path': {'key': 'path', 'type': 'str'}, 'scope_identifier': {'key': 'scopeIdentifier', 'type': 'str'}, 'status': {'key': 'status', 'type': 'object'}, 'ticket': {'key': 'ticket', 'type': 'str'} } def __init__(self, artifact_id=None, blob_metadata=None, container_id=None, content_id=None, content_location=None, created_by=None, date_created=None, date_last_modified=None, file_encoding=None, file_hash=None, file_id=None, file_length=None, file_type=None, item_location=None, item_type=None, last_modified_by=None, path=None, scope_identifier=None, status=None, ticket=None): super(FileContainerItem, self).__init__() self.artifact_id = artifact_id self.blob_metadata = blob_metadata self.container_id = container_id self.content_id = content_id self.content_location = content_location self.created_by = created_by self.date_created = date_created self.date_last_modified = date_last_modified self.file_encoding = file_encoding self.file_hash = file_hash self.file_id = file_id self.file_length = file_length self.file_type = file_type self.item_location = item_location self.item_type = item_type self.last_modified_by = last_modified_by self.path = path self.scope_identifier = scope_identifier self.status = status self.ticket = ticket __all__ = [ 'ContainerItemBlobReference', 'FileContainer', 'FileContainerItem', ]
azure-devops-python-api/azure-devops/azure/devops/v7_1/file_container/models.py/0
{ "file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/file_container/models.py", "repo_id": "azure-devops-python-api", "token_count": 3418 }
376
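The models above are plain msrest Model subclasses, so they can be serialized and deserialized with the same Serializer/Deserializer pattern the generated clients build from their models module. The following round-trip sketch is illustrative rather than part of the generated sources: the container id, path, and other field values are made-up sample data, and it assumes this file's azure.devops.v7_1.file_container.models import path.

from msrest import Serializer, Deserializer
from azure.devops.v7_1.file_container import models

client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
serialize = Serializer(client_models)
deserialize = Deserializer(client_models)

item = models.FileContainerItem(container_id=42,
                                path='drop/bin/app.dll',
                                item_type='file',
                                file_length=1024)

# Serializing applies the camelCase keys declared in _attribute_map ...
payload = serialize.body(item, 'FileContainerItem')
print(payload)  # e.g. {'containerId': 42, 'path': 'drop/bin/app.dll', ...}

# ... and deserializing a response-shaped dict rebuilds the model instance.
round_tripped = deserialize('FileContainerItem', payload)
print(round_tripped.path, round_tripped.file_length)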
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # Generated file, DO NOT EDIT # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------------------------- from msrest.serialization import Model class AccessMapping(Model): """ :param access_point: :type access_point: str :param display_name: :type display_name: str :param moniker: :type moniker: str :param service_owner: The service which owns this access mapping e.g. TFS, ELS, etc. :type service_owner: str :param virtual_directory: Part of the access mapping which applies context after the access point of the server. :type virtual_directory: str """ _attribute_map = { 'access_point': {'key': 'accessPoint', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'moniker': {'key': 'moniker', 'type': 'str'}, 'service_owner': {'key': 'serviceOwner', 'type': 'str'}, 'virtual_directory': {'key': 'virtualDirectory', 'type': 'str'} } def __init__(self, access_point=None, display_name=None, moniker=None, service_owner=None, virtual_directory=None): super(AccessMapping, self).__init__() self.access_point = access_point self.display_name = display_name self.moniker = moniker self.service_owner = service_owner self.virtual_directory = virtual_directory class ConnectionData(Model): """ Data transfer class that holds information needed to set up a connection with a VSS server. :param authenticated_user: The Id of the authenticated user who made this request. More information about the user can be obtained by passing this Id to the Identity service :type authenticated_user: :class:`Identity <azure.devops.v7_1.locations.models.Identity>` :param authorized_user: The Id of the authorized user who made this request. More information about the user can be obtained by passing this Id to the Identity service :type authorized_user: :class:`Identity <azure.devops.v7_1.locations.models.Identity>` :param deployment_id: The id for the server. :type deployment_id: str :param deployment_type: The type for the server Hosted/OnPremises. :type deployment_type: object :param instance_id: The instance id for this host. :type instance_id: str :param last_user_access: The last user access for this instance. Null if not requested specifically. :type last_user_access: datetime :param location_service_data: Data that the location service holds. :type location_service_data: :class:`LocationServiceData <azure.devops.v7_1.locations.models.LocationServiceData>` :param web_application_relative_directory: The virtual directory of the host we are talking to. 
:type web_application_relative_directory: str """ _attribute_map = { 'authenticated_user': {'key': 'authenticatedUser', 'type': 'Identity'}, 'authorized_user': {'key': 'authorizedUser', 'type': 'Identity'}, 'deployment_id': {'key': 'deploymentId', 'type': 'str'}, 'deployment_type': {'key': 'deploymentType', 'type': 'object'}, 'instance_id': {'key': 'instanceId', 'type': 'str'}, 'last_user_access': {'key': 'lastUserAccess', 'type': 'iso-8601'}, 'location_service_data': {'key': 'locationServiceData', 'type': 'LocationServiceData'}, 'web_application_relative_directory': {'key': 'webApplicationRelativeDirectory', 'type': 'str'} } def __init__(self, authenticated_user=None, authorized_user=None, deployment_id=None, deployment_type=None, instance_id=None, last_user_access=None, location_service_data=None, web_application_relative_directory=None): super(ConnectionData, self).__init__() self.authenticated_user = authenticated_user self.authorized_user = authorized_user self.deployment_id = deployment_id self.deployment_type = deployment_type self.instance_id = instance_id self.last_user_access = last_user_access self.location_service_data = location_service_data self.web_application_relative_directory = web_application_relative_directory class IdentityBase(Model): """ Base Identity class to allow "trimmed" identity class in the GetConnectionData API Makes sure that on-the-wire representations of the derived classes are compatible with each other (e.g. Server responds with PublicIdentity object while client deserializes it as Identity object) Derived classes should not have additional [DataMember] properties :param custom_display_name: The custom display name for the identity (if any). Setting this property to an empty string will clear the existing custom display name. Setting this property to null will not affect the existing persisted value (since null values do not get sent over the wire or to the database) :type custom_display_name: str :param descriptor: :type descriptor: :class:`str <azure.devops.v7_1.microsoft._visual_studio._services._web_api.models.str>` :param id: Identity Identifier. Also called Storage Key, or VSID :type id: str :param is_active: True if the identity has a membership in any Azure Devops group in the organization. :type is_active: bool :param is_container: True if the identity is a group. :type is_container: bool :param master_id: :type master_id: str :param member_ids: Id of the members of the identity (groups only). :type member_ids: list of str :param member_of: :type member_of: list of :class:`str <azure.devops.v7_1.microsoft._visual_studio._services._web_api.models.str>` :param members: :type members: list of :class:`str <azure.devops.v7_1.microsoft._visual_studio._services._web_api.models.str>` :param meta_type_id: :type meta_type_id: int :param properties: :type properties: :class:`object <azure.devops.v7_1.microsoft._visual_studio._services._web_api.models.object>` :param provider_display_name: The display name for the identity as specified by the source identity provider. :type provider_display_name: str :param resource_version: :type resource_version: int :param social_descriptor: :type social_descriptor: :class:`str <azure.devops.v7_1.microsoft._visual_studio._services._web_api.models.str>` :param subject_descriptor: Subject descriptor of a Graph entity. 
:type subject_descriptor: :class:`str <azure.devops.v7_1.microsoft._visual_studio._services._web_api.models.str>` :param unique_user_id: :type unique_user_id: int """ _attribute_map = { 'custom_display_name': {'key': 'customDisplayName', 'type': 'str'}, 'descriptor': {'key': 'descriptor', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'is_active': {'key': 'isActive', 'type': 'bool'}, 'is_container': {'key': 'isContainer', 'type': 'bool'}, 'master_id': {'key': 'masterId', 'type': 'str'}, 'member_ids': {'key': 'memberIds', 'type': '[str]'}, 'member_of': {'key': 'memberOf', 'type': '[str]'}, 'members': {'key': 'members', 'type': '[str]'}, 'meta_type_id': {'key': 'metaTypeId', 'type': 'int'}, 'properties': {'key': 'properties', 'type': 'object'}, 'provider_display_name': {'key': 'providerDisplayName', 'type': 'str'}, 'resource_version': {'key': 'resourceVersion', 'type': 'int'}, 'social_descriptor': {'key': 'socialDescriptor', 'type': 'str'}, 'subject_descriptor': {'key': 'subjectDescriptor', 'type': 'str'}, 'unique_user_id': {'key': 'uniqueUserId', 'type': 'int'} } def __init__(self, custom_display_name=None, descriptor=None, id=None, is_active=None, is_container=None, master_id=None, member_ids=None, member_of=None, members=None, meta_type_id=None, properties=None, provider_display_name=None, resource_version=None, social_descriptor=None, subject_descriptor=None, unique_user_id=None): super(IdentityBase, self).__init__() self.custom_display_name = custom_display_name self.descriptor = descriptor self.id = id self.is_active = is_active self.is_container = is_container self.master_id = master_id self.member_ids = member_ids self.member_of = member_of self.members = members self.meta_type_id = meta_type_id self.properties = properties self.provider_display_name = provider_display_name self.resource_version = resource_version self.social_descriptor = social_descriptor self.subject_descriptor = subject_descriptor self.unique_user_id = unique_user_id class LocationMapping(Model): """ :param access_mapping_moniker: :type access_mapping_moniker: str :param location: :type location: str """ _attribute_map = { 'access_mapping_moniker': {'key': 'accessMappingMoniker', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'} } def __init__(self, access_mapping_moniker=None, location=None): super(LocationMapping, self).__init__() self.access_mapping_moniker = access_mapping_moniker self.location = location class LocationServiceData(Model): """ Data transfer class used to transfer data about the location service data over the web service. :param access_mappings: Data about the access mappings contained by this location service. :type access_mappings: list of :class:`AccessMapping <azure.devops.v7_1.locations.models.AccessMapping>` :param client_cache_fresh: Data that the location service holds. :type client_cache_fresh: bool :param client_cache_time_to_live: The time to live on the location service cache. :type client_cache_time_to_live: int :param default_access_mapping_moniker: The default access mapping moniker for the server. :type default_access_mapping_moniker: str :param last_change_id: The obsolete id for the last change that took place on the server (use LastChangeId64). :type last_change_id: int :param last_change_id64: The non-truncated 64-bit id for the last change that took place on the server. :type last_change_id64: long :param service_definitions: Data about the service definitions contained by this location service. 
:type service_definitions: list of :class:`ServiceDefinition <azure.devops.v7_1.locations.models.ServiceDefinition>` :param service_owner: The identifier of the deployment which is hosting this location data (e.g. SPS, TFS, ELS, Napa, etc.) :type service_owner: str """ _attribute_map = { 'access_mappings': {'key': 'accessMappings', 'type': '[AccessMapping]'}, 'client_cache_fresh': {'key': 'clientCacheFresh', 'type': 'bool'}, 'client_cache_time_to_live': {'key': 'clientCacheTimeToLive', 'type': 'int'}, 'default_access_mapping_moniker': {'key': 'defaultAccessMappingMoniker', 'type': 'str'}, 'last_change_id': {'key': 'lastChangeId', 'type': 'int'}, 'last_change_id64': {'key': 'lastChangeId64', 'type': 'long'}, 'service_definitions': {'key': 'serviceDefinitions', 'type': '[ServiceDefinition]'}, 'service_owner': {'key': 'serviceOwner', 'type': 'str'} } def __init__(self, access_mappings=None, client_cache_fresh=None, client_cache_time_to_live=None, default_access_mapping_moniker=None, last_change_id=None, last_change_id64=None, service_definitions=None, service_owner=None): super(LocationServiceData, self).__init__() self.access_mappings = access_mappings self.client_cache_fresh = client_cache_fresh self.client_cache_time_to_live = client_cache_time_to_live self.default_access_mapping_moniker = default_access_mapping_moniker self.last_change_id = last_change_id self.last_change_id64 = last_change_id64 self.service_definitions = service_definitions self.service_owner = service_owner class ResourceAreaInfo(Model): """ :param id: :type id: str :param location_url: :type location_url: str :param name: :type name: str """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'location_url': {'key': 'locationUrl', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'} } def __init__(self, id=None, location_url=None, name=None): super(ResourceAreaInfo, self).__init__() self.id = id self.location_url = location_url self.name = name class ServiceDefinition(Model): """ :param description: :type description: str :param display_name: :type display_name: str :param identifier: :type identifier: str :param inherit_level: :type inherit_level: object :param location_mappings: :type location_mappings: list of :class:`LocationMapping <azure.devops.v7_1.locations.models.LocationMapping>` :param max_version: Maximum api version that this resource supports (current server version for this resource). Copied from <c>ApiResourceLocation</c>. :type max_version: str :param min_version: Minimum api version that this resource supports. Copied from <c>ApiResourceLocation</c>. :type min_version: str :param parent_identifier: :type parent_identifier: str :param parent_service_type: :type parent_service_type: str :param properties: :type properties: :class:`object <azure.devops.v7_1.locations.models.object>` :param relative_path: :type relative_path: str :param relative_to_setting: :type relative_to_setting: object :param released_version: The latest version of this resource location that is in "Release" (non-preview) mode. Copied from <c>ApiResourceLocation</c>. :type released_version: str :param resource_version: The current resource version supported by this resource location. Copied from <c>ApiResourceLocation</c>. :type resource_version: int :param service_owner: The service which owns this definition e.g. TFS, ELS, etc. 
:type service_owner: str :param service_type: :type service_type: str :param status: :type status: object :param tool_id: :type tool_id: str """ _attribute_map = { 'description': {'key': 'description', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'identifier': {'key': 'identifier', 'type': 'str'}, 'inherit_level': {'key': 'inheritLevel', 'type': 'object'}, 'location_mappings': {'key': 'locationMappings', 'type': '[LocationMapping]'}, 'max_version': {'key': 'maxVersion', 'type': 'str'}, 'min_version': {'key': 'minVersion', 'type': 'str'}, 'parent_identifier': {'key': 'parentIdentifier', 'type': 'str'}, 'parent_service_type': {'key': 'parentServiceType', 'type': 'str'}, 'properties': {'key': 'properties', 'type': 'object'}, 'relative_path': {'key': 'relativePath', 'type': 'str'}, 'relative_to_setting': {'key': 'relativeToSetting', 'type': 'object'}, 'released_version': {'key': 'releasedVersion', 'type': 'str'}, 'resource_version': {'key': 'resourceVersion', 'type': 'int'}, 'service_owner': {'key': 'serviceOwner', 'type': 'str'}, 'service_type': {'key': 'serviceType', 'type': 'str'}, 'status': {'key': 'status', 'type': 'object'}, 'tool_id': {'key': 'toolId', 'type': 'str'} } def __init__(self, description=None, display_name=None, identifier=None, inherit_level=None, location_mappings=None, max_version=None, min_version=None, parent_identifier=None, parent_service_type=None, properties=None, relative_path=None, relative_to_setting=None, released_version=None, resource_version=None, service_owner=None, service_type=None, status=None, tool_id=None): super(ServiceDefinition, self).__init__() self.description = description self.display_name = display_name self.identifier = identifier self.inherit_level = inherit_level self.location_mappings = location_mappings self.max_version = max_version self.min_version = min_version self.parent_identifier = parent_identifier self.parent_service_type = parent_service_type self.properties = properties self.relative_path = relative_path self.relative_to_setting = relative_to_setting self.released_version = released_version self.resource_version = resource_version self.service_owner = service_owner self.service_type = service_type self.status = status self.tool_id = tool_id class Identity(IdentityBase): """ :param custom_display_name: The custom display name for the identity (if any). Setting this property to an empty string will clear the existing custom display name. Setting this property to null will not affect the existing persisted value (since null values do not get sent over the wire or to the database) :type custom_display_name: str :param descriptor: :type descriptor: :class:`str <azure.devops.v7_1.microsoft._visual_studio._services._web_api.models.str>` :param id: Identity Identifier. Also called Storage Key, or VSID :type id: str :param is_active: True if the identity has a membership in any Azure Devops group in the organization. :type is_active: bool :param is_container: True if the identity is a group. :type is_container: bool :param master_id: :type master_id: str :param member_ids: Id of the members of the identity (groups only). 
:type member_ids: list of str :param member_of: :type member_of: list of :class:`str <azure.devops.v7_1.microsoft._visual_studio._services._web_api.models.str>` :param members: :type members: list of :class:`str <azure.devops.v7_1.microsoft._visual_studio._services._web_api.models.str>` :param meta_type_id: :type meta_type_id: int :param properties: :type properties: :class:`object <azure.devops.v7_1.microsoft._visual_studio._services._web_api.models.object>` :param provider_display_name: The display name for the identity as specified by the source identity provider. :type provider_display_name: str :param resource_version: :type resource_version: int :param social_descriptor: :type social_descriptor: :class:`str <azure.devops.v7_1.microsoft._visual_studio._services._web_api.models.str>` :param subject_descriptor: Subject descriptor of a Graph entity. :type subject_descriptor: :class:`str <azure.devops.v7_1.microsoft._visual_studio._services._web_api.models.str>` :param unique_user_id: :type unique_user_id: int """ _attribute_map = { 'custom_display_name': {'key': 'customDisplayName', 'type': 'str'}, 'descriptor': {'key': 'descriptor', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'is_active': {'key': 'isActive', 'type': 'bool'}, 'is_container': {'key': 'isContainer', 'type': 'bool'}, 'master_id': {'key': 'masterId', 'type': 'str'}, 'member_ids': {'key': 'memberIds', 'type': '[str]'}, 'member_of': {'key': 'memberOf', 'type': '[str]'}, 'members': {'key': 'members', 'type': '[str]'}, 'meta_type_id': {'key': 'metaTypeId', 'type': 'int'}, 'properties': {'key': 'properties', 'type': 'object'}, 'provider_display_name': {'key': 'providerDisplayName', 'type': 'str'}, 'resource_version': {'key': 'resourceVersion', 'type': 'int'}, 'social_descriptor': {'key': 'socialDescriptor', 'type': 'str'}, 'subject_descriptor': {'key': 'subjectDescriptor', 'type': 'str'}, 'unique_user_id': {'key': 'uniqueUserId', 'type': 'int'}, } def __init__(self, custom_display_name=None, descriptor=None, id=None, is_active=None, is_container=None, master_id=None, member_ids=None, member_of=None, members=None, meta_type_id=None, properties=None, provider_display_name=None, resource_version=None, social_descriptor=None, subject_descriptor=None, unique_user_id=None): super(Identity, self).__init__(custom_display_name=custom_display_name, descriptor=descriptor, id=id, is_active=is_active, is_container=is_container, master_id=master_id, member_ids=member_ids, member_of=member_of, members=members, meta_type_id=meta_type_id, properties=properties, provider_display_name=provider_display_name, resource_version=resource_version, social_descriptor=social_descriptor, subject_descriptor=subject_descriptor, unique_user_id=unique_user_id) __all__ = [ 'AccessMapping', 'ConnectionData', 'IdentityBase', 'LocationMapping', 'LocationServiceData', 'ResourceAreaInfo', 'ServiceDefinition', 'Identity', ]
azure-devops-python-api/azure-devops/azure/devops/v7_1/location/models.py/0
{ "file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/location/models.py", "repo_id": "azure-devops-python-api", "token_count": 7645 }
377
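To show how the location models above fit together, here is a small sketch (not part of the generated sources) that builds a ServiceDefinition with two LocationMappings and resolves a URL by access-mapping moniker; the monikers, identifier, and URLs are made-up sample values.

from azure.devops.v7_1.location.models import LocationMapping, ServiceDefinition

definition = ServiceDefinition(
    service_type='DistributedTask',
    identifier='00000000-0000-0000-0000-000000000000',
    location_mappings=[
        LocationMapping(access_mapping_moniker='PublicAccessMapping',
                        location='https://dev.azure.com/your-organization/'),
        LocationMapping(access_mapping_moniker='ServerAccessMapping',
                        location='https://tfs.example.internal/tfs/'),
    ])

def resolve_location(service_definition, moniker):
    """Return the location registered for the given access-mapping moniker, if any."""
    for mapping in service_definition.location_mappings or []:
        if mapping.access_mapping_moniker == moniker:
            return mapping.location
    return None

print(resolve_location(definition, 'PublicAccessMapping'))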
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------

from msrest.serialization import Model


class AttributeDescriptor(Model):
    """
    Identifies an attribute with a name and a container.

    :param attribute_name: The name of the attribute.
    :type attribute_name: str
    :param container_name: The container the attribute resides in.
    :type container_name: str
    """

    _attribute_map = {
        'attribute_name': {'key': 'attributeName', 'type': 'str'},
        'container_name': {'key': 'containerName', 'type': 'str'}
    }

    def __init__(self, attribute_name=None, container_name=None):
        super(AttributeDescriptor, self).__init__()
        self.attribute_name = attribute_name
        self.container_name = container_name


class AttributesContainer(Model):
    """
    Stores a set of named profile attributes.

    :param attributes: The attributes stored by the container.
    :type attributes: dict
    :param container_name: The name of the container.
    :type container_name: str
    :param revision: The maximum revision number of any attribute within the container.
    :type revision: int
    """

    _attribute_map = {
        'attributes': {'key': 'attributes', 'type': '{ProfileAttribute}'},
        'container_name': {'key': 'containerName', 'type': 'str'},
        'revision': {'key': 'revision', 'type': 'int'}
    }

    def __init__(self, attributes=None, container_name=None, revision=None):
        super(AttributesContainer, self).__init__()
        self.attributes = attributes
        self.container_name = container_name
        self.revision = revision


class Avatar(Model):
    """
    :param is_auto_generated:
    :type is_auto_generated: bool
    :param size:
    :type size: object
    :param time_stamp:
    :type time_stamp: datetime
    :param value:
    :type value: str
    """

    _attribute_map = {
        'is_auto_generated': {'key': 'isAutoGenerated', 'type': 'bool'},
        'size': {'key': 'size', 'type': 'object'},
        'time_stamp': {'key': 'timeStamp', 'type': 'iso-8601'},
        'value': {'key': 'value', 'type': 'str'}
    }

    def __init__(self, is_auto_generated=None, size=None, time_stamp=None, value=None):
        super(Avatar, self).__init__()
        self.is_auto_generated = is_auto_generated
        self.size = size
        self.time_stamp = time_stamp
        self.value = value


class CreateProfileContext(Model):
    """
    :param ci_data:
    :type ci_data: dict
    :param contact_with_offers:
    :type contact_with_offers: bool
    :param country_name:
    :type country_name: str
    :param display_name:
    :type display_name: str
    :param email_address:
    :type email_address: str
    :param has_account:
    :type has_account: bool
    :param language:
    :type language: str
    :param phone_number:
    :type phone_number: str
    :param profile_state: The current state of the profile.
    :type profile_state: object
    """

    _attribute_map = {
        'ci_data': {'key': 'ciData', 'type': '{object}'},
        'contact_with_offers': {'key': 'contactWithOffers', 'type': 'bool'},
        'country_name': {'key': 'countryName', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'email_address': {'key': 'emailAddress', 'type': 'str'},
        'has_account': {'key': 'hasAccount', 'type': 'bool'},
        'language': {'key': 'language', 'type': 'str'},
        'phone_number': {'key': 'phoneNumber', 'type': 'str'},
        'profile_state': {'key': 'profileState', 'type': 'object'}
    }

    def __init__(self, ci_data=None, contact_with_offers=None, country_name=None, display_name=None, email_address=None, has_account=None, language=None, phone_number=None, profile_state=None):
        super(CreateProfileContext, self).__init__()
        self.ci_data = ci_data
        self.contact_with_offers = contact_with_offers
        self.country_name = country_name
        self.display_name = display_name
        self.email_address = email_address
        self.has_account = has_account
        self.language = language
        self.phone_number = phone_number
        self.profile_state = profile_state


class GeoRegion(Model):
    """
    :param region_code:
    :type region_code: str
    """

    _attribute_map = {
        'region_code': {'key': 'regionCode', 'type': 'str'}
    }

    def __init__(self, region_code=None):
        super(GeoRegion, self).__init__()
        self.region_code = region_code


class Profile(Model):
    """
    A user profile.

    :param application_container: The attributes of this profile.
    :type application_container: :class:`AttributesContainer <azure.devops.v7_1.profile.models.AttributesContainer>`
    :param core_attributes: The core attributes of this profile.
    :type core_attributes: dict
    :param core_revision: The maximum revision number of any attribute.
    :type core_revision: int
    :param id: The unique identifier of the profile.
    :type id: str
    :param profile_state: The current state of the profile.
    :type profile_state: object
    :param revision: The maximum revision number of any attribute.
    :type revision: int
    :param time_stamp: The time at which this profile was last changed.
    :type time_stamp: datetime
    """

    _attribute_map = {
        'application_container': {'key': 'applicationContainer', 'type': 'AttributesContainer'},
        'core_attributes': {'key': 'coreAttributes', 'type': '{CoreProfileAttribute}'},
        'core_revision': {'key': 'coreRevision', 'type': 'int'},
        'id': {'key': 'id', 'type': 'str'},
        'profile_state': {'key': 'profileState', 'type': 'object'},
        'revision': {'key': 'revision', 'type': 'int'},
        'time_stamp': {'key': 'timeStamp', 'type': 'iso-8601'}
    }

    def __init__(self, application_container=None, core_attributes=None, core_revision=None, id=None, profile_state=None, revision=None, time_stamp=None):
        super(Profile, self).__init__()
        self.application_container = application_container
        self.core_attributes = core_attributes
        self.core_revision = core_revision
        self.id = id
        self.profile_state = profile_state
        self.revision = revision
        self.time_stamp = time_stamp


class ProfileAttributeBase(Model):
    """
    :param descriptor: The descriptor of the attribute.
    :type descriptor: :class:`AttributeDescriptor <azure.devops.v7_1.profile.models.AttributeDescriptor>`
    :param revision: The revision number of the attribute.
    :type revision: int
    :param time_stamp: The time the attribute was last changed.
    :type time_stamp: datetime
    :param value: The value of the attribute.
    :type value: object
    """

    _attribute_map = {
        'descriptor': {'key': 'descriptor', 'type': 'AttributeDescriptor'},
        'revision': {'key': 'revision', 'type': 'int'},
        'time_stamp': {'key': 'timeStamp', 'type': 'iso-8601'},
        'value': {'key': 'value', 'type': 'object'}
    }

    def __init__(self, descriptor=None, revision=None, time_stamp=None, value=None):
        super(ProfileAttributeBase, self).__init__()
        self.descriptor = descriptor
        self.revision = revision
        self.time_stamp = time_stamp
        self.value = value


class ProfileRegion(Model):
    """
    Country/region information

    :param code: The two-letter code defined in ISO 3166 for the country/region.
    :type code: str
    :param name: Localized country/region name
    :type name: str
    """

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'}
    }

    def __init__(self, code=None, name=None):
        super(ProfileRegion, self).__init__()
        self.code = code
        self.name = name


class ProfileRegions(Model):
    """
    Container of country/region information

    :param notice_contact_consent_requirement_regions: List of country/region code with contact consent requirement type of notice
    :type notice_contact_consent_requirement_regions: list of str
    :param opt_out_contact_consent_requirement_regions: List of country/region code with contact consent requirement type of opt-out
    :type opt_out_contact_consent_requirement_regions: list of str
    :param regions: List of country/regions
    :type regions: list of :class:`ProfileRegion <azure.devops.v7_1.profile.models.ProfileRegion>`
    """

    _attribute_map = {
        'notice_contact_consent_requirement_regions': {'key': 'noticeContactConsentRequirementRegions', 'type': '[str]'},
        'opt_out_contact_consent_requirement_regions': {'key': 'optOutContactConsentRequirementRegions', 'type': '[str]'},
        'regions': {'key': 'regions', 'type': '[ProfileRegion]'}
    }

    def __init__(self, notice_contact_consent_requirement_regions=None, opt_out_contact_consent_requirement_regions=None, regions=None):
        super(ProfileRegions, self).__init__()
        self.notice_contact_consent_requirement_regions = notice_contact_consent_requirement_regions
        self.opt_out_contact_consent_requirement_regions = opt_out_contact_consent_requirement_regions
        self.regions = regions


class CoreProfileAttribute(ProfileAttributeBase):
    """
    A profile attribute which always has a value for each profile.
    """

    _attribute_map = {
    }

    def __init__(self):
        super(CoreProfileAttribute, self).__init__()


class ProfileAttribute(ProfileAttributeBase):
    """
    A named object associated with a profile.
    """

    _attribute_map = {
    }

    def __init__(self):
        super(ProfileAttribute, self).__init__()


__all__ = [
    'AttributeDescriptor',
    'AttributesContainer',
    'Avatar',
    'CreateProfileContext',
    'GeoRegion',
    'Profile',
    'ProfileAttributeBase',
    'ProfileRegion',
    'ProfileRegions',
    'CoreProfileAttribute',
    'ProfileAttribute',
]
azure-devops-python-api/azure-devops/azure/devops/v7_1/profile/models.py/0
{ "file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/profile/models.py", "repo_id": "azure-devops-python-api", "token_count": 3848 }
378
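A companion sketch for the profile models above, going in the other direction: a hand-written dict shaped like a profile REST payload (not a real service response) is deserialized into a typed Profile. It assumes msrest is installed; the 'iso-8601' entry in _attribute_map is what turns the timeStamp string into a datetime, and the nested applicationContainer dict comes back as an AttributesContainer.

# Illustrative sketch (not part of the generated module): deserialize a raw
# dict into the typed Profile model using the module's own class map.
from msrest import Deserializer

from azure.devops.v7_1.profile import models as profile_models

client_models = {k: v for k, v in profile_models.__dict__.items() if isinstance(v, type)}
deserialize = Deserializer(client_models)

# Hand-written, abbreviated payload; the values are placeholders.
raw_profile = {
    'id': '00000000-0000-0000-0000-000000000000',
    'coreRevision': 42,
    'revision': 42,
    'timeStamp': '2023-01-01T12:00:00Z',
    'applicationContainer': {
        'containerName': 'Application',
        'revision': 7,
    },
}

profile = deserialize('Profile', raw_profile)
print(profile.core_revision)                           # 42
print(profile.time_stamp.year)                         # 2023 (parsed via 'iso-8601')
print(profile.application_container.container_name)    # 'Application'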
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # Generated file, DO NOT EDIT # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------------------------- from msrest import Serializer, Deserializer from ...client import Client from . import models class ReleaseClient(Client): """Release :param str base_url: Service URL :param Authentication creds: Authenticated credentials. """ def __init__(self, base_url=None, creds=None): super(ReleaseClient, self).__init__(base_url, creds) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) resource_area_identifier = 'efc2f575-36ef-48e9-b672-0c6fb4a48ac5' def get_approvals(self, project, assigned_to_filter=None, status_filter=None, release_ids_filter=None, type_filter=None, top=None, continuation_token=None, query_order=None, include_my_group_approvals=None): """GetApprovals. [Preview API] Get a list of approvals :param str project: Project ID or project name :param str assigned_to_filter: Approvals assigned to this user. :param str status_filter: Approvals with this status. Default is 'pending'. :param [int] release_ids_filter: Approvals for release id(s) mentioned in the filter. Multiple releases can be mentioned by separating them with ',' e.g. releaseIdsFilter=1,2,3,4. :param str type_filter: Approval with this type. :param int top: Number of approvals to get. Default is 50. :param int continuation_token: Gets the approvals after the continuation token provided. :param str query_order: Gets the results in the defined order of created approvals. Default is 'descending'. :param bool include_my_group_approvals: 'true' to include my group approvals. Default is 'false'. 
:rtype: :class:`<[ReleaseApproval]> <azure.devops.v7_1.release.models.[ReleaseApproval]>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if assigned_to_filter is not None: query_parameters['assignedToFilter'] = self._serialize.query('assigned_to_filter', assigned_to_filter, 'str') if status_filter is not None: query_parameters['statusFilter'] = self._serialize.query('status_filter', status_filter, 'str') if release_ids_filter is not None: release_ids_filter = ",".join(map(str, release_ids_filter)) query_parameters['releaseIdsFilter'] = self._serialize.query('release_ids_filter', release_ids_filter, 'str') if type_filter is not None: query_parameters['typeFilter'] = self._serialize.query('type_filter', type_filter, 'str') if top is not None: query_parameters['top'] = self._serialize.query('top', top, 'int') if continuation_token is not None: query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'int') if query_order is not None: query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str') if include_my_group_approvals is not None: query_parameters['includeMyGroupApprovals'] = self._serialize.query('include_my_group_approvals', include_my_group_approvals, 'bool') response = self._send(http_method='GET', location_id='b47c6458-e73b-47cb-a770-4df1e8813a91', version='7.1-preview.3', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[ReleaseApproval]', self._unwrap_collection(response)) def update_release_approval(self, approval, project, approval_id): """UpdateReleaseApproval. [Preview API] Update status of an approval :param :class:`<ReleaseApproval> <azure.devops.v7_1.release.models.ReleaseApproval>` approval: ReleaseApproval object having status, approver and comments. :param str project: Project ID or project name :param int approval_id: Id of the approval. :rtype: :class:`<ReleaseApproval> <azure.devops.v7_1.release.models.ReleaseApproval>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if approval_id is not None: route_values['approvalId'] = self._serialize.url('approval_id', approval_id, 'int') content = self._serialize.body(approval, 'ReleaseApproval') response = self._send(http_method='PATCH', location_id='9328e074-59fb-465a-89d9-b09c82ee5109', version='7.1-preview.3', route_values=route_values, content=content) return self._deserialize('ReleaseApproval', response) def get_release_task_attachment_content(self, project, release_id, environment_id, attempt_id, plan_id, timeline_id, record_id, type, name, **kwargs): """GetReleaseTaskAttachmentContent. [Preview API] Get a release task attachment. :param str project: Project ID or project name :param int release_id: Id of the release. :param int environment_id: Id of the release environment. :param int attempt_id: Attempt number of deployment. :param str plan_id: Plan Id of the deploy phase. :param str timeline_id: Timeline Id of the task. :param str record_id: Record Id of attachment. :param str type: Type of the attachment. :param str name: Name of the attachment. 
:rtype: object """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if release_id is not None: route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int') if environment_id is not None: route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int') if attempt_id is not None: route_values['attemptId'] = self._serialize.url('attempt_id', attempt_id, 'int') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str') if timeline_id is not None: route_values['timelineId'] = self._serialize.url('timeline_id', timeline_id, 'str') if record_id is not None: route_values['recordId'] = self._serialize.url('record_id', record_id, 'str') if type is not None: route_values['type'] = self._serialize.url('type', type, 'str') if name is not None: route_values['name'] = self._serialize.url('name', name, 'str') response = self._send(http_method='GET', location_id='60b86efb-7b8c-4853-8f9f-aa142b77b479', version='7.1-preview.1', route_values=route_values, accept_media_type='application/octet-stream') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_release_task_attachments(self, project, release_id, environment_id, attempt_id, plan_id, type): """GetReleaseTaskAttachments. [Preview API] Get the release task attachments. :param str project: Project ID or project name :param int release_id: Id of the release. :param int environment_id: Id of the release environment. :param int attempt_id: Attempt number of deployment. :param str plan_id: Plan Id of the deploy phase. :param str type: Type of the attachment. :rtype: [ReleaseTaskAttachment] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if release_id is not None: route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int') if environment_id is not None: route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int') if attempt_id is not None: route_values['attemptId'] = self._serialize.url('attempt_id', attempt_id, 'int') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str') if type is not None: route_values['type'] = self._serialize.url('type', type, 'str') response = self._send(http_method='GET', location_id='a4d06688-0dfa-4895-82a5-f43ec9452306', version='7.1-preview.1', route_values=route_values) return self._deserialize('[ReleaseTaskAttachment]', self._unwrap_collection(response)) def create_release_definition(self, release_definition, project): """CreateReleaseDefinition. [Preview API] Create a release definition :param :class:`<ReleaseDefinition> <azure.devops.v7_1.release.models.ReleaseDefinition>` release_definition: release definition object to create. 
:param str project: Project ID or project name :rtype: :class:`<ReleaseDefinition> <azure.devops.v7_1.release.models.ReleaseDefinition>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') content = self._serialize.body(release_definition, 'ReleaseDefinition') response = self._send(http_method='POST', location_id='d8f96f24-8ea7-4cb6-baab-2df8fc515665', version='7.1-preview.4', route_values=route_values, content=content) return self._deserialize('ReleaseDefinition', response) def delete_release_definition(self, project, definition_id, comment=None, force_delete=None): """DeleteReleaseDefinition. [Preview API] Delete a release definition. :param str project: Project ID or project name :param int definition_id: Id of the release definition. :param str comment: Comment for deleting a release definition. :param bool force_delete: 'true' to automatically cancel any in-progress release deployments and proceed with release definition deletion . Default is 'false'. """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if definition_id is not None: route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int') query_parameters = {} if comment is not None: query_parameters['comment'] = self._serialize.query('comment', comment, 'str') if force_delete is not None: query_parameters['forceDelete'] = self._serialize.query('force_delete', force_delete, 'bool') self._send(http_method='DELETE', location_id='d8f96f24-8ea7-4cb6-baab-2df8fc515665', version='7.1-preview.4', route_values=route_values, query_parameters=query_parameters) def get_release_definition(self, project, definition_id, property_filters=None): """GetReleaseDefinition. [Preview API] Get a release definition. :param str project: Project ID or project name :param int definition_id: Id of the release definition. :param [str] property_filters: A comma-delimited list of extended properties to be retrieved. If set, the returned Release Definition will contain values for the specified property Ids (if they exist). If not set, properties will not be included. :rtype: :class:`<ReleaseDefinition> <azure.devops.v7_1.release.models.ReleaseDefinition>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if definition_id is not None: route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int') query_parameters = {} if property_filters is not None: property_filters = ",".join(property_filters) query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str') response = self._send(http_method='GET', location_id='d8f96f24-8ea7-4cb6-baab-2df8fc515665', version='7.1-preview.4', route_values=route_values, query_parameters=query_parameters) return self._deserialize('ReleaseDefinition', response) def get_release_definitions(self, project, search_text=None, expand=None, artifact_type=None, artifact_source_id=None, top=None, continuation_token=None, query_order=None, path=None, is_exact_name_match=None, tag_filter=None, property_filters=None, definition_id_filter=None, is_deleted=None, search_text_contains_folder_name=None): """GetReleaseDefinitions. [Preview API] Get a list of release definitions. :param str project: Project ID or project name :param str search_text: Get release definitions with names containing searchText. 
:param str expand: The properties that should be expanded in the list of Release definitions. :param str artifact_type: Release definitions with given artifactType will be returned. Values can be Build, Jenkins, GitHub, Nuget, Team Build (external), ExternalTFSBuild, Git, TFVC, ExternalTfsXamlBuild. :param str artifact_source_id: Release definitions with given artifactSourceId will be returned. e.g. For build it would be {projectGuid}:{BuildDefinitionId}, for Jenkins it would be {JenkinsConnectionId}:{JenkinsDefinitionId}, for TfsOnPrem it would be {TfsOnPremConnectionId}:{ProjectName}:{TfsOnPremDefinitionId}. For third-party artifacts e.g. TeamCity, BitBucket you may refer 'uniqueSourceIdentifier' inside vss-extension.json at https://github.com/Microsoft/vsts-rm-extensions/blob/master/Extensions. :param int top: Number of release definitions to get. :param str continuation_token: Gets the release definitions after the continuation token provided. :param str query_order: Gets the results in the defined order. Default is 'IdAscending'. :param str path: Gets the release definitions under the specified path. :param bool is_exact_name_match: 'true' to get the release definitions with exact match as specified in searchText. Default is 'false'. :param [str] tag_filter: A comma-delimited list of tags. Only release definitions with these tags will be returned. :param [str] property_filters: A comma-delimited list of extended properties to be retrieved. If set, the returned Release Definitions will contain values for the specified property Ids (if they exist). If not set, properties will not be included. Note that this will not filter out any Release Definition from results irrespective of whether it has property set or not. :param [str] definition_id_filter: A comma-delimited list of release definitions to retrieve. :param bool is_deleted: 'true' to get release definitions that have been deleted. Default is 'false'. :param bool search_text_contains_folder_name: 'true' to get the release definitions under the folder with name as specified in searchText. Default is 'false'. 
:rtype: :class:`<[ReleaseDefinition]> <azure.devops.v7_1.release.models.[ReleaseDefinition]>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if search_text is not None: query_parameters['searchText'] = self._serialize.query('search_text', search_text, 'str') if expand is not None: query_parameters['$expand'] = self._serialize.query('expand', expand, 'str') if artifact_type is not None: query_parameters['artifactType'] = self._serialize.query('artifact_type', artifact_type, 'str') if artifact_source_id is not None: query_parameters['artifactSourceId'] = self._serialize.query('artifact_source_id', artifact_source_id, 'str') if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') if continuation_token is not None: query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str') if query_order is not None: query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str') if path is not None: query_parameters['path'] = self._serialize.query('path', path, 'str') if is_exact_name_match is not None: query_parameters['isExactNameMatch'] = self._serialize.query('is_exact_name_match', is_exact_name_match, 'bool') if tag_filter is not None: tag_filter = ",".join(tag_filter) query_parameters['tagFilter'] = self._serialize.query('tag_filter', tag_filter, 'str') if property_filters is not None: property_filters = ",".join(property_filters) query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str') if definition_id_filter is not None: definition_id_filter = ",".join(definition_id_filter) query_parameters['definitionIdFilter'] = self._serialize.query('definition_id_filter', definition_id_filter, 'str') if is_deleted is not None: query_parameters['isDeleted'] = self._serialize.query('is_deleted', is_deleted, 'bool') if search_text_contains_folder_name is not None: query_parameters['searchTextContainsFolderName'] = self._serialize.query('search_text_contains_folder_name', search_text_contains_folder_name, 'bool') response = self._send(http_method='GET', location_id='d8f96f24-8ea7-4cb6-baab-2df8fc515665', version='7.1-preview.4', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[ReleaseDefinition]', self._unwrap_collection(response)) def update_release_definition(self, release_definition, project): """UpdateReleaseDefinition. [Preview API] Update a release definition. :param :class:`<ReleaseDefinition> <azure.devops.v7_1.release.models.ReleaseDefinition>` release_definition: Release definition object to update. 
:param str project: Project ID or project name :rtype: :class:`<ReleaseDefinition> <azure.devops.v7_1.release.models.ReleaseDefinition>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') content = self._serialize.body(release_definition, 'ReleaseDefinition') response = self._send(http_method='PUT', location_id='d8f96f24-8ea7-4cb6-baab-2df8fc515665', version='7.1-preview.4', route_values=route_values, content=content) return self._deserialize('ReleaseDefinition', response) def get_deployments(self, project, definition_id=None, definition_environment_id=None, created_by=None, min_modified_time=None, max_modified_time=None, deployment_status=None, operation_status=None, latest_attempts_only=None, query_order=None, top=None, continuation_token=None, created_for=None, min_started_time=None, max_started_time=None, source_branch=None): """GetDeployments. [Preview API] :param str project: Project ID or project name :param int definition_id: :param int definition_environment_id: :param str created_by: :param datetime min_modified_time: :param datetime max_modified_time: :param str deployment_status: :param str operation_status: :param bool latest_attempts_only: :param str query_order: :param int top: :param int continuation_token: :param str created_for: :param datetime min_started_time: :param datetime max_started_time: :param str source_branch: :rtype: :class:`<[Deployment]> <azure.devops.v7_1.release.models.[Deployment]>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if definition_id is not None: query_parameters['definitionId'] = self._serialize.query('definition_id', definition_id, 'int') if definition_environment_id is not None: query_parameters['definitionEnvironmentId'] = self._serialize.query('definition_environment_id', definition_environment_id, 'int') if created_by is not None: query_parameters['createdBy'] = self._serialize.query('created_by', created_by, 'str') if min_modified_time is not None: query_parameters['minModifiedTime'] = self._serialize.query('min_modified_time', min_modified_time, 'iso-8601') if max_modified_time is not None: query_parameters['maxModifiedTime'] = self._serialize.query('max_modified_time', max_modified_time, 'iso-8601') if deployment_status is not None: query_parameters['deploymentStatus'] = self._serialize.query('deployment_status', deployment_status, 'str') if operation_status is not None: query_parameters['operationStatus'] = self._serialize.query('operation_status', operation_status, 'str') if latest_attempts_only is not None: query_parameters['latestAttemptsOnly'] = self._serialize.query('latest_attempts_only', latest_attempts_only, 'bool') if query_order is not None: query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str') if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') if continuation_token is not None: query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'int') if created_for is not None: query_parameters['createdFor'] = self._serialize.query('created_for', created_for, 'str') if min_started_time is not None: query_parameters['minStartedTime'] = self._serialize.query('min_started_time', min_started_time, 'iso-8601') if max_started_time is not None: query_parameters['maxStartedTime'] = self._serialize.query('max_started_time', max_started_time, 'iso-8601') if 
source_branch is not None: query_parameters['sourceBranch'] = self._serialize.query('source_branch', source_branch, 'str') response = self._send(http_method='GET', location_id='b005ef73-cddc-448e-9ba2-5193bf36b19f', version='7.1-preview.2', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[Deployment]', self._unwrap_collection(response)) def get_release_environment(self, project, release_id, environment_id, expand=None): """GetReleaseEnvironment. [Preview API] Get a release environment. :param str project: Project ID or project name :param int release_id: Id of the release. :param int environment_id: Id of the release environment. :param str expand: A property that should be expanded in the environment. :rtype: :class:`<ReleaseEnvironment> <azure.devops.v7_1.release.models.ReleaseEnvironment>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if release_id is not None: route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int') if environment_id is not None: route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int') query_parameters = {} if expand is not None: query_parameters['$expand'] = self._serialize.query('expand', expand, 'str') response = self._send(http_method='GET', location_id='a7e426b1-03dc-48af-9dfe-c98bac612dcb', version='7.1-preview.7', route_values=route_values, query_parameters=query_parameters) return self._deserialize('ReleaseEnvironment', response) def update_release_environment(self, environment_update_data, project, release_id, environment_id): """UpdateReleaseEnvironment. [Preview API] Update the status of a release environment :param :class:`<ReleaseEnvironmentUpdateMetadata> <azure.devops.v7_1.release.models.ReleaseEnvironmentUpdateMetadata>` environment_update_data: Environment update meta data. :param str project: Project ID or project name :param int release_id: Id of the release. :param int environment_id: Id of release environment. :rtype: :class:`<ReleaseEnvironment> <azure.devops.v7_1.release.models.ReleaseEnvironment>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if release_id is not None: route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int') if environment_id is not None: route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int') content = self._serialize.body(environment_update_data, 'ReleaseEnvironmentUpdateMetadata') response = self._send(http_method='PATCH', location_id='a7e426b1-03dc-48af-9dfe-c98bac612dcb', version='7.1-preview.7', route_values=route_values, content=content) return self._deserialize('ReleaseEnvironment', response) def delete_folder(self, project, path): """DeleteFolder. [Preview API] Deletes a definition folder for given folder name and path and all it's existing definitions. :param str project: Project ID or project name :param str path: Path of the folder to delete. """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if path is not None: route_values['path'] = self._serialize.url('path', path, 'str') self._send(http_method='DELETE', location_id='f7ddf76d-ce0c-4d68-94ff-becaec5d9dea', version='7.1-preview.2', route_values=route_values) def get_folders(self, project, path=None, query_order=None): """GetFolders. [Preview API] Gets folders. 
:param str project: Project ID or project name :param str path: Path of the folder. :param str query_order: Gets the results in the defined order. Default is 'None'. :rtype: [Folder] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if path is not None: route_values['path'] = self._serialize.url('path', path, 'str') query_parameters = {} if query_order is not None: query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str') response = self._send(http_method='GET', location_id='f7ddf76d-ce0c-4d68-94ff-becaec5d9dea', version='7.1-preview.2', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[Folder]', self._unwrap_collection(response)) def update_folder(self, folder, project, path): """UpdateFolder. [Preview API] Updates an existing folder at given existing path. :param :class:`<Folder> <azure.devops.v7_1.release.models.Folder>` folder: folder. :param str project: Project ID or project name :param str path: Path of the folder to update. :rtype: :class:`<Folder> <azure.devops.v7_1.release.models.Folder>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if path is not None: route_values['path'] = self._serialize.url('path', path, 'str') content = self._serialize.body(folder, 'Folder') response = self._send(http_method='PATCH', location_id='f7ddf76d-ce0c-4d68-94ff-becaec5d9dea', version='7.1-preview.2', route_values=route_values, content=content) return self._deserialize('Folder', response) def update_gates(self, gate_update_metadata, project, gate_step_id): """UpdateGates. [Preview API] Updates the gate for a deployment. :param :class:`<GateUpdateMetadata> <azure.devops.v7_1.release.models.GateUpdateMetadata>` gate_update_metadata: Metadata to patch the Release Gates. :param str project: Project ID or project name :param int gate_step_id: Gate step Id. :rtype: :class:`<ReleaseGates> <azure.devops.v7_1.release.models.ReleaseGates>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if gate_step_id is not None: route_values['gateStepId'] = self._serialize.url('gate_step_id', gate_step_id, 'int') content = self._serialize.body(gate_update_metadata, 'GateUpdateMetadata') response = self._send(http_method='PATCH', location_id='2666a539-2001-4f80-bcc7-0379956749d4', version='7.1-preview.1', route_values=route_values, content=content) return self._deserialize('ReleaseGates', response) def get_logs(self, project, release_id, **kwargs): """GetLogs. [Preview API] Get logs for a release Id. :param str project: Project ID or project name :param int release_id: Id of the release. :rtype: object """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if release_id is not None: route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int') response = self._send(http_method='GET', location_id='c37fbab5-214b-48e4-a55b-cb6b4f6e4038', version='7.1-preview.2', route_values=route_values, accept_media_type='application/zip') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_task_log(self, project, release_id, environment_id, release_deploy_phase_id, task_id, start_line=None, end_line=None, **kwargs): """GetTaskLog. 
[Preview API] Gets the task log of a release as a plain text file. :param str project: Project ID or project name :param int release_id: Id of the release. :param int environment_id: Id of release environment. :param int release_deploy_phase_id: Release deploy phase Id. :param int task_id: ReleaseTask Id for the log. :param long start_line: Starting line number for logs :param long end_line: Ending line number for logs :rtype: object """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if release_id is not None: route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int') if environment_id is not None: route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int') if release_deploy_phase_id is not None: route_values['releaseDeployPhaseId'] = self._serialize.url('release_deploy_phase_id', release_deploy_phase_id, 'int') if task_id is not None: route_values['taskId'] = self._serialize.url('task_id', task_id, 'int') query_parameters = {} if start_line is not None: query_parameters['startLine'] = self._serialize.query('start_line', start_line, 'long') if end_line is not None: query_parameters['endLine'] = self._serialize.query('end_line', end_line, 'long') response = self._send(http_method='GET', location_id='17c91af7-09fd-4256-bff1-c24ee4f73bc0', version='7.1-preview.2', route_values=route_values, query_parameters=query_parameters, accept_media_type='text/plain') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_manual_intervention(self, project, release_id, manual_intervention_id): """GetManualIntervention. [Preview API] Get manual intervention for a given release and manual intervention id. :param str project: Project ID or project name :param int release_id: Id of the release. :param int manual_intervention_id: Id of the manual intervention. :rtype: :class:`<ManualIntervention> <azure.devops.v7_1.release.models.ManualIntervention>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if release_id is not None: route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int') if manual_intervention_id is not None: route_values['manualInterventionId'] = self._serialize.url('manual_intervention_id', manual_intervention_id, 'int') response = self._send(http_method='GET', location_id='616c46e4-f370-4456-adaa-fbaf79c7b79e', version='7.1-preview.1', route_values=route_values) return self._deserialize('ManualIntervention', response) def get_manual_interventions(self, project, release_id): """GetManualInterventions. [Preview API] List all manual interventions for a given release. :param str project: Project ID or project name :param int release_id: Id of the release. :rtype: [ManualIntervention] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if release_id is not None: route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int') response = self._send(http_method='GET', location_id='616c46e4-f370-4456-adaa-fbaf79c7b79e', version='7.1-preview.1', route_values=route_values) return self._deserialize('[ManualIntervention]', self._unwrap_collection(response)) def update_manual_intervention(self, manual_intervention_update_metadata, project, release_id, manual_intervention_id): """UpdateManualIntervention. 
[Preview API] Update manual intervention. :param :class:`<ManualInterventionUpdateMetadata> <azure.devops.v7_1.release.models.ManualInterventionUpdateMetadata>` manual_intervention_update_metadata: Meta data to update manual intervention. :param str project: Project ID or project name :param int release_id: Id of the release. :param int manual_intervention_id: Id of the manual intervention. :rtype: :class:`<ManualIntervention> <azure.devops.v7_1.release.models.ManualIntervention>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if release_id is not None: route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int') if manual_intervention_id is not None: route_values['manualInterventionId'] = self._serialize.url('manual_intervention_id', manual_intervention_id, 'int') content = self._serialize.body(manual_intervention_update_metadata, 'ManualInterventionUpdateMetadata') response = self._send(http_method='PATCH', location_id='616c46e4-f370-4456-adaa-fbaf79c7b79e', version='7.1-preview.1', route_values=route_values, content=content) return self._deserialize('ManualIntervention', response) def get_releases(self, project=None, definition_id=None, definition_environment_id=None, search_text=None, created_by=None, status_filter=None, environment_status_filter=None, min_created_time=None, max_created_time=None, query_order=None, top=None, continuation_token=None, expand=None, artifact_type_id=None, source_id=None, artifact_version_id=None, source_branch_filter=None, is_deleted=None, tag_filter=None, property_filters=None, release_id_filter=None, path=None): """GetReleases. [Preview API] Get a list of releases :param str project: Project ID or project name :param int definition_id: Releases from this release definition Id. :param int definition_environment_id: :param str search_text: Releases with names containing searchText. :param str created_by: Releases created by this user. :param str status_filter: Releases that have this status. :param int environment_status_filter: :param datetime min_created_time: Releases that were created after this time. :param datetime max_created_time: Releases that were created before this time. :param str query_order: Gets the results in the defined order of created date for releases. Default is descending. :param int top: Number of releases to get. Default is 50. :param int continuation_token: Gets the releases after the continuation token provided. :param str expand: The property that should be expanded in the list of releases. :param str artifact_type_id: Releases with given artifactTypeId will be returned. Values can be Build, Jenkins, GitHub, Nuget, Team Build (external), ExternalTFSBuild, Git, TFVC, ExternalTfsXamlBuild. :param str source_id: Unique identifier of the artifact used. e.g. For build it would be {projectGuid}:{BuildDefinitionId}, for Jenkins it would be {JenkinsConnectionId}:{JenkinsDefinitionId}, for TfsOnPrem it would be {TfsOnPremConnectionId}:{ProjectName}:{TfsOnPremDefinitionId}. For third-party artifacts e.g. TeamCity, BitBucket you may refer 'uniqueSourceIdentifier' inside vss-extension.json https://github.com/Microsoft/vsts-rm-extensions/blob/master/Extensions. :param str artifact_version_id: Releases with given artifactVersionId will be returned. E.g. in case of Build artifactType, it is buildId. :param str source_branch_filter: Releases with given sourceBranchFilter will be returned. :param bool is_deleted: Gets the soft deleted releases, if true. 
:param [str] tag_filter: A comma-delimited list of tags. Only releases with these tags will be returned. :param [str] property_filters: A comma-delimited list of extended properties to be retrieved. If set, the returned Releases will contain values for the specified property Ids (if they exist). If not set, properties will not be included. Note that this will not filter out any Release from results irrespective of whether it has property set or not. :param [int] release_id_filter: A comma-delimited list of releases Ids. Only releases with these Ids will be returned. :param str path: Releases under this folder path will be returned :rtype: :class:`<[Release]> <azure.devops.v7_1.release.models.[Release]>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if definition_id is not None: query_parameters['definitionId'] = self._serialize.query('definition_id', definition_id, 'int') if definition_environment_id is not None: query_parameters['definitionEnvironmentId'] = self._serialize.query('definition_environment_id', definition_environment_id, 'int') if search_text is not None: query_parameters['searchText'] = self._serialize.query('search_text', search_text, 'str') if created_by is not None: query_parameters['createdBy'] = self._serialize.query('created_by', created_by, 'str') if status_filter is not None: query_parameters['statusFilter'] = self._serialize.query('status_filter', status_filter, 'str') if environment_status_filter is not None: query_parameters['environmentStatusFilter'] = self._serialize.query('environment_status_filter', environment_status_filter, 'int') if min_created_time is not None: query_parameters['minCreatedTime'] = self._serialize.query('min_created_time', min_created_time, 'iso-8601') if max_created_time is not None: query_parameters['maxCreatedTime'] = self._serialize.query('max_created_time', max_created_time, 'iso-8601') if query_order is not None: query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str') if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') if continuation_token is not None: query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'int') if expand is not None: query_parameters['$expand'] = self._serialize.query('expand', expand, 'str') if artifact_type_id is not None: query_parameters['artifactTypeId'] = self._serialize.query('artifact_type_id', artifact_type_id, 'str') if source_id is not None: query_parameters['sourceId'] = self._serialize.query('source_id', source_id, 'str') if artifact_version_id is not None: query_parameters['artifactVersionId'] = self._serialize.query('artifact_version_id', artifact_version_id, 'str') if source_branch_filter is not None: query_parameters['sourceBranchFilter'] = self._serialize.query('source_branch_filter', source_branch_filter, 'str') if is_deleted is not None: query_parameters['isDeleted'] = self._serialize.query('is_deleted', is_deleted, 'bool') if tag_filter is not None: tag_filter = ",".join(tag_filter) query_parameters['tagFilter'] = self._serialize.query('tag_filter', tag_filter, 'str') if property_filters is not None: property_filters = ",".join(property_filters) query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str') if release_id_filter is not None: release_id_filter = ",".join(map(str, release_id_filter)) query_parameters['releaseIdFilter'] = 
self._serialize.query('release_id_filter', release_id_filter, 'str') if path is not None: query_parameters['path'] = self._serialize.query('path', path, 'str') response = self._send(http_method='GET', location_id='a166fde7-27ad-408e-ba75-703c2cc9d500', version='7.1-preview.8', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[Release]', self._unwrap_collection(response)) def create_release(self, release_start_metadata, project): """CreateRelease. [Preview API] Create a release. :param :class:`<ReleaseStartMetadata> <azure.devops.v7_1.release.models.ReleaseStartMetadata>` release_start_metadata: Metadata to create a release. :param str project: Project ID or project name :rtype: :class:`<Release> <azure.devops.v7_1.release.models.Release>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') content = self._serialize.body(release_start_metadata, 'ReleaseStartMetadata') response = self._send(http_method='POST', location_id='a166fde7-27ad-408e-ba75-703c2cc9d500', version='7.1-preview.8', route_values=route_values, content=content) return self._deserialize('Release', response) def get_release(self, project, release_id, approval_filters=None, property_filters=None, expand=None, top_gate_records=None): """GetRelease. [Preview API] Get a Release :param str project: Project ID or project name :param int release_id: Id of the release. :param str approval_filters: A filter which would allow fetching approval steps selectively based on whether it is automated, or manual. This would also decide whether we should fetch pre and post approval snapshots. Assumes All by default :param [str] property_filters: A comma-delimited list of extended properties to be retrieved. If set, the returned Release will contain values for the specified property Ids (if they exist). If not set, properties will not be included. :param str expand: A property that should be expanded in the release. :param int top_gate_records: Number of release gate records to get. Default is 5. :rtype: :class:`<Release> <azure.devops.v7_1.release.models.Release>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if release_id is not None: route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int') query_parameters = {} if approval_filters is not None: query_parameters['approvalFilters'] = self._serialize.query('approval_filters', approval_filters, 'str') if property_filters is not None: property_filters = ",".join(property_filters) query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str') if expand is not None: query_parameters['$expand'] = self._serialize.query('expand', expand, 'str') if top_gate_records is not None: query_parameters['$topGateRecords'] = self._serialize.query('top_gate_records', top_gate_records, 'int') response = self._send(http_method='GET', location_id='a166fde7-27ad-408e-ba75-703c2cc9d500', version='7.1-preview.8', route_values=route_values, query_parameters=query_parameters) return self._deserialize('Release', response) def get_release_revision(self, project, release_id, definition_snapshot_revision, **kwargs): """GetReleaseRevision. [Preview API] Get release for a given revision number. :param str project: Project ID or project name :param int release_id: Id of the release. :param int definition_snapshot_revision: Definition snapshot revision number. 
:rtype: object """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if release_id is not None: route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int') query_parameters = {} if definition_snapshot_revision is not None: query_parameters['definitionSnapshotRevision'] = self._serialize.query('definition_snapshot_revision', definition_snapshot_revision, 'int') response = self._send(http_method='GET', location_id='a166fde7-27ad-408e-ba75-703c2cc9d500', version='7.1-preview.8', route_values=route_values, query_parameters=query_parameters, accept_media_type='text/plain') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def update_release(self, release, project, release_id): """UpdateRelease. [Preview API] Update a complete release object. :param :class:`<Release> <azure.devops.v7_1.release.models.Release>` release: Release object for update. :param str project: Project ID or project name :param int release_id: Id of the release to update. :rtype: :class:`<Release> <azure.devops.v7_1.release.models.Release>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if release_id is not None: route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int') content = self._serialize.body(release, 'Release') response = self._send(http_method='PUT', location_id='a166fde7-27ad-408e-ba75-703c2cc9d500', version='7.1-preview.8', route_values=route_values, content=content) return self._deserialize('Release', response) def update_release_resource(self, release_update_metadata, project, release_id): """UpdateReleaseResource. [Preview API] Update few properties of a release. :param :class:`<ReleaseUpdateMetadata> <azure.devops.v7_1.release.models.ReleaseUpdateMetadata>` release_update_metadata: Properties of release to update. :param str project: Project ID or project name :param int release_id: Id of the release to update. :rtype: :class:`<Release> <azure.devops.v7_1.release.models.Release>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if release_id is not None: route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int') content = self._serialize.body(release_update_metadata, 'ReleaseUpdateMetadata') response = self._send(http_method='PATCH', location_id='a166fde7-27ad-408e-ba75-703c2cc9d500', version='7.1-preview.8', route_values=route_values, content=content) return self._deserialize('Release', response) def get_definition_revision(self, project, definition_id, revision, **kwargs): """GetDefinitionRevision. [Preview API] Get release definition for a given definitionId and revision :param str project: Project ID or project name :param int definition_id: Id of the definition. :param int revision: Id of the revision. 
:rtype: object """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if definition_id is not None: route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int') if revision is not None: route_values['revision'] = self._serialize.url('revision', revision, 'int') response = self._send(http_method='GET', location_id='258b82e0-9d41-43f3-86d6-fef14ddd44bc', version='7.1-preview.1', route_values=route_values, accept_media_type='text/plain') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_release_definition_history(self, project, definition_id): """GetReleaseDefinitionHistory. [Preview API] Get revision history for a release definition :param str project: Project ID or project name :param int definition_id: Id of the definition. :rtype: [ReleaseDefinitionRevision] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if definition_id is not None: route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int') response = self._send(http_method='GET', location_id='258b82e0-9d41-43f3-86d6-fef14ddd44bc', version='7.1-preview.1', route_values=route_values) return self._deserialize('[ReleaseDefinitionRevision]', self._unwrap_collection(response))
azure-devops-python-api/azure-devops/azure/devops/v7_1/release/release_client.py/0
{ "file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/release/release_client.py", "repo_id": "azure-devops-python-api", "token_count": 22851 }
379
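A minimal sketch of driving the ReleaseClient above. The constructor and the get_release_definitions/get_releases calls are taken from the code above; the organization URL, project name and personal access token are placeholders, the id/name/status attributes read off the results live in the release models module outside this excerpt, and in practice the package README's Connection/clients factory is the more common way to obtain the client than constructing it directly.

# Illustrative sketch (not part of the generated module): list release
# definitions and recent releases with the ReleaseClient defined above.
from msrest.authentication import BasicAuthentication

from azure.devops.v7_1.release.release_client import ReleaseClient

# Placeholders: supply your own organization URL and personal access token.
credentials = BasicAuthentication('', '<personal-access-token>')
client = ReleaseClient(base_url='https://dev.azure.com/<organization>', creds=credentials)

project = '<project>'

# Up to 25 release definitions in the project (server default order is 'IdAscending').
definitions = client.get_release_definitions(project=project, top=25)
for definition in definitions:
    print(definition.id, definition.name)

# The five most recently created releases for the first definition, if any exist
# (get_releases orders by created date, descending by default).
if definitions:
    releases = client.get_releases(project=project, definition_id=definitions[0].id, top=5)
    for release in releases:
        print(release.id, release.name, release.status)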
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # Generated file, DO NOT EDIT # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------------------------- from msrest import Serializer, Deserializer from ...client import Client from . import models class TestPlanClient(Client): """TestPlan :param str base_url: Service URL :param Authentication creds: Authenticated credentials. """ def __init__(self, base_url=None, creds=None): super(TestPlanClient, self).__init__(base_url, creds) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) resource_area_identifier = None def create_test_configuration(self, test_configuration_create_update_parameters, project): """CreateTestConfiguration. [Preview API] Create a test configuration. :param :class:`<TestConfigurationCreateUpdateParameters> <azure.devops.v7_1.test_plan.models.TestConfigurationCreateUpdateParameters>` test_configuration_create_update_parameters: TestConfigurationCreateUpdateParameters :param str project: Project ID or project name :rtype: :class:`<TestConfiguration> <azure.devops.v7_1.test_plan.models.TestConfiguration>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') content = self._serialize.body(test_configuration_create_update_parameters, 'TestConfigurationCreateUpdateParameters') response = self._send(http_method='POST', location_id='8369318e-38fa-4e84-9043-4b2a75d2c256', version='7.1-preview.1', route_values=route_values, content=content) return self._deserialize('TestConfiguration', response) def delete_test_confguration(self, project, test_configuartion_id): """DeleteTestConfguration. [Preview API] Delete a test configuration by its ID. :param str project: Project ID or project name :param int test_configuartion_id: ID of the test configuration to delete. """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if test_configuartion_id is not None: query_parameters['testConfiguartionId'] = self._serialize.query('test_configuartion_id', test_configuartion_id, 'int') self._send(http_method='DELETE', location_id='8369318e-38fa-4e84-9043-4b2a75d2c256', version='7.1-preview.1', route_values=route_values, query_parameters=query_parameters) def get_test_configuration_by_id(self, project, test_configuration_id): """GetTestConfigurationById. [Preview API] Get a test configuration :param str project: Project ID or project name :param int test_configuration_id: ID of the test configuration to get. 
:rtype: :class:`<TestConfiguration> <azure.devops.v7_1.test_plan.models.TestConfiguration>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if test_configuration_id is not None: route_values['testConfigurationId'] = self._serialize.url('test_configuration_id', test_configuration_id, 'int') response = self._send(http_method='GET', location_id='8369318e-38fa-4e84-9043-4b2a75d2c256', version='7.1-preview.1', route_values=route_values) return self._deserialize('TestConfiguration', response) def get_test_configurations(self, project, continuation_token=None): """GetTestConfigurations. [Preview API] Get a list of test configurations. :param str project: Project ID or project name :param str continuation_token: If the list of configurations returned is not complete, a continuation token to query next batch of configurations is included in the response header as "x-ms-continuationtoken". Omit this parameter to get the first batch of test configurations. :rtype: :class:`<[TestConfiguration]> <azure.devops.v7_1.test_plan.models.[TestConfiguration]>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if continuation_token is not None: query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str') response = self._send(http_method='GET', location_id='8369318e-38fa-4e84-9043-4b2a75d2c256', version='7.1-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TestConfiguration]', self._unwrap_collection(response)) def update_test_configuration(self, test_configuration_create_update_parameters, project, test_configuartion_id): """UpdateTestConfiguration. [Preview API] Update a test configuration by its ID. :param :class:`<TestConfigurationCreateUpdateParameters> <azure.devops.v7_1.test_plan.models.TestConfigurationCreateUpdateParameters>` test_configuration_create_update_parameters: TestConfigurationCreateUpdateParameters :param str project: Project ID or project name :param int test_configuartion_id: ID of the test configuration to update. :rtype: :class:`<TestConfiguration> <azure.devops.v7_1.test_plan.models.TestConfiguration>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if test_configuartion_id is not None: query_parameters['testConfiguartionId'] = self._serialize.query('test_configuartion_id', test_configuartion_id, 'int') content = self._serialize.body(test_configuration_create_update_parameters, 'TestConfigurationCreateUpdateParameters') response = self._send(http_method='PATCH', location_id='8369318e-38fa-4e84-9043-4b2a75d2c256', version='7.1-preview.1', route_values=route_values, query_parameters=query_parameters, content=content) return self._deserialize('TestConfiguration', response) def create_test_plan(self, test_plan_create_params, project): """CreateTestPlan. [Preview API] Create a test plan. 
:param :class:`<TestPlanCreateParams> <azure.devops.v7_1.test_plan.models.TestPlanCreateParams>` test_plan_create_params: A testPlanCreateParams object.TestPlanCreateParams :param str project: Project ID or project name :rtype: :class:`<TestPlan> <azure.devops.v7_1.test_plan.models.TestPlan>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') content = self._serialize.body(test_plan_create_params, 'TestPlanCreateParams') response = self._send(http_method='POST', location_id='0e292477-a0c2-47f3-a9b6-34f153d627f4', version='7.1-preview.1', route_values=route_values, content=content) return self._deserialize('TestPlan', response) def delete_test_plan(self, project, plan_id): """DeleteTestPlan. [Preview API] Delete a test plan. :param str project: Project ID or project name :param int plan_id: ID of the test plan to be deleted. """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') self._send(http_method='DELETE', location_id='0e292477-a0c2-47f3-a9b6-34f153d627f4', version='7.1-preview.1', route_values=route_values) def get_test_plan_by_id(self, project, plan_id): """GetTestPlanById. [Preview API] Get a test plan by Id. :param str project: Project ID or project name :param int plan_id: ID of the test plan to get. :rtype: :class:`<TestPlan> <azure.devops.v7_1.test_plan.models.TestPlan>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') response = self._send(http_method='GET', location_id='0e292477-a0c2-47f3-a9b6-34f153d627f4', version='7.1-preview.1', route_values=route_values) return self._deserialize('TestPlan', response) def get_test_plans(self, project, owner=None, continuation_token=None, include_plan_details=None, filter_active_plans=None): """GetTestPlans. [Preview API] Get a list of test plans :param str project: Project ID or project name :param str owner: Filter for test plan by owner ID or name :param str continuation_token: If the list of plans returned is not complete, a continuation token to query next batch of plans is included in the response header as "x-ms-continuationtoken". Omit this parameter to get the first batch of test plans. 
:param bool include_plan_details: Get all properties of the test plan :param bool filter_active_plans: Get just the active plans :rtype: :class:`<[TestPlan]> <azure.devops.v7_1.test_plan.models.[TestPlan]>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if owner is not None: query_parameters['owner'] = self._serialize.query('owner', owner, 'str') if continuation_token is not None: query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str') if include_plan_details is not None: query_parameters['includePlanDetails'] = self._serialize.query('include_plan_details', include_plan_details, 'bool') if filter_active_plans is not None: query_parameters['filterActivePlans'] = self._serialize.query('filter_active_plans', filter_active_plans, 'bool') response = self._send(http_method='GET', location_id='0e292477-a0c2-47f3-a9b6-34f153d627f4', version='7.1-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TestPlan]', self._unwrap_collection(response)) def update_test_plan(self, test_plan_update_params, project, plan_id): """UpdateTestPlan. [Preview API] Update a test plan. :param :class:`<TestPlanUpdateParams> <azure.devops.v7_1.test_plan.models.TestPlanUpdateParams>` test_plan_update_params: A testPlanUpdateParams object.TestPlanUpdateParams :param str project: Project ID or project name :param int plan_id: ID of the test plan to be updated. :rtype: :class:`<TestPlan> <azure.devops.v7_1.test_plan.models.TestPlan>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') content = self._serialize.body(test_plan_update_params, 'TestPlanUpdateParams') response = self._send(http_method='PATCH', location_id='0e292477-a0c2-47f3-a9b6-34f153d627f4', version='7.1-preview.1', route_values=route_values, content=content) return self._deserialize('TestPlan', response) def get_suite_entries(self, project, suite_id, suite_entry_type=None): """GetSuiteEntries. [Preview API] Get a list of test suite entries in the test suite. :param str project: Project ID or project name :param int suite_id: Id of the parent suite. :param str suite_entry_type: :rtype: [SuiteEntry] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') query_parameters = {} if suite_entry_type is not None: query_parameters['suiteEntryType'] = self._serialize.query('suite_entry_type', suite_entry_type, 'str') response = self._send(http_method='GET', location_id='d6733edf-72f1-4252-925b-c560dfe9b75a', version='7.1-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[SuiteEntry]', self._unwrap_collection(response)) def reorder_suite_entries(self, suite_entries, project, suite_id): """ReorderSuiteEntries. [Preview API] Reorder test suite entries in the test suite. :param [SuiteEntryUpdateParams] suite_entries: List of SuiteEntry to reorder. :param str project: Project ID or project name :param int suite_id: Id of the parent test suite. 
:rtype: [SuiteEntry] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') content = self._serialize.body(suite_entries, '[SuiteEntryUpdateParams]') response = self._send(http_method='PATCH', location_id='d6733edf-72f1-4252-925b-c560dfe9b75a', version='7.1-preview.1', route_values=route_values, content=content) return self._deserialize('[SuiteEntry]', self._unwrap_collection(response)) def create_test_suite(self, test_suite_create_params, project, plan_id): """CreateTestSuite. [Preview API] Create test suite. :param :class:`<TestSuiteCreateParams> <azure.devops.v7_1.test_plan.models.TestSuiteCreateParams>` test_suite_create_params: Parameters for suite creation :param str project: Project ID or project name :param int plan_id: ID of the test plan that contains the suites. :rtype: :class:`<TestSuite> <azure.devops.v7_1.test_plan.models.TestSuite>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') content = self._serialize.body(test_suite_create_params, 'TestSuiteCreateParams') response = self._send(http_method='POST', location_id='1046d5d3-ab61-4ca7-a65a-36118a978256', version='7.1-preview.1', route_values=route_values, content=content) return self._deserialize('TestSuite', response) def delete_test_suite(self, project, plan_id, suite_id): """DeleteTestSuite. [Preview API] Delete test suite. :param str project: Project ID or project name :param int plan_id: ID of the test plan that contains the suite. :param int suite_id: ID of the test suite to delete. """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') self._send(http_method='DELETE', location_id='1046d5d3-ab61-4ca7-a65a-36118a978256', version='7.1-preview.1', route_values=route_values) def get_test_suite_by_id(self, project, plan_id, suite_id, expand=None): """GetTestSuiteById. [Preview API] Get test suite by suite id. :param str project: Project ID or project name :param int plan_id: ID of the test plan that contains the suites. :param int suite_id: ID of the suite to get. :param str expand: Include the children suites and testers details :rtype: :class:`<TestSuite> <azure.devops.v7_1.test_plan.models.TestSuite>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') query_parameters = {} if expand is not None: query_parameters['expand'] = self._serialize.query('expand', expand, 'str') response = self._send(http_method='GET', location_id='1046d5d3-ab61-4ca7-a65a-36118a978256', version='7.1-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('TestSuite', response) def get_test_suites_for_plan(self, project, plan_id, expand=None, continuation_token=None, as_tree_view=None): """GetTestSuitesForPlan. [Preview API] Get test suites for plan. 
:param str project: Project ID or project name :param int plan_id: ID of the test plan for which suites are requested. :param str expand: Include the children suites and testers details. :param str continuation_token: If the list of suites returned is not complete, a continuation token to query next batch of suites is included in the response header as "x-ms-continuationtoken". Omit this parameter to get the first batch of test suites. :param bool as_tree_view: If the suites returned should be in a tree structure. :rtype: :class:`<[TestSuite]> <azure.devops.v7_1.test_plan.models.[TestSuite]>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') query_parameters = {} if expand is not None: query_parameters['expand'] = self._serialize.query('expand', expand, 'str') if continuation_token is not None: query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str') if as_tree_view is not None: query_parameters['asTreeView'] = self._serialize.query('as_tree_view', as_tree_view, 'bool') response = self._send(http_method='GET', location_id='1046d5d3-ab61-4ca7-a65a-36118a978256', version='7.1-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TestSuite]', self._unwrap_collection(response)) def update_test_suite(self, test_suite_update_params, project, plan_id, suite_id): """UpdateTestSuite. [Preview API] Update test suite. :param :class:`<TestSuiteUpdateParams> <azure.devops.v7_1.test_plan.models.TestSuiteUpdateParams>` test_suite_update_params: Parameters for suite updation :param str project: Project ID or project name :param int plan_id: ID of the test plan that contains the suites. :param int suite_id: ID of the parent suite. :rtype: :class:`<TestSuite> <azure.devops.v7_1.test_plan.models.TestSuite>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') content = self._serialize.body(test_suite_update_params, 'TestSuiteUpdateParams') response = self._send(http_method='PATCH', location_id='1046d5d3-ab61-4ca7-a65a-36118a978256', version='7.1-preview.1', route_values=route_values, content=content) return self._deserialize('TestSuite', response) def get_suites_by_test_case_id(self, test_case_id): """GetSuitesByTestCaseId. [Preview API] Find the list of all test suites in which a given test case is present. This is helpful if you need to find out which test suites are using a test case, when you need to make changes to a test case. :param int test_case_id: ID of the test case for which suites need to be fetched. :rtype: [TestSuite] """ query_parameters = {} if test_case_id is not None: query_parameters['testCaseId'] = self._serialize.query('test_case_id', test_case_id, 'int') response = self._send(http_method='GET', location_id='a4080e84-f17b-4fad-84f1-7960b6525bf2', version='7.1-preview.1', query_parameters=query_parameters) return self._deserialize('[TestSuite]', self._unwrap_collection(response)) def add_test_cases_to_suite(self, suite_test_case_create_update_parameters, project, plan_id, suite_id): """AddTestCasesToSuite. 
[Preview API] Add test cases to a suite with specified configurations :param [SuiteTestCaseCreateUpdateParameters] suite_test_case_create_update_parameters: SuiteTestCaseCreateUpdateParameters object. :param str project: Project ID or project name :param int plan_id: ID of the test plan to which test cases are to be added. :param int suite_id: ID of the test suite to which test cases are to be added. :rtype: [TestCase] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') content = self._serialize.body(suite_test_case_create_update_parameters, '[SuiteTestCaseCreateUpdateParameters]') response = self._send(http_method='POST', location_id='a9bd61ac-45cf-4d13-9441-43dcd01edf8d', version='7.1-preview.3', route_values=route_values, content=content) return self._deserialize('[TestCase]', self._unwrap_collection(response)) def get_test_case(self, project, plan_id, suite_id, test_case_id, wit_fields=None, return_identity_ref=None): """GetTestCase. [Preview API] Get a particular Test Case from a Suite. :param str project: Project ID or project name :param int plan_id: ID of the test plan for which test cases are requested. :param int suite_id: ID of the test suite for which test cases are requested. :param str test_case_id: Test Case Id to be fetched. :param str wit_fields: Get the list of witFields. :param bool return_identity_ref: If set to true, returns all identity fields, like AssignedTo, ActivatedBy etc., as IdentityRef objects. If set to false, these fields are returned as unique names in string format. This is false by default. :rtype: [TestCase] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') if test_case_id is not None: route_values['testCaseId'] = self._serialize.url('test_case_id', test_case_id, 'str') query_parameters = {} if wit_fields is not None: query_parameters['witFields'] = self._serialize.query('wit_fields', wit_fields, 'str') if return_identity_ref is not None: query_parameters['returnIdentityRef'] = self._serialize.query('return_identity_ref', return_identity_ref, 'bool') response = self._send(http_method='GET', location_id='a9bd61ac-45cf-4d13-9441-43dcd01edf8d', version='7.1-preview.3', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TestCase]', self._unwrap_collection(response)) def get_test_case_list(self, project, plan_id, suite_id, test_ids=None, configuration_ids=None, wit_fields=None, continuation_token=None, return_identity_ref=None, expand=None, exclude_flags=None, is_recursive=None): """GetTestCaseList. [Preview API] Get Test Case List return those test cases which have all the configuration Ids as mentioned in the optional parameter. If configuration Ids is null, it return all the test cases :param str project: Project ID or project name :param int plan_id: ID of the test plan for which test cases are requested. :param int suite_id: ID of the test suite for which test cases are requested. :param str test_ids: Test Case Ids to be fetched. 
:param str configuration_ids: Fetch Test Cases which contains all the configuration Ids specified. :param str wit_fields: Get the list of witFields. :param str continuation_token: If the list of test cases returned is not complete, a continuation token to query next batch of test cases is included in the response header as "x-ms-continuationtoken". Omit this parameter to get the first batch of test cases. :param bool return_identity_ref: If set to true, returns all identity fields, like AssignedTo, ActivatedBy etc., as IdentityRef objects. If set to false, these fields are returned as unique names in string format. This is false by default. :param bool expand: If set to false, will get a smaller payload containing only basic details about the suite test case object :param str exclude_flags: Flag to exclude various values from payload. For example to remove point assignments pass exclude = 1. To remove extra information (links, test plan , test suite) pass exclude = 2. To remove both extra information and point assignments pass exclude = 3 (1 + 2). :param bool is_recursive: :rtype: :class:`<[TestCase]> <azure.devops.v7_1.test_plan.models.[TestCase]>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') query_parameters = {} if test_ids is not None: query_parameters['testIds'] = self._serialize.query('test_ids', test_ids, 'str') if configuration_ids is not None: query_parameters['configurationIds'] = self._serialize.query('configuration_ids', configuration_ids, 'str') if wit_fields is not None: query_parameters['witFields'] = self._serialize.query('wit_fields', wit_fields, 'str') if continuation_token is not None: query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str') if return_identity_ref is not None: query_parameters['returnIdentityRef'] = self._serialize.query('return_identity_ref', return_identity_ref, 'bool') if expand is not None: query_parameters['expand'] = self._serialize.query('expand', expand, 'bool') if exclude_flags is not None: query_parameters['excludeFlags'] = self._serialize.query('exclude_flags', exclude_flags, 'str') if is_recursive is not None: query_parameters['isRecursive'] = self._serialize.query('is_recursive', is_recursive, 'bool') response = self._send(http_method='GET', location_id='a9bd61ac-45cf-4d13-9441-43dcd01edf8d', version='7.1-preview.3', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TestCase]', self._unwrap_collection(response)) def remove_test_cases_from_suite(self, project, plan_id, suite_id, test_case_ids): """RemoveTestCasesFromSuite. [Preview API] Removes test cases from a suite based on the list of test case Ids provided. :param str project: Project ID or project name :param int plan_id: ID of the test plan from which test cases are to be removed. :param int suite_id: ID of the test suite from which test cases are to be removed. :param str test_case_ids: Test Case Ids to be removed. 
""" route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') query_parameters = {} if test_case_ids is not None: query_parameters['testCaseIds'] = self._serialize.query('test_case_ids', test_case_ids, 'str') self._send(http_method='DELETE', location_id='a9bd61ac-45cf-4d13-9441-43dcd01edf8d', version='7.1-preview.3', route_values=route_values, query_parameters=query_parameters) def remove_test_cases_list_from_suite(self, project, plan_id, suite_id, test_ids): """RemoveTestCasesListFromSuite. [Preview API] Removes test cases from a suite based on the list of test case Ids provided. This API can be used to remove a larger number of test cases. :param str project: Project ID or project name :param int plan_id: ID of the test plan from which test cases are to be removed. :param int suite_id: ID of the test suite from which test cases are to be removed. :param str test_ids: Comma separated string of Test Case Ids to be removed. """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') query_parameters = {} if test_ids is not None: query_parameters['testIds'] = self._serialize.query('test_ids', test_ids, 'str') self._send(http_method='DELETE', location_id='a9bd61ac-45cf-4d13-9441-43dcd01edf8d', version='7.1-preview.3', route_values=route_values, query_parameters=query_parameters) def update_suite_test_cases(self, suite_test_case_create_update_parameters, project, plan_id, suite_id): """UpdateSuiteTestCases. [Preview API] Update the configurations for test cases :param [SuiteTestCaseCreateUpdateParameters] suite_test_case_create_update_parameters: A SuiteTestCaseCreateUpdateParameters object. :param str project: Project ID or project name :param int plan_id: ID of the test plan to which test cases are to be updated. :param int suite_id: ID of the test suite to which test cases are to be updated. :rtype: [TestCase] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') content = self._serialize.body(suite_test_case_create_update_parameters, '[SuiteTestCaseCreateUpdateParameters]') response = self._send(http_method='PATCH', location_id='a9bd61ac-45cf-4d13-9441-43dcd01edf8d', version='7.1-preview.3', route_values=route_values, content=content) return self._deserialize('[TestCase]', self._unwrap_collection(response)) def clone_test_case(self, clone_request_body, project): """CloneTestCase. 
[Preview API] :param :class:`<CloneTestCaseParams> <azure.devops.v7_1.test_plan.models.CloneTestCaseParams>` clone_request_body: :param str project: Project ID or project name :rtype: :class:`<CloneTestCaseOperationInformation> <azure.devops.v7_1.test_plan.models.CloneTestCaseOperationInformation>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') content = self._serialize.body(clone_request_body, 'CloneTestCaseParams') response = self._send(http_method='POST', location_id='529b2b8d-82f4-4893-b1e4-1e74ea534673', version='7.1-preview.2', route_values=route_values, content=content) return self._deserialize('CloneTestCaseOperationInformation', response) def get_test_case_clone_information(self, project, clone_operation_id): """GetTestCaseCloneInformation. [Preview API] Get clone information. :param str project: Project ID or project name :param int clone_operation_id: Operation ID returned when we queue a clone operation :rtype: :class:`<CloneTestCaseOperationInformation> <azure.devops.v7_1.test_plan.models.CloneTestCaseOperationInformation>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if clone_operation_id is not None: route_values['cloneOperationId'] = self._serialize.url('clone_operation_id', clone_operation_id, 'int') response = self._send(http_method='GET', location_id='529b2b8d-82f4-4893-b1e4-1e74ea534673', version='7.1-preview.2', route_values=route_values) return self._deserialize('CloneTestCaseOperationInformation', response) def delete_test_case(self, project, test_case_id): """DeleteTestCase. [Preview API] Delete a test case. :param str project: Project ID or project name :param int test_case_id: Id of test case to be deleted. """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if test_case_id is not None: route_values['testCaseId'] = self._serialize.url('test_case_id', test_case_id, 'int') self._send(http_method='DELETE', location_id='29006fb5-816b-4ff7-a329-599943569229', version='7.1-preview.1', route_values=route_values) def clone_test_plan(self, clone_request_body, project, deep_clone=None): """CloneTestPlan. [Preview API] Clone test plan :param :class:`<CloneTestPlanParams> <azure.devops.v7_1.test_plan.models.CloneTestPlanParams>` clone_request_body: Plan Clone Request Body detail TestPlanCloneRequest :param str project: Project ID or project name :param bool deep_clone: Clones all the associated test cases as well :rtype: :class:`<CloneTestPlanOperationInformation> <azure.devops.v7_1.test_plan.models.CloneTestPlanOperationInformation>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if deep_clone is not None: query_parameters['deepClone'] = self._serialize.query('deep_clone', deep_clone, 'bool') content = self._serialize.body(clone_request_body, 'CloneTestPlanParams') response = self._send(http_method='POST', location_id='e65df662-d8a3-46c7-ae1c-14e2d4df57e1', version='7.1-preview.2', route_values=route_values, query_parameters=query_parameters, content=content) return self._deserialize('CloneTestPlanOperationInformation', response) def get_clone_information(self, project, clone_operation_id): """GetCloneInformation. [Preview API] Get clone information. 
:param str project: Project ID or project name :param int clone_operation_id: Operation ID returned when we queue a clone operation :rtype: :class:`<CloneTestPlanOperationInformation> <azure.devops.v7_1.test_plan.models.CloneTestPlanOperationInformation>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if clone_operation_id is not None: route_values['cloneOperationId'] = self._serialize.url('clone_operation_id', clone_operation_id, 'int') response = self._send(http_method='GET', location_id='e65df662-d8a3-46c7-ae1c-14e2d4df57e1', version='7.1-preview.2', route_values=route_values) return self._deserialize('CloneTestPlanOperationInformation', response) def get_points(self, project, plan_id, suite_id, point_id, return_identity_ref=None, include_point_details=None): """GetPoints. [Preview API] Get a particular Test Point from a suite. :param str project: Project ID or project name :param int plan_id: ID of the test plan for which test points are requested. :param int suite_id: ID of the test suite for which test points are requested. :param str point_id: ID of test point to be fetched. :param bool return_identity_ref: If set to true, returns the AssignedTo field in TestCaseReference as IdentityRef object. :param bool include_point_details: If set to false, will get a smaller payload containing only basic details about the test point object :rtype: [TestPoint] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') query_parameters = {} if point_id is not None: query_parameters['pointId'] = self._serialize.query('point_id', point_id, 'str') if return_identity_ref is not None: query_parameters['returnIdentityRef'] = self._serialize.query('return_identity_ref', return_identity_ref, 'bool') if include_point_details is not None: query_parameters['includePointDetails'] = self._serialize.query('include_point_details', include_point_details, 'bool') response = self._send(http_method='GET', location_id='52df686e-bae4-4334-b0ee-b6cf4e6f6b73', version='7.1-preview.2', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TestPoint]', self._unwrap_collection(response)) def get_points_list(self, project, plan_id, suite_id, test_point_ids=None, test_case_id=None, continuation_token=None, return_identity_ref=None, include_point_details=None, is_recursive=None): """GetPointsList. [Preview API] Get all the points inside a suite based on some filters :param str project: Project ID or project name :param int plan_id: ID of the test plan for which test points are requested. :param int suite_id: ID of the test suite for which test points are requested :param str test_point_ids: ID of test points to fetch. :param str test_case_id: Get Test Points for specific test case Ids. :param str continuation_token: If the list of test point returned is not complete, a continuation token to query next batch of test points is included in the response header as "x-ms-continuationtoken". Omit this parameter to get the first batch of test points. :param bool return_identity_ref: If set to true, returns the AssignedTo field in TestCaseReference as IdentityRef object. 
:param bool include_point_details: If set to false, will get a smaller payload containing only basic details about the test point object :param bool is_recursive: If set to true, will also fetch test points belonging to child suites recursively. :rtype: :class:`<[TestPoint]> <azure.devops.v7_1.test_plan.models.[TestPoint]>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') query_parameters = {} if test_point_ids is not None: query_parameters['testPointIds'] = self._serialize.query('test_point_ids', test_point_ids, 'str') if test_case_id is not None: query_parameters['testCaseId'] = self._serialize.query('test_case_id', test_case_id, 'str') if continuation_token is not None: query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str') if return_identity_ref is not None: query_parameters['returnIdentityRef'] = self._serialize.query('return_identity_ref', return_identity_ref, 'bool') if include_point_details is not None: query_parameters['includePointDetails'] = self._serialize.query('include_point_details', include_point_details, 'bool') if is_recursive is not None: query_parameters['isRecursive'] = self._serialize.query('is_recursive', is_recursive, 'bool') response = self._send(http_method='GET', location_id='52df686e-bae4-4334-b0ee-b6cf4e6f6b73', version='7.1-preview.2', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TestPoint]', self._unwrap_collection(response)) def update_test_points(self, test_point_update_params, project, plan_id, suite_id, include_point_details=None, return_identity_ref=None): """UpdateTestPoints. [Preview API] Update Test Points. This is used to Reset test point to active, update the outcome of a test point or update the tester of a test point :param [TestPointUpdateParams] test_point_update_params: A TestPointUpdateParams Object. :param str project: Project ID or project name :param int plan_id: ID of the test plan for which test points are requested. :param int suite_id: ID of the test suite for which test points are requested. :param bool include_point_details: If set to false, will get a smaller payload containing only basic details about the test point object :param bool return_identity_ref: If set to true, returns the AssignedTo field in TestCaseReference as IdentityRef object. 
:rtype: [TestPoint] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'int') if suite_id is not None: route_values['suiteId'] = self._serialize.url('suite_id', suite_id, 'int') query_parameters = {} if include_point_details is not None: query_parameters['includePointDetails'] = self._serialize.query('include_point_details', include_point_details, 'bool') if return_identity_ref is not None: query_parameters['returnIdentityRef'] = self._serialize.query('return_identity_ref', return_identity_ref, 'bool') content = self._serialize.body(test_point_update_params, '[TestPointUpdateParams]') response = self._send(http_method='PATCH', location_id='52df686e-bae4-4334-b0ee-b6cf4e6f6b73', version='7.1-preview.2', route_values=route_values, query_parameters=query_parameters, content=content) return self._deserialize('[TestPoint]', self._unwrap_collection(response)) def clone_test_suite(self, clone_request_body, project, deep_clone=None): """CloneTestSuite. [Preview API] Clone test suite :param :class:`<CloneTestSuiteParams> <azure.devops.v7_1.test_plan.models.CloneTestSuiteParams>` clone_request_body: Suite Clone Request Body detail TestSuiteCloneRequest :param str project: Project ID or project name :param bool deep_clone: Clones all the associated test cases as well :rtype: :class:`<CloneTestSuiteOperationInformation> <azure.devops.v7_1.test_plan.models.CloneTestSuiteOperationInformation>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if deep_clone is not None: query_parameters['deepClone'] = self._serialize.query('deep_clone', deep_clone, 'bool') content = self._serialize.body(clone_request_body, 'CloneTestSuiteParams') response = self._send(http_method='POST', location_id='181d4c97-0e98-4ee2-ad6a-4cada675e555', version='7.1-preview.2', route_values=route_values, query_parameters=query_parameters, content=content) return self._deserialize('CloneTestSuiteOperationInformation', response) def get_suite_clone_information(self, project, clone_operation_id): """GetSuiteCloneInformation. [Preview API] Get clone information. :param str project: Project ID or project name :param int clone_operation_id: Operation ID returned when we queue a clone operation :rtype: :class:`<CloneTestSuiteOperationInformation> <azure.devops.v7_1.test_plan.models.CloneTestSuiteOperationInformation>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if clone_operation_id is not None: route_values['cloneOperationId'] = self._serialize.url('clone_operation_id', clone_operation_id, 'int') response = self._send(http_method='GET', location_id='181d4c97-0e98-4ee2-ad6a-4cada675e555', version='7.1-preview.2', route_values=route_values) return self._deserialize('CloneTestSuiteOperationInformation', response) def create_test_variable(self, test_variable_create_update_parameters, project): """CreateTestVariable. [Preview API] Create a test variable. 
:param :class:`<TestVariableCreateUpdateParameters> <azure.devops.v7_1.test_plan.models.TestVariableCreateUpdateParameters>` test_variable_create_update_parameters: TestVariableCreateUpdateParameters :param str project: Project ID or project name :rtype: :class:`<TestVariable> <azure.devops.v7_1.test_plan.models.TestVariable>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') content = self._serialize.body(test_variable_create_update_parameters, 'TestVariableCreateUpdateParameters') response = self._send(http_method='POST', location_id='2c61fac6-ac4e-45a5-8c38-1c2b8fd8ea6c', version='7.1-preview.1', route_values=route_values, content=content) return self._deserialize('TestVariable', response) def delete_test_variable(self, project, test_variable_id): """DeleteTestVariable. [Preview API] Delete a test variable by its ID. :param str project: Project ID or project name :param int test_variable_id: ID of the test variable to delete. """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if test_variable_id is not None: route_values['testVariableId'] = self._serialize.url('test_variable_id', test_variable_id, 'int') self._send(http_method='DELETE', location_id='2c61fac6-ac4e-45a5-8c38-1c2b8fd8ea6c', version='7.1-preview.1', route_values=route_values) def get_test_variable_by_id(self, project, test_variable_id): """GetTestVariableById. [Preview API] Get a test variable by its ID. :param str project: Project ID or project name :param int test_variable_id: ID of the test variable to get. :rtype: :class:`<TestVariable> <azure.devops.v7_1.test_plan.models.TestVariable>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if test_variable_id is not None: route_values['testVariableId'] = self._serialize.url('test_variable_id', test_variable_id, 'int') response = self._send(http_method='GET', location_id='2c61fac6-ac4e-45a5-8c38-1c2b8fd8ea6c', version='7.1-preview.1', route_values=route_values) return self._deserialize('TestVariable', response) def get_test_variables(self, project, continuation_token=None): """GetTestVariables. [Preview API] Get a list of test variables. :param str project: Project ID or project name :param str continuation_token: If the list of variables returned is not complete, a continuation token to query next batch of variables is included in the response header as "x-ms-continuationtoken". Omit this parameter to get the first batch of test variables. :rtype: :class:`<[TestVariable]> <azure.devops.v7_1.test_plan.models.[TestVariable]>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if continuation_token is not None: query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str') response = self._send(http_method='GET', location_id='2c61fac6-ac4e-45a5-8c38-1c2b8fd8ea6c', version='7.1-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TestVariable]', self._unwrap_collection(response)) def update_test_variable(self, test_variable_create_update_parameters, project, test_variable_id): """UpdateTestVariable. [Preview API] Update a test variable by its ID. 
:param :class:`<TestVariableCreateUpdateParameters> <azure.devops.v7_1.test_plan.models.TestVariableCreateUpdateParameters>` test_variable_create_update_parameters: TestVariableCreateUpdateParameters :param str project: Project ID or project name :param int test_variable_id: ID of the test variable to update. :rtype: :class:`<TestVariable> <azure.devops.v7_1.test_plan.models.TestVariable>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if test_variable_id is not None: route_values['testVariableId'] = self._serialize.url('test_variable_id', test_variable_id, 'int') content = self._serialize.body(test_variable_create_update_parameters, 'TestVariableCreateUpdateParameters') response = self._send(http_method='PATCH', location_id='2c61fac6-ac4e-45a5-8c38-1c2b8fd8ea6c', version='7.1-preview.1', route_values=route_values, content=content) return self._deserialize('TestVariable', response)
azure-devops-python-api/azure-devops/azure/devops/v7_1/test_plan/test_plan_client.py/0
{ "file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/test_plan/test_plan_client.py", "repo_id": "azure-devops-python-api", "token_count": 24015 }
380
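A minimal usage sketch for the generated `TestPlanClient` above. The organization URL, personal access token, and project name are placeholders, and the client is constructed directly from the `base_url`/`creds` signature shown in the file; in application code it would more commonly be obtained through the SDK's `Connection` helper.

```python
from msrest.authentication import BasicAuthentication

from azure.devops.v7_1.test_plan.test_plan_client import TestPlanClient

# Placeholder organization URL and personal access token (PAT).
credentials = BasicAuthentication("", "<personal-access-token>")
client = TestPlanClient(base_url="https://dev.azure.com/<organization>", creds=credentials)

# Fetch the first batch of active test plans for a project; later batches can be
# requested with the continuation token returned in the "x-ms-continuationtoken"
# response header, as described in the get_test_plans docstring.
plans = client.get_test_plans(project="<project-name>", filter_active_plans=True)
for plan in plans:
    print(plan.id, plan.name)
```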
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from setuptools import setup, find_namespace_packages NAME = "azure-devops" VERSION = "7.1.0b4" # To install the library, run the following # # python setup.py install # # prerequisite: setuptools # http://pypi.python.org/pypi/setuptools REQUIRES = [ "msrest>=0.7.1,<0.8.0" ] CLASSIFIERS = [ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', 'License :: OSI Approved :: MIT License', ] setup( name=NAME, version=VERSION, license='MIT', description="Python wrapper around the Azure DevOps 7.x APIs", author="Microsoft Corporation", url="https://github.com/microsoft/azure-devops-python-api", keywords=["Microsoft", "VSTS", "Team Services", "SDK", "AzureTfs", "AzureDevOps", "DevOps"], install_requires=REQUIRES, classifiers=CLASSIFIERS, packages=find_namespace_packages(), include_package_data=True, python_requires=">=3.7", long_description="Azure DevOps Python clients" )
azure-devops-python-api/azure-devops/setup.py/0
{ "file_path": "azure-devops-python-api/azure-devops/setup.py", "repo_id": "azure-devops-python-api", "token_count": 536 }
381
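The `setup.py` above publishes the distribution under the name `azure-devops` with version `7.1.0b4`. A small sketch to confirm what is actually installed in the current environment (assumes Python 3.8+ for `importlib.metadata`):

```python
# Check which version of the "azure-devops" distribution configured above is
# installed; importlib.metadata requires Python 3.8 or later.
from importlib.metadata import PackageNotFoundError, version

try:
    print(version("azure-devops"))  # e.g. "7.1.0b4" for this setup.py
except PackageNotFoundError:
    print("azure-devops is not installed; install it with pip first")
```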
# Contributing to Azure-Quantum-Python #

If you would like to become an active contributor to this project, please follow the instructions provided in [Microsoft Azure Projects Contribution Guidelines](https://opensource.microsoft.com/collaborate/).

## Pre-requisites

Install pre-reqs:

```bash
pip install azure_devtools pytest pytest-azurepipelines pytest-cov
```

## Building and testing ##

The Azure Quantum team uses [Anaconda](https://www.anaconda.com/products/individual) to create virtual environments for local unit and integration testing as well as in CI/CD.

To create a new conda environment for the `azure-quantum` package, run at the root of the `azure-quantum` directory:

```bash
conda env create -f environment.yml
```

Then to activate the environment:

```bash
conda activate azurequantum
```

If you created the conda environment a while ago, make sure you have the latest versions of all dependencies by updating your environment:

```bash
conda env update -f environment.yml --prune
```

### Install the local development package ###

To install the package in development mode, run:

```bash
pip install -e .
```

### Unit tests ###

To run the unit tests, run `pytest` from the root of the `azure-quantum` directory:

```bash
pytest
```

To run a specific unit test class, run:

```bash
pytest ./tests/unit/test_job.py
```

To run a specific unit test case, run:

```bash
pytest -k test_job_refresh
```

#### Recordings ####

To read more about how to create and update recordings for testing code that interacts with a live API, see the [Azure Quantum Unit tests README](./azure-quantum/tests/README.md).

Before merging your code contribution to `main`, make sure that all new code is covered by unit tests and that the unit tests have up-to-date recordings. If you recorded your tests and then updated or refactored the code afterwards, remember to re-record the tests.

### Update/re-generate the Azure Quantum internal SDK client based on Swagger ###

The internal Azure Quantum Python SDK client (`azure/quantum/_client`) needs to be re-generated every time there is a change in the [Azure Quantum Service API definition](https://github.com/Azure/azure-rest-api-specs/tree/main/specification/quantum/data-plane) (aka Swagger).

To re-generate the client based on the latest published API definition, run the following PowerShell script:

```powershell
./eng/Generate-DataPlane-Client.ps1
```

> See the Generate-DataPlane-Client.ps1 script for more options

After re-generating the client, make sure to:

1. Re-run/re-record all unit tests against the live service (you can run `./eng/Record-Tests.ps1`)
1. If necessary, adjust the convenience layer for breaking changes or to expose new features
1. Add new unit tests for new features and record them too

### Building the `azure-quantum` Package wheel ###

The Azure Quantum Python SDK uses a standard `setuptools`-based packaging strategy. To build a platform-independent wheel, run the setup script with `bdist_wheel`:

```bash
python setup.py bdist_wheel
```

By default, this will create an `azure-quantum` wheel in `dist/` with the version number set to 0.0.0.1. To provide a more useful version number, set the `PYTHON_VERSION` environment variable before running `setup.py`.
### Environment Variables ### In addition to the [common Azure SDK environment variables](https://azure.github.io/azure-sdk/general_azurecore.html#environment-variables), you can also set the following environment variables to change the behaviour of the Azure Quantum SDK for Python: | Environment Variable | Description | | -------------------------------- | ---------------------------------------------------------------------- | | AZURE_QUANTUM_PYTHON_APPID | Prefixes the HTTP User-Agent header with the specified value | ## Code of Conduct ## This project's code of conduct can be found in the [CODE_OF_CONDUCT.md file](https://github.com/microsoft/azure-quantum-python/blob/main/CODE_OF_CONDUCT.md).
azure-quantum-python/CONTRIBUTING.md/0
{ "file_path": "azure-quantum-python/CONTRIBUTING.md", "repo_id": "azure-quantum-python", "token_count": 1194 }
382
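To illustrate the `AZURE_QUANTUM_PYTHON_APPID` variable from the contributing guide above, here is a hedged sketch in Python. The resource ID, location, and application identifier are placeholders, and the variable is assumed to be set before the `Workspace` is created so that subsequent HTTP calls pick it up.

```python
import os

# Hypothetical application identifier; it is used to prefix the HTTP User-Agent header.
os.environ["AZURE_QUANTUM_PYTHON_APPID"] = "my-app/1.0"

from azure.quantum import Workspace

# Placeholder workspace coordinates; see the Overview page of your Quantum
# workspace for the real resource ID and location.
workspace = Workspace(
    resource_id="/subscriptions/<sub-id>/resourceGroups/<rg>/providers/"
                "Microsoft.Quantum/Workspaces/<workspace-name>",
    location="<location>",
)
# Jobs submitted through this workspace now carry the custom User-Agent prefix.
```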
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from ._client import QuantumClient from ._version import VERSION __version__ = VERSION try: from ._patch import __all__ as _patch_all from ._patch import * # pylint: disable=unused-wildcard-import except ImportError: _patch_all = [] from ._patch import patch_sdk as _patch_sdk __all__ = [ "QuantumClient", ] __all__.extend([p for p in _patch_all if p not in __all__]) _patch_sdk()
azure-quantum-python/azure-quantum/azure/quantum/_client/__init__.py/0
{ "file_path": "azure-quantum-python/azure-quantum/azure/quantum/_client/__init__.py", "repo_id": "azure-quantum-python", "token_count": 227 }
383
## # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. ## """Defines argument types for Microsoft Estimator""" from .types import EmptyArray, Pauli, Range, Result __all__ = ['EmptyArray', 'Pauli', 'Range', 'Result']
azure-quantum-python/azure-quantum/azure/quantum/argument_types/__init__.py/0
{ "file_path": "azure-quantum-python/azure-quantum/azure/quantum/argument_types/__init__.py", "repo_id": "azure-quantum-python", "token_count": 73 }
384
## # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. ## import abc from typing import TYPE_CHECKING, Union from azure.quantum._client.models import ItemDetails, ItemType, SessionDetails, JobDetails if TYPE_CHECKING: from azure.quantum.workspace import Workspace __all__ = ["WorkspaceItem"] class WorkspaceItem(abc.ABC): """ Workspace item base class. :param workspace: Workspace instance to submit job to :type workspace: Workspace :param details: Item details model, contains item ID, name and other details :type details: ItemDetails """ def __init__(self, workspace: "Workspace", details: ItemDetails, **kwargs): self._workspace = workspace self._details = details self._item_type = details.item_type @property def workspace(self) -> "Workspace": """Workspace of the Workspace item""" return self._workspace @property def details(self) -> Union[SessionDetails, JobDetails]: """Workspace item details""" return self._details @property def id(self) -> str: """Id of the Workspace item""" return self._details.id @property def item_type(self) -> ItemType: """Workspace item type""" return self._item_type
azure-quantum-python/azure-quantum/azure/quantum/job/workspace_item.py/0
{ "file_path": "azure-quantum-python/azure-quantum/azure/quantum/job/workspace_item.py", "repo_id": "azure-quantum-python", "token_count": 466 }
385
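A short sketch showing how the `WorkspaceItem` properties (`id`, `details`, `item_type`) surface on a concrete item such as a job fetched from a workspace. The resource ID, location, and job ID are placeholders.

```python
from azure.quantum import Workspace

# Placeholder workspace coordinates and job ID.
workspace = Workspace(resource_id="<resource-id>", location="<location>")

job = workspace.get_job("<job-id>")  # Job builds on WorkspaceItem
print(job.id)                        # WorkspaceItem.id
print(job.item_type)                 # WorkspaceItem.item_type (job or session)
print(job.details.status)            # JobDetails exposed via WorkspaceItem.details
```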
## # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. ## from typing import Any, Dict, List from warnings import warn from azure.quantum.target.target import ( Target, _determine_shots_or_deprecated_num_shots, ) from azure.quantum.job.job import Job from azure.quantum.workspace import Workspace from azure.quantum._client.models import CostEstimate, UsageEvent COST_1QUBIT_GATE_MAP = { "ionq.simulator" : 0.0, "ionq.qpu" : 0.00003, "ionq.qpu.aria-1" : 0.0002205, "ionq.qpu.aria-2" : 0.0002205, "ionq.qpu.forte-1" : 0.0002205 } COST_2QUBIT_GATE_MAP = { "ionq.simulator" : 0.0, "ionq.qpu" : 0.0003, "ionq.qpu.aria-1" : 0.00098, "ionq.qpu.aria-2" : 0.00098, "ionq.qpu.forte-1" : 0.00098 } MIN_PRICE_MAP = { "ionq.simulator" : 0.0, "ionq.qpu" : 1.0, "ionq.qpu.aria-1" : 97.5, "ionq.qpu.aria-2" : 97.5, "ionq.qpu.forte-1" : 97.5 } def int_to_bitstring(k: int, num_qubits: int, measured_qubit_ids: List[int]): # flip bitstring to convert to little Endian bitstring = format(int(k), f"0{num_qubits}b")[::-1] # flip bitstring to convert back to big Endian return "".join([bitstring[n] for n in measured_qubit_ids])[::-1] class IonQ(Target): """IonQ target.""" target_names = ( "ionq.qpu", "ionq.simulator", "ionq.qpu.aria-1", "ionq.qpu.aria-2", "ionq.qpu.forte-1" ) _SHOTS_PARAM_NAME = "shots" def __init__( self, workspace: Workspace, name: str = "ionq.simulator", input_data_format: str = "ionq.circuit.v1", output_data_format: str = "ionq.quantum-results.v1", capability: str = "BasicExecution", provider_id: str = "IonQ", content_type: str = "application/json", encoding: str = "", **kwargs ): super().__init__( workspace=workspace, name=name, input_data_format=input_data_format, output_data_format=output_data_format, capability=capability, provider_id=provider_id, content_type=content_type, encoding=encoding, **kwargs ) def submit( self, circuit: Dict[str, Any] = None, name: str = "ionq-job", shots: int = None, input_params: Dict[str, Any] = None, **kwargs ) -> Job: """Submit an IonQ circuit (JSON format) :param circuit: Quantum circuit in IonQ JSON format (for examples, see: https://docs.ionq.com/#section/Sample-JSON-Circuits) :type circuit: Dict[str, Any] :param name: Job name :type name: str :param shots: Number of shots, defaults to None :type shots: int :param input_params: Optional input params dict :type input_params: Dict[str, Any] :return: Azure Quantum job :rtype: Job """ input_data = kwargs.pop("input_data", circuit) if input_data is None: raise ValueError( "Either the `circuit` parameter or the `input_data` parameter must have a value." ) if input_params is None: input_params = {} num_shots = kwargs.pop("num_shots", None) shots = _determine_shots_or_deprecated_num_shots( shots=shots, num_shots=num_shots, ) return super().submit( input_data=input_data, name=name, shots=shots, input_params=input_params, **kwargs ) def estimate_cost( self, circuit: Dict[str, Any], num_shots: int = None, price_1q: float = None, price_2q: float = None, min_price: float = None, shots: int = None ) -> CostEstimate: """Estimate the cost of submitting a circuit to IonQ targets. Optionally, you can provide the number of gate and measurement operations manually. The actual price charged by the provider may differ from this calculation. Specify pricing details for your area to get most accurate results. By default, this function charges depending on the target: ionq.qpu: price_1q = 0.00003 USD for a single-qubit gate. price_2q = 0.0003 USD for a two-qubit gate. 
min_price = 1 USD, total minimum price per circuit. ionq.qpu.aria-1: price_1q = 0.00022 USD for a single-qubit gate. price_2q = 0.00098 USD for a two-qubit gate. min_price = 1 USD, total minimum price per circuit. For the most current pricing details, see https://docs.microsoft.com/azure/quantum/provider-ionq#pricing or find your workspace and view pricing options in the "Provider" tab of your workspace: https://aka.ms/aq/myworkspaces :param circuit: Quantum circuit in IonQ JSON format (for examples, see: https://docs.ionq.com/#section/Sample-JSON-Circuits) :type circuit: Dict[str, Any] :param num_shots: Number of shots, defaults to None :type num_shots: int :param price_1q: The price of running a single-qubit gate for one shot. :type price_1q: float :param price_2q: The price of running a double-qubit gate for one shot. :type price_2q: float :param min_price: The minimum price for running a job. :type min_price: float :param shots: Number of shots, defaults to None :type shots: int """ if num_shots is None and shots is None: raise ValueError("The 'shots' parameter has to be specified") if num_shots is not None: warn( "The 'num_shots' parameter will be deprecated. Please, use 'shots' parameter instead.", category=DeprecationWarning, ) shots = num_shots def is_1q_gate(gate: Dict[str, Any]): return "controls" not in gate and "control" not in gate def is_multi_q_gate(gate): return "controls" in gate or "control" in gate def num_2q_gates(gate): controls = gate.get("controls") if controls is None or len(controls) == 1: # Only one control qubit return 1 # Multiple control qubits return 6 * (len(controls) - 2) # Get the costs for the gates depending on the provider if not specified if price_1q is None: price_1q = COST_1QUBIT_GATE_MAP[self.name] if price_2q is None: price_2q = COST_2QUBIT_GATE_MAP[self.name] if min_price is None: min_price = MIN_PRICE_MAP[self.name] gates = circuit.get("circuit", []) N_1q = sum(map(is_1q_gate, gates)) N_2q = sum(map(num_2q_gates, filter(is_multi_q_gate, gates))) price = (price_1q * N_1q + price_2q * N_2q) * shots price = max(price, min_price) return CostEstimate( events = [ UsageEvent( dimension_id="gs1q", dimension_name="1Q Gate Shot", measure_unit="1q gate shot", amount_billed=0.0, amount_consumed=N_1q * shots, unit_price=0.0 ), UsageEvent( dimension_id="gs2q", dimension_name="2Q Gate Shot", measure_unit="2q gate shot", amount_billed=0.0, amount_consumed=N_2q * shots, unit_price=0.0 ) ], currency_code="USD", estimated_total=price )
azure-quantum-python/azure-quantum/azure/quantum/target/ionq.py/0
{ "file_path": "azure-quantum-python/azure-quantum/azure/quantum/target/ionq.py", "repo_id": "azure-quantum-python", "token_count": 3875 }
386
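A cost-estimation sketch for the `IonQ` target above, using a two-qubit Bell circuit in the IonQ JSON format referenced by the docstring. The workspace values are placeholders, and the printed number is only the SDK-side estimate computed by `estimate_cost`, not a billing quote.

```python
from azure.quantum import Workspace
from azure.quantum.target.ionq import IonQ

# Placeholder workspace coordinates.
workspace = Workspace(resource_id="<resource-id>", location="<location>")
target = IonQ(workspace=workspace, name="ionq.qpu.aria-1")

# Two-qubit Bell-state circuit in IonQ JSON format.
circuit = {
    "qubits": 2,
    "circuit": [
        {"gate": "h", "target": 0},
        {"gate": "cnot", "control": 0, "target": 1},
    ],
}

estimate = target.estimate_cost(circuit, shots=500)
print(estimate.estimated_total, estimate.currency_code)
```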
"""Defines targets and helper functions for the Rigetti provider""" ## # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. ## __all__ = [ "InputParams", "Rigetti", "RigettiTarget", ] from dataclasses import dataclass from enum import Enum from typing import Union, Any, Dict, List, Optional from ..target import Target from ... import Job from ...workspace import Workspace class RigettiTarget(str, Enum): """The known targets for the Rigetti provider See https://qcs.rigetti.com/qpus for details on a QPU target. """ QVM = "rigetti.sim.qvm" """A simulator target for Quil. See https://github.com/quil-lang/qvm for more info.""" ANKAA_2 = "rigetti.qpu.ankaa-2" def simulators() -> List[str]: """Returns a list of simulator targets""" return [ RigettiTarget.QVM.value, ] def qpus() -> List[str]: """Returns a list of QPU targets""" return [ RigettiTarget.ANKAA_2.value, ] def num_qubits(target_name) -> int: """Returns the number of qubits supported by the given target""" if target_name == RigettiTarget.QVM.value: return 20 elif target_name == RigettiTarget.ANKAA_2.value: return 84 else: raise ValueError(f"Unknown target {target_name}") @dataclass class InputParams: count: int = 1 """The number of times to run the experiment. Will correspond to the length of each ``azure.quantum.target.rigetti.Readout`` """ skip_quilc: bool = False """ If set to True, `quilc`_ will not be run. This **must** be set true if using `Quil-T <https://pyquil-docs.rigetti.com/en/stable/quilt.html>`_. .. _quilc: https://github.com/quil-lang/quilc """ substitutions: Optional[Dict[str, List[List[float]]]] = None """A dictionary of memory region names to the list of value vectors to write to that region. For example, a job with this Quil program: .. code-block:: DECLARE ro BIT[2] DECLARE theta REAL[2] DECLARE beta REAL[1] RX(theta[0]) 0 RX(theta[1]) 1 RX(beta) 2 MEASURE 0 ro[0] MEASURE 1 ro[1] MEASURE 2 ro[2] might be run with .. highlight:: python .. code-block:: InputParams( substitutions={ "theta": [ [0.0, np.pi], [np.pi, 0.0], ], "beta": [ [2 * np.pi], [2 * np.pi], ] }, count=2, ) The resulting job will be run for each set of parameters in the list. So in the first run, theta[0] will be set to 0.0, theta[1] will be set to np.pi, and beta will be set to 2 * np.pi. Each run is executed for ``count`` shots, so you'd expect a result like ``{"ro": [[0, 1, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0]]} for a total of 4 measurement vectors. Note that the length of the inner list must equal the length of the memory region—that's the ``2`` in ``DECLARE theta REAL[2]``. The length of the (outer) list comprising substitution vectors for each region must be equal. So if you are passing two sets of parameters to ``theta`` (list of length two) and you have another region named ``beta``, then ``beta`` must also be a list of length two. """ class Rigetti(Target): """Rigetti target, defaults to the simulator RigettiTarget.QVM In order to process the results of a Quil input to this target, we recommend using the included Result class. """ target_names = tuple(target.value for target in RigettiTarget) _SHOTS_PARAM_NAME = "count" def __init__( self, workspace: Workspace, name: Union[RigettiTarget, str] = RigettiTarget.QVM, input_data_format: str = "rigetti.quil.v1", output_data_format: str = "rigetti.quil-results.v1", capability: str = "BasicExecution", provider_id: str = "rigetti", encoding: str = "", **kwargs, ): """ Initializes a new target. 
:param workspace: Associated workspace :type workspace: Workspace :param name: Target name :type name: str :param input_data_format: Format of input data (ex. "rigetti.quil.v1") :type input_data_format: str :param output_data_format: Format of output data (ex. "rigetti.quil-results.v1") :type output_data_format: str :param capability: QIR capability :type capability: str :param provider_id: Id of provider (ex. "rigetti") :type provider_id: str :param encoding: "Content-Encoding" attribute value to set on input blob (ex. "gzip") :type encoding: str """ super().__init__( workspace=workspace, name=name, input_data_format=input_data_format, output_data_format=output_data_format, capability=capability, provider_id=provider_id, content_type="text/plain", encoding=encoding, **kwargs, ) def submit( self, input_data: Any, name: str = "azure-quantum-job", shots: int = None, input_params: Union[InputParams, None, Dict[str, Any]] = None, **kwargs, ) -> Job: """Submit input data and return Job. Provide input_data_format, output_data_format and content_type keyword arguments to override default values. :param input_data: Input data :type input_data: Any :param name: Job name :type name: str :param shots: Number of shots, defaults to None :type shots: int :param input_params: Input parameters, see :class:`azure.quantum.target.rigetti.InputParams` for details. :type input_params: Union[InputParams, None, Dict[str, Any]] :return: Azure Quantum job :rtype: Job """ if isinstance(input_params, InputParams): typed_input_params = input_params input_params = { Rigetti._SHOTS_PARAM_NAME: typed_input_params.count, "skipQuilc": typed_input_params.skip_quilc, } if typed_input_params.substitutions is not None: input_params["substitutions"] = typed_input_params.substitutions elif input_params is None: input_params = {} return super().submit( input_data=input_data, name=name, shots=shots, input_params=input_params, **kwargs )
azure-quantum-python/azure-quantum/azure/quantum/target/rigetti/target.py/0
{ "file_path": "azure-quantum-python/azure-quantum/azure/quantum/target/rigetti/target.py", "repo_id": "azure-quantum-python", "token_count": 3042 }
387
## # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. ## # Physical resource estimation for RSA using a pre-compiled QIR code import argparse import os from azure.quantum import Workspace from azure.quantum.target.microsoft import MicrosoftEstimator, QubitParams, \ QECScheme # Configure program arguments parser = argparse.ArgumentParser( prog="rsa", description="Physical resource estimation for RSA using a pre-compiled " "QIR code") parser.add_argument( "-r", "--resource-id", default=os.environ.get("AZURE_QUANTUM_RESOURCE_ID"), help="Resource ID of Azure Quantum workspace (must be set, unless set via " "environment variable AZURE_QUANTUM_RESOURCE_ID)") parser.add_argument( "-l", "--location", default=os.environ.get("AZURE_QUANTUM_LOCATION"), help="Location of Azure Quantum workspace (must be set, unless set via " "environment AZURE_QUANTUM_LOCATION)") # Parse and validate arguments args = parser.parse_args() if not args.resource_id: parser.error("the following arguments are required: -r/--resource-id") if not args.location: parser.error("the following arguments are required: -l/--location") # download QIR bitcode import urllib.request bitcode = urllib.request.urlopen("https://aka.ms/RE/eh_factoring").read() # connect to Azure Quantum workspace (you can find the information for your # resource_id and location on the Overview page of your Quantum workspace) workspace = Workspace(resource_id=args.resource_id, location=args.location) estimator = MicrosoftEstimator(workspace) params = estimator.make_params(num_items=4) params.arguments["product"] = "25195908475657893494027183240048398571429282126204032027777137836043662020707595556264018525880784406918290641249515082189298559149176184502808489120072844992687392807287776735971418347270261896375014971824691165077613379859095700097330459748808428401797429100642458691817195118746121515172654632282216869987549182422433637259085141865462043576798423387184774447920739934236584823824281198163815010674810451660377306056201619676256133844143603833904414952634432190114657544454178424020924616515723350778707749817125772467962926386356373289912154831438167899885040445364023527381951378636564391212010397122822120720357" params.arguments["generator"] = 7 params.arguments["exp_window_len"] = 5 params.arguments["mul_window_len"] = 5 # Error budget params.error_budget = 0.333 # Gate-based (reasonable) params.items[0].qubit_params.name = QubitParams.GATE_NS_E3 # Gate-based (optimistic) params.items[1].qubit_params.name = QubitParams.GATE_NS_E4 # Majorana (reasonable) params.items[2].qubit_params.name = QubitParams.MAJ_NS_E4 params.items[2].qec_scheme.name = QECScheme.FLOQUET_CODE # Majorana (optimistic) params.items[3].qubit_params.name = QubitParams.MAJ_NS_E6 params.items[3].qec_scheme.name = QECScheme.FLOQUET_CODE job = estimator.submit(bitcode, input_params=params) results = job.get_results() table = results.summary_data_frame(labels=[ "Gate-based (reasonable)", "Gate-based (optimistic)", "Majorana (reasonable)", "Majorana (optimistic)" ]) print() print(table[["Physical qubits", "Physical runtime"]]) ## Access non-formatted values, e.g., # print(results[0]["physicalCounts"]["physicalQubits"]) # print(results[0]["physicalCounts"]["runtime"])
azure-quantum-python/azure-quantum/examples/resource_estimation/rsa.py/0
{ "file_path": "azure-quantum-python/azure-quantum/examples/resource_estimation/rsa.py", "repo_id": "azure-quantum-python", "token_count": 1230 }
388
60 O 97.873900000 103.017000000 100.816000000 H 98.128600000 103.038000000 99.848800000 H 97.173800000 102.317000000 100.960000000 O 100.645000000 100.169000000 95.891500000 H 101.491000000 100.305000000 96.406200000 H 99.888700000 100.618000000 96.367800000 O 99.814000000 100.835000000 101.232000000 H 99.329200000 99.976800000 101.063000000 H 99.151600000 101.561000000 101.414000000 O 98.804000000 98.512200000 97.758100000 H 99.782100000 98.646900000 97.916700000 H 98.421800000 99.326500000 97.321300000 O 100.747000000 100.164000000 103.736000000 H 100.658000000 100.628000000 102.855000000 H 100.105000000 99.398600000 103.776000000 O 98.070300000 98.516900000 100.438000000 H 97.172800000 98.878600000 100.690000000 H 98.194000000 98.592200000 99.448100000 O 98.548000000 101.265000000 97.248600000 H 98.688900000 102.140000000 97.711000000 H 97.919900000 101.391000000 96.480800000 O 103.898000000 98.427900000 99.984500000 H 103.015000000 98.654900000 99.573700000 H 104.128000000 97.477300000 99.776100000 O 99.166600000 96.442100000 101.723000000 H 98.843200000 97.206600000 101.166000000 H 99.643900000 95.783700000 101.141000000 O 102.891000000 100.842000000 97.477600000 H 103.837000000 100.662000000 97.209700000 H 102.868000000 101.166000000 98.423400000 O 96.227200000 100.990000000 101.698000000 H 96.148800000 100.422000000 102.517000000 H 95.313600000 101.237000000 101.375000000 O 98.864800000 98.222500000 103.917000000 H 98.949800000 97.463000000 103.272000000 H 99.054800000 97.896400000 104.843000000 O 104.578000000 100.035000000 101.952000000 H 104.419000000 101.011000000 101.802000000 H 104.206000000 99.514900000 101.184000000 O 102.429000000 104.060000000 101.348000000 H 101.757000000 103.665000000 101.974000000 H 102.209000000 105.021000000 101.185000000 O 98.708200000 103.752000000 98.244300000 H 98.397100000 104.234000000 97.425400000 H 99.598500000 104.111000000 98.524400000 O 95.630300000 99.996600000 98.245400000 H 96.540400000 100.410000000 98.268900000 H 94.982900000 100.638000000 97.834500000 O 102.360000000 101.551000000 99.964500000 H 102.675000000 102.370000000 100.444000000 H 101.556000000 101.180000000 100.430000000 O 101.836000000 97.446700000 102.110000000 H 100.860000000 97.397400000 101.898000000 H 101.991000000 97.133400000 103.047000000 O 101.665000000 98.316100000 98.319400000 H 101.904000000 99.233800000 98.002000000 H 102.224000000 97.640900000 97.837700000 O 99.984700000 103.272000000 102.307000000 H 99.640700000 103.104000000 103.231000000 H 99.216500000 103.453000000 101.693000000
azure-quantum-python/azure-quantum/tests/unit/molecule.xyz/0
{ "file_path": "azure-quantum-python/azure-quantum/tests/unit/molecule.xyz", "repo_id": "azure-quantum-python", "token_count": 1574 }
389
## # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. ## from typing import Any, Dict, List, Tuple, Union import unittest import warnings import random import json import pytest import numpy as np import collections from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister from qiskit.providers import JobStatus from qiskit.providers.models import BackendConfiguration from qiskit.providers import BackendV1 as Backend from qiskit.providers.exceptions import QiskitBackendNotFoundError from qiskit_ionq.exceptions import IonQGateError from qiskit_ionq import GPIGate, GPI2Gate, MSGate from common import QuantumTestBase, DEFAULT_TIMEOUT_SECS, LOCATION from test_workspace import SIMPLE_RESOURCE_ID from azure.quantum.workspace import Workspace from azure.quantum.qiskit import AzureQuantumProvider from azure.quantum.qiskit.job import ( MICROSOFT_OUTPUT_DATA_FORMAT, MICROSOFT_OUTPUT_DATA_FORMAT_V2, AzureQuantumJob, ) from azure.quantum.qiskit.backends.backend import ( AzureBackend, AzureQirBackend, ) from azure.quantum.qiskit.backends.quantinuum import QuantinuumEmulatorQirBackend from azure.quantum.qiskit.backends.ionq import IonQSimulatorQirBackend # This provider is used to stub out calls to the AzureQuantumProvider # There are live tests that use the available backends in the workspace # This provider is used to test the Qiskit plugin without making any # calls to Azure and just allows for filtering on the backends with the # given name for installed local backends and filtering criteria. class DummyProvider(AzureQuantumProvider): def __init__(self, workspace=None, **kwargs): self._available_in_ws = kwargs.get("available_in_ws", True) self._backends = None # don't init the parent class, we aren't going to use it # super().__init__(workspace, **kwargs) # Used to stub out calls to getting available targets def _get_allowed_targets_from_workspace( self, name: str, provider_id: str ) -> List[Tuple[str, str]]: backend_list = [x for v in self._backends.values() for x in v] selection = [] for backend in backend_list: if backend.name() == name: selection.append( (name, backend.configuration().to_dict()["azure"]["provider_id"]) ) return selection # Used to stub out calls to filtering available targets def _is_available_in_ws( self, allowed_targets: List[Tuple[str, str]], backend: Backend ): # only return true if the backend name is in the list of allowed targets return any( tup for tup in allowed_targets if tup[0] == backend.name() and tup[1] == backend.configuration().to_dict()["azure"]["provider_id"] ) class NoopQirBackend(AzureQirBackend): def __init__( self, configuration: BackendConfiguration, provider: "AzureQuantumProvider", **fields, ): default_config = BackendConfiguration.from_dict( { "backend_name": fields.pop("name", "sample"), "backend_version": fields.pop("version", "1.0"), "simulator": False, "local": False, "coupling_map": None, "description": "Simple backend for testing", "basis_gates": [], "memory": False, "n_qubits": 11, "conditional": False, "max_shots": 10000, "max_experiments": fields.pop("max_experiments", 1), "open_pulse": False, "gates": [{"name": "TODO", "parameters": [], "qasm_def": "TODO"}], "azure": self._azure_config(fields.pop("output_data_format", None)), } ) configuration: BackendConfiguration = fields.pop( "configuration", default_config ) super().__init__(configuration=configuration, provider=provider, **fields) def run( self, run_input: Union[QuantumCircuit, List[QuantumCircuit]] = [], **options ): return 
self._normalize_run_input_params(run_input, **options) def _azure_config(self, output_data_format=None) -> Dict[str, str]: values = { "blob_name": "inputData", "content_type": "qir.v1", "input_data_format": "qir.v1", } if output_data_format: values["output_data_format"] = output_data_format return values def _default_options(cls): return None def _translate_input( self, circuits: List[QuantumCircuit], input_params: Dict[str, Any] ) -> bytes: return None class NoopPassThruBackend(AzureBackend): def __init__( self, configuration: BackendConfiguration, provider: "AzureQuantumProvider", **fields, ): default_config = BackendConfiguration.from_dict( { "backend_name": fields.pop("name", "sample"), "backend_version": fields.pop("version", "1.0"), "simulator": False, "local": False, "coupling_map": None, "description": "Simple backend for testing", "basis_gates": [], "memory": False, "n_qubits": 11, "conditional": False, "max_shots": 10000, "max_experiments": fields.pop("max_experiments", 1), "open_pulse": False, "gates": [{"name": "TODO", "parameters": [], "qasm_def": "TODO"}], "azure": self._azure_config(fields), } ) configuration: BackendConfiguration = fields.pop( "configuration", default_config ) super().__init__(configuration=configuration, provider=provider, **fields) def run(self, run_input=None, **kwargs): return self._normalize_run_input_params(run_input, **kwargs) def _azure_config(self, fields) -> Dict[str, str]: return fields def _default_options(cls): return None def _translate_input(self, circuit): return None class TestQiskit(QuantumTestBase): """TestIonq Tests the azure.quantum.target.ionq module. """ def _3_qubit_ghz(self): circuit = QuantumCircuit(4, 3) circuit.name = "Qiskit Sample - 3-qubit GHZ circuit" circuit.h(0) circuit.cx(0, 1) circuit.cx(1, 2) circuit.h(3) # Helper qubit that is not measured circuit.measure([0, 1, 2], [0, 1, 2]) return circuit def _5_qubit_superposition(self): circuit = QuantumCircuit(5, 1) for q in range(5): circuit.h(q) circuit.measure([0], [0]) return circuit def _endianness(self, pos=0): self.assertLess(pos, 3) qr = QuantumRegister(3) cr = [ClassicalRegister(3) for _ in range(3)] circuit = QuantumCircuit(qr, *cr, name=f"endian{pos}cr3") circuit.x(pos) circuit.measure(qr[pos], cr[pos][pos]) return circuit def _controlled_s(self): circuit = QuantumCircuit(3) circuit.t(0) circuit.t(1) circuit.cx(0, 1) circuit.tdg(1) circuit.cx(0, 1) return circuit def test_unnamed_run_input_passes_through(self): backend = NoopPassThruBackend(None, "AzureQuantumProvider") self.assertEqual(backend.run("default"), "default") backend = NoopQirBackend(None, "AzureQuantumProvider") self.assertEqual(backend.run("default"), "default") def test_named_run_input_passes_through(self): backend = NoopPassThruBackend(None, "AzureQuantumProvider") self.assertEqual(backend.run(run_input="default"), "default") backend = NoopQirBackend(None, "AzureQuantumProvider") self.assertEqual(backend.run(run_input="default"), "default") def test_named_circuit_passes_through(self): backend = NoopPassThruBackend(None, "AzureQuantumProvider") self.assertEqual(backend.run(circuit="default"), "default") backend = NoopQirBackend(None, "AzureQuantumProvider") self.assertEqual(backend.run(circuit="default"), "default") def test_both_named_circuit_and_run_input_chooses_run_input(self): backend = NoopPassThruBackend(None, "AzureQuantumProvider") self.assertEqual(backend.run(run_input="a", circuit="b"), "a") backend = NoopQirBackend(None, "AzureQuantumProvider") self.assertEqual(backend.run(run_input="a", 
circuit="b"), "a") def test_no_input_raises(self): backend = NoopPassThruBackend(None, "AzureQuantumProvider") with pytest.raises(ValueError) as exc_info: backend.run() backend = NoopQirBackend(None, "AzureQuantumProvider") with pytest.raises(ValueError) as exc_info: backend.run() def test_empty_input_raises(self): backend = NoopPassThruBackend(None, "AzureQuantumProvider") with pytest.raises(ValueError) as exc_info: backend.run([]) with pytest.raises(ValueError) as exc_info: backend.run(run_input=[]) with pytest.raises(ValueError) as exc_info: backend.run(circuit=[]) with pytest.raises(ValueError) as exc_info: backend.run(run_input=[], circuit=[]) with pytest.raises(ValueError) as exc_info: backend.run([], circuit=[]) backend = NoopQirBackend(None, "AzureQuantumProvider") with pytest.raises(ValueError) as exc_info: backend.run([]) with pytest.raises(ValueError) as exc_info: backend.run(run_input=[]) with pytest.raises(ValueError) as exc_info: backend.run(circuit=[]) with pytest.raises(ValueError) as exc_info: backend.run(run_input=[], circuit=[]) with pytest.raises(ValueError) as exc_info: backend.run([], circuit=[]) def test_qir_to_qiskit_bitstring(self): bits = random.choices(["0", "1"], k=50) bitstring = "".join(bits) azure_register = f"[{','.join(bits)}]" azure_registers = ",".join(f"[{bit}, 1, 0]" for bit in bits) self.assertEqual(AzureQuantumJob._qir_to_qiskit_bitstring(azure_register), bitstring) self.assertEqual(AzureQuantumJob._qir_to_qiskit_bitstring(azure_registers), " ".join( f"{bit}10" for bit in reversed(bits) )) self.assertEqual(AzureQuantumJob._qir_to_qiskit_bitstring(bitstring), bitstring) def test_qiskit_submit_ionq_5_qubit_superposition(self): workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) self.assertIn("azure-quantum-qiskit", provider._workspace.user_agent) backend = provider.get_backend("ionq.simulator") num_shots = 1000 circuit = self._5_qubit_superposition() circuit.metadata = {"some": "data"} qiskit_job = backend.run(circuit, shots=num_shots) # Check job metadata: self.assertEqual(qiskit_job._azure_job.details.target, "ionq.simulator") self.assertEqual(qiskit_job._azure_job.details.provider_id, "ionq") self.assertEqual(qiskit_job._azure_job.details.input_data_format, "ionq.circuit.v1") self.assertEqual(qiskit_job._azure_job.details.output_data_format, "ionq.quantum-results.v1") self.assertIn("qiskit", qiskit_job._azure_job.details.metadata) self.assertIn("name", qiskit_job._azure_job.details.metadata) self.assertIn("meas_map", qiskit_job._azure_job.details.metadata) self.assertIn("metadata", qiskit_job._azure_job.details.metadata) # Make sure the job is completed before fetching the results self._qiskit_wait_to_complete(qiskit_job, provider) if JobStatus.DONE == qiskit_job.status(): result = qiskit_job.result() self.assertEqual(sum(result.data()["counts"].values()), num_shots) self.assertAlmostEqual(result.data()["counts"]["0"], num_shots // 2, delta=50) self.assertAlmostEqual(result.data()["counts"]["1"], num_shots // 2, delta=50) self.assertEqual(result.data()["probabilities"], {"0": 0.5, "1": 0.5}) counts = result.get_counts() self.assertEqual(counts, result.data()["counts"]) self.assertEqual(result.results[0].header.num_qubits, "5") self.assertEqual(result.results[0].header.metadata["some"], "data") def test_qiskit_provider_init_with_workspace_not_raises_deprecation(self): # testing warning according to https://docs.python.org/3/library/warnings.html#testing-warnings import warnings with 
warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always") # Try to trigger a warning. workspace = Workspace( resource_id=SIMPLE_RESOURCE_ID, location=LOCATION) AzureQuantumProvider(workspace) warns = [warn for warn in w if "Consider passing \"workspace\" argument explicitly." in warn.message.args[0]] # Verify assert len(warns) == 0 def test_qiskit_provider_init_without_workspace_raises_deprecation(self): # testing warning according to https://docs.python.org/3/library/warnings.html#testing-warnings with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always") # Try to trigger a warning. AzureQuantumProvider( resource_id=SIMPLE_RESOURCE_ID, location=LOCATION) warns = [warn for warn in w if "Consider passing \"workspace\" argument explicitly." in warn.message.args[0]] # Verify assert len(warns) == 1 assert issubclass(warns[0].category, DeprecationWarning) # Validate rising deprecation warning even if workspace is passed, but other parameters are also passed with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always") # Try to trigger a warning. workspace = Workspace( resource_id=SIMPLE_RESOURCE_ID, location=LOCATION) AzureQuantumProvider( workspace=workspace, resource_id=SIMPLE_RESOURCE_ID, location=LOCATION) warns = [warn for warn in w if "Consider passing \"workspace\" argument explicitly." in warn.message.args[0]] # Verify assert len(warns) == 1 assert issubclass(warns[0].category, DeprecationWarning) @pytest.mark.ionq def test_plugins_estimate_cost_qiskit_ionq(self): circuit = self._3_qubit_ghz() workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) self.assertIn("azure-quantum-qiskit", provider._workspace.user_agent) backend = provider.get_backend("ionq.simulator") cost = backend.estimate_cost(circuit, shots=100e3) self.assertEqual(cost.estimated_total, 0.0) backend = provider.get_backend("ionq.qpu") cost = backend.estimate_cost(circuit, shots=1024) self.assertEqual(np.round(cost.estimated_total), 1.0) backend = provider.get_backend("ionq.qpu") cost = backend.estimate_cost(circuit, shots=100e3) self.assertEqual(np.round(cost.estimated_total), 66.0) ## The following two tests are skipped until we can use a workspace ## with this target available as part of the E2E tests. 
# backend = provider.get_backend("ionq.qpu.aria-1") # cost = backend.estimate_cost(circuit, shots=1024) # self.assertEqual(np.round(cost.estimated_total), 1.0) # backend = provider.get_backend("ionq.qpu.aria-1") # cost = backend.estimate_cost(circuit, shots=100e3) # self.assertEqual(np.round(cost.estimated_total), 240.0) @pytest.mark.ionq @pytest.mark.live_test def test_plugins_submit_qiskit_to_ionq(self): circuit = self._3_qubit_ghz() self._test_qiskit_submit_ionq(circuit) @pytest.mark.ionq @pytest.mark.live_test def test_plugins_submit_qiskit_circuit_as_list_to_ionq(self): circuit = self._3_qubit_ghz() self._test_qiskit_submit_ionq([circuit]) @pytest.mark.ionq @pytest.mark.live_test def test_plugins_submit_qiskit_multi_circuit_experiment_to_ionq(self): circuit = self._3_qubit_ghz() workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) self.assertIn("azure-quantum-qiskit", provider._workspace.user_agent) backend = provider.get_backend("ionq.simulator") with pytest.raises(NotImplementedError) as exc: backend.run(circuit=[circuit, circuit], shots=500) self.assertEqual(str(exc.value), "Multi-experiment jobs are not supported!") @pytest.mark.ionq @pytest.mark.live_test def test_plugins_submit_qiskit_qobj_to_ionq(self): from qiskit import assemble circuit = self._3_qubit_ghz() qobj = assemble(circuit) self._test_qiskit_submit_ionq(circuit=qobj, shots=1024) def _qiskit_wait_to_complete( self, qiskit_job, provider, expected_status=JobStatus.DONE): job = qiskit_job._azure_job job.wait_until_completed(timeout_secs=DEFAULT_TIMEOUT_SECS) self.assertEqual(expected_status, qiskit_job.status()) qiskit_job = provider.get_job(job.id) self.assertEqual(expected_status, qiskit_job.status()) def test_plugins_submit_qiskit_to_ionq_with_shots_param(self): circuit = self._3_qubit_ghz() workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend("ionq.simulator") shots = 10 qiskit_job = backend.run(circuit, shots=shots) self._qiskit_wait_to_complete(qiskit_job, provider) self.assertEqual(qiskit_job._azure_job.details.input_params["shots"], shots) def test_plugins_submit_qiskit_to_ionq_with_default_shots(self): circuit = self._3_qubit_ghz() workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend("ionq.simulator") qiskit_job = backend.run(circuit) self._qiskit_wait_to_complete(qiskit_job, provider) self.assertEqual(qiskit_job._azure_job.details.input_params["shots"], 500) def test_plugins_submit_qiskit_to_ionq_with_deprecated_count_param(self): """ Verify that a warning message is printed when the 'count' option is specified. This option was allowed in earlier versions, but now it is accepted only to keep existing user codebase compatible. """ circuit = self._3_qubit_ghz() workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend("ionq.simulator") shots = 10 with pytest.warns( DeprecationWarning, match="The 'count' parameter will be deprecated. Please, use 'shots' parameter instead." 
): qiskit_job = backend.run(circuit, count=shots) self._qiskit_wait_to_complete(qiskit_job, provider) self.assertEqual(qiskit_job._azure_job.details.input_params["shots"], shots) def _test_qiskit_submit_ionq(self, circuit, **kwargs): workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) self.assertIn("azure-quantum-qiskit", provider._workspace.user_agent) backend = provider.get_backend("ionq.simulator") expected_data_format = ( kwargs["input_data_format"] if "input_data_format" in kwargs else "ionq.circuit.v1" ) shots = kwargs.get("shots", backend.options.shots) qiskit_job = backend.run(circuit, **kwargs) # Check job metadata: self.assertEqual(qiskit_job._azure_job.details.target, "ionq.simulator") self.assertEqual(qiskit_job._azure_job.details.provider_id, "ionq") self.assertEqual(qiskit_job._azure_job.details.input_data_format, expected_data_format) self.assertEqual(qiskit_job._azure_job.details.output_data_format, "ionq.quantum-results.v1") self.assertEqual(qiskit_job._azure_job.details.input_params["shots"], shots) self.assertIn("qiskit", qiskit_job._azure_job.details.metadata) self.assertIn("name", qiskit_job._azure_job.details.metadata) self.assertIn("metadata", qiskit_job._azure_job.details.metadata) self.assertIn("meas_map", qiskit_job._azure_job.details.metadata) # Make sure the job is completed before fetching the results self._qiskit_wait_to_complete(qiskit_job, provider) if JobStatus.DONE == qiskit_job.status(): result = qiskit_job.result() self.assertEqual(sum(result.data()["counts"].values()), shots) self.assertAlmostEqual(result.data()["counts"]["000"], shots // 2, delta=50) self.assertAlmostEqual(result.data()["counts"]["111"], shots // 2, delta=50) self.assertEqual(result.data()["probabilities"], {"000": 0.5, "111": 0.5}) counts = result.get_counts() self.assertEqual(counts, result.data()["counts"]) self.assertTrue(hasattr(result.results[0].header, "num_qubits")) self.assertTrue(hasattr(result.results[0].header, "metadata")) @pytest.mark.live_test def test_provider_returns_only_default_backends(self): workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backends = provider.backends() # Check that all names are unique backend_names = [b.name() for b in backends] assert sorted(set(backend_names)) == sorted(backend_names) # Also check that all backends are default for b in backends: backend_config = b.configuration().to_dict() is_default_key_name = "is_default" if is_default_key_name in backend_config: continue if is_default_key_name in backend_config["azure"]: continue raise AssertionError(f"Backend '{str(b)}' is not default") @pytest.mark.live_test def test_get_backends_throws_on_more_than_one_backend_found(self): workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) all_backends = provider.backends() if len(all_backends) > 1: with pytest.raises(QiskitBackendNotFoundError): provider.get_backend() else: # if there's 0 or 1 provider registered in the workspace pytest.skip() @pytest.mark.ionq def test_ionq_simulator_has_default(self): workspace = self.create_workspace() provider = DummyProvider(workspace=workspace) provider.get_backend("ionq.simulator") @pytest.mark.ionq def test_ionq_simulator_has_qir_target(self): provider = DummyProvider() backend = provider.get_backend("ionq.simulator", input_data_format="qir.v1") config = backend.configuration() input_data_format = config.azure["input_data_format"] self.assertEqual(input_data_format, "qir.v1") @pytest.mark.ionq def 
test_ionq_simulator_has_native_gateset_target(self): provider = DummyProvider() backend = provider.get_backend("ionq.simulator", gateset="native") config = backend.configuration() self.assertEqual(config.gateset, "native") @pytest.mark.ionq def test_ionq_simulator_has_qis_gateset_target(self): provider = DummyProvider() backend = provider.get_backend("ionq.simulator", gateset="qis") config = backend.configuration() self.assertEqual(config.gateset, "qis") @pytest.mark.ionq def test_ionq_simulator_default_target_has_qis_gateset(self): provider = DummyProvider() backend = provider.get_backend("ionq.simulator") config = backend.configuration() self.assertEqual(config.gateset, "qis") @pytest.mark.ionq def test_ionq_qpu_has_default(self): provider = DummyProvider() provider.get_backend("ionq.qpu") @pytest.mark.ionq def test_ionq_qpu_has_qir_target(self): provider = DummyProvider() backend = provider.get_backend("ionq.qpu", input_data_format="qir.v1") config = backend.configuration() input_data_format = config.azure["input_data_format"] self.assertEqual(input_data_format, "qir.v1") @pytest.mark.ionq def test_ionq_qpu_has_native_gateset_target(self): provider = DummyProvider() backend = provider.get_backend("ionq.qpu", gateset="native") config = backend.configuration() self.assertEqual(config.gateset, "native") @pytest.mark.ionq def test_ionq_qpu_has_qis_gateset_target(self): provider = DummyProvider() backend = provider.get_backend("ionq.qpu", gateset="qis") config = backend.configuration() self.assertEqual(config.gateset, "qis") @pytest.mark.ionq def test_ionq_qpu_default_target_has_qis_gateset(self): provider = DummyProvider() backend = provider.get_backend("ionq.qpu") config = backend.configuration() self.assertEqual(config.gateset, "qis") @pytest.mark.ionq def test_translate_ionq_qir(self): circuit = self._3_qubit_ghz() workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = IonQSimulatorQirBackend("ionq.simulator", provider) input_params = backend._get_input_params({}) payload = backend._translate_input(circuit, input_params) config = backend.configuration() input_data_format = config.azure["input_data_format"] output_data_format = backend._get_output_data_format() self.assertIsInstance(payload, bytes) self.assertEqual(input_data_format, "qir.v1") self.assertEqual(output_data_format, MICROSOFT_OUTPUT_DATA_FORMAT) self.assertIn("items", input_params) self.assertEqual(len(input_params["items"]), 1) item = input_params["items"][0] self.assertIn("entryPoint", item) self.assertIn("arguments", item) @pytest.mark.ionq @pytest.mark.live_test def test_qiskit_get_ionq_qpu_target(self): workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend("ionq.qpu") self.assertEqual(backend.name(), "ionq.qpu") config = backend.configuration() self.assertFalse(config.simulator) self.assertEqual(1, config.max_experiments) self.assertEqual(11, config.num_qubits) self.assertEqual("application/json", config.azure["content_type"]) self.assertEqual("ionq", config.azure["provider_id"]) self.assertEqual("ionq.circuit.v1", config.azure["input_data_format"]) self.assertEqual("ionq.quantum-results.v1", config.azure["output_data_format"]) self.assertEqual("qis", backend.gateset()) @pytest.mark.ionq def test_ionq_aria_has_default(self): provider = DummyProvider() provider.get_backend("ionq.qpu.aria-1") @pytest.mark.ionq def test_ionq_aria_has_qir_target(self): provider = DummyProvider() 
provider.get_backend("ionq.qpu.aria-1", input_data_format="qir.v1") @pytest.mark.ionq def test_ionq_aria_has_native_gateset_target(self): provider = DummyProvider() provider.get_backend("ionq.qpu.aria-1", gateset="native") @pytest.mark.ionq def test_ionq_aria_has_qis_gateset_target(self): provider = DummyProvider() provider.get_backend("ionq.qpu.aria-1", gateset="qis") @pytest.mark.ionq def test_ionq_aria2_has_default(self): provider = DummyProvider() provider.get_backend("ionq.qpu.aria-2") @pytest.mark.ionq def test_ionq_aria2_has_qir_target(self): provider = DummyProvider() provider.get_backend("ionq.qpu.aria-2", input_data_format="qir.v1") @pytest.mark.ionq def test_ionq_aria2_has_native_gateset_target(self): provider = DummyProvider() provider.get_backend("ionq.qpu.aria-2", gateset="native") @pytest.mark.ionq def test_ionq_aria2_has_qis_gateset_target(self): provider = DummyProvider() provider.get_backend("ionq.qpu.aria-2", gateset="qis") @pytest.mark.ionq def test_ionq_forte1_has_default(self): provider = DummyProvider() provider.get_backend("ionq.qpu.forte-1") @pytest.mark.ionq def test_ionq_forte1_has_qir_target(self): provider = DummyProvider() provider.get_backend("ionq.qpu.forte-1", input_data_format="qir.v1") @pytest.mark.ionq def test_ionq_forte1_has_native_gateset_target(self): provider = DummyProvider() provider.get_backend("ionq.qpu.forte-1", gateset="native") @pytest.mark.ionq def test_ionq_forte1_has_qis_gateset_target(self): provider = DummyProvider() provider.get_backend("ionq.qpu.forte-1", gateset="qis") # The following test is skipped until we can use a workspace # with this target available as part of the E2E tests. # @pytest.mark.ionq # #@pytest.mark.live_test # def test_qiskit_get_ionq_qpu_aria_target(self): # workspace = self.create_workspace() # provider = AzureQuantumProvider(workspace=workspace) # backend = provider.get_backend("ionq.qpu.aria-1") # self.assertEqual(backend.name(), "ionq.qpu.aria-1") # config = backend.configuration() # self.assertFalse(config.simulator) # self.assertEqual(1, config.max_experiments) # self.assertEqual(23, config.num_qubits) # self.assertEqual("ionq", config.azure["provider_id"]) # self.assertEqual("ionq.circuit.v1", config.azure["input_data_format"]) # self.assertEqual("ionq.quantum-results.v1", config.azure["output_data_format"]) @pytest.mark.ionq @pytest.mark.live_test def test_qiskit_get_ionq_native_gateset(self): # initialize a quantum circuit with native gates (see https://ionq.com/docs/using-native-gates-with-qiskit) native_circuit = QuantumCircuit(2, 2) native_circuit.append(MSGate(0, 0), [0, 1]) native_circuit.append(GPIGate(0), [0]) native_circuit.append(GPI2Gate(1), [1]) workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend("ionq.simulator", gateset="native") config = backend.configuration() self.assertEqual("native", backend.gateset()) # Trying to translate a regular circuit using the native gateset should fail: with pytest.raises(IonQGateError) as exc: payload = backend._translate_input(self._3_qubit_ghz()) # however, translating the native circuit should work fine. payload = backend._translate_input(native_circuit) payload = json.loads(payload.decode("utf-8")) self.assertEqual("ms", payload["circuit"][0]["gate"]) # Confirm that the payload includes the gateset information. 
self.assertEqual("native", payload["gateset"]) # We also expect the metadata to be produced correctly for native circuits metadata = backend._prepare_job_metadata(native_circuit) self.assertEqual(2, len(metadata["meas_map"])) # should also be available with the qpu target backend = provider.get_backend("ionq.qpu", gateset="native") config = backend.configuration() self.assertEqual("native", backend.gateset()) payload = backend._translate_input(native_circuit) payload = json.loads(payload.decode("utf-8")) self.assertEqual("ms", payload["circuit"][0]["gate"]) metadata = backend._prepare_job_metadata(native_circuit) self.assertEqual(2, len(metadata["meas_map"])) @pytest.mark.ionq @pytest.mark.live_test def test_plugins_retrieve_job(self): workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend("ionq.simulator") circuit = self._3_qubit_ghz() qiskit_job = backend.run(circuit, shots=100) # Make sure the job is completed before fetching the results self._qiskit_wait_to_complete(qiskit_job, provider) if JobStatus.DONE == qiskit_job.status(): fetched_job = backend.retrieve_job(qiskit_job.id()) self.assertEqual(fetched_job.id(), qiskit_job.id()) result = fetched_job.result() self.assertEqual(result.data()["probabilities"], {"000": 0.5, "111": 0.5}) self.assertEqual(sum(result.data()["counts"].values()), 100) self.assertAlmostEqual(result.data()["counts"]["000"], 50, delta=20) self.assertAlmostEqual(result.data()["counts"]["111"], 50, delta=20) @pytest.mark.quantinuum def test_plugins_estimate_cost_qiskit_quantinuum(self): circuit = self._3_qubit_ghz() workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) self.assertIn("azure-quantum-qiskit", provider._workspace.user_agent) backend = provider.get_backend("quantinuum.sim.h1-1sc") cost = backend.estimate_cost(circuit, shots=100e3) self.assertEqual(cost.estimated_total, 0.0) backend = provider.get_backend("quantinuum.sim.h1-1e") cost = backend.estimate_cost(circuit, shots=100e3) self.assertEqual(cost.estimated_total, 745.0) backend = provider.get_backend("quantinuum.qpu.h1-1") cost = backend.estimate_cost(circuit, shots=100e3) self.assertEqual(cost.estimated_total, 745.0) backend = provider.get_backend("quantinuum.sim.h2-1sc") cost = backend.estimate_cost(circuit, shots=100e3) self.assertEqual(cost.estimated_total, 0.0) backend = provider.get_backend("quantinuum.sim.h2-1e") cost = backend.estimate_cost(circuit, shots=100e3) self.assertEqual(cost.estimated_total, 745.0) backend = provider.get_backend("quantinuum.qpu.h2-1") cost = backend.estimate_cost(circuit, shots=100e3) self.assertEqual(cost.estimated_total, 745.0) @pytest.mark.live_test def test_plugins_submit_qiskit_noexistent_target(self): workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) with pytest.raises(QiskitBackendNotFoundError): provider.get_backend("provider.doesnotexist") @pytest.mark.quantinuum @pytest.mark.live_test def test_plugins_submit_qiskit_to_quantinuum(self): circuit = self._3_qubit_ghz() self._test_qiskit_submit_quantinuum(circuit) @pytest.mark.quantinuum @pytest.mark.live_test def test_plugins_submit_qiskit_to_quantinuum_h2_1e(self): circuit = self._3_qubit_ghz() self._test_qiskit_submit_quantinuum(circuit, target="quantinuum.sim.h2-1e") @pytest.mark.quantinuum @pytest.mark.live_test def test_plugins_submit_qiskit_to_quantinuum_h2_1sc(self): circuit = self._3_qubit_ghz() self._test_qiskit_submit_quantinuum(circuit, 
target="quantinuum.sim.h2-1sc") @pytest.mark.quantinuum @pytest.mark.skip("Target was unavailable at the moment of the recording") def test_plugins_submit_qiskit_to_quantinuum_h2_1qpu(self): circuit = self._3_qubit_ghz() self._test_qiskit_submit_quantinuum(circuit, target="quantinuum.qpu.h2-1") @pytest.mark.quantinuum @pytest.mark.live_test def test_plugins_submit_qiskit_circuit_as_list_to_quantinuum(self): circuit = self._3_qubit_ghz() self._test_qiskit_submit_quantinuum([circuit]) @pytest.mark.quantinuum @pytest.mark.live_test def test_plugins_submit_qiskit_multi_circuit_experiment_to_quantinuum(self): circuit = self._3_qubit_ghz() workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend("quantinuum.sim.h1-1e") self.assertIn("quantinuum.sim.h1-1e", backend.backend_names) self.assertIn(backend.backend_names[0], [ t.name for t in workspace.get_targets(provider_id="quantinuum") ]) with self.assertRaises(NotImplementedError) as context: backend.run(circuit=[circuit, circuit], shots=None) self.assertEqual(str(context.exception), "Multi-experiment jobs are not supported!") @pytest.mark.quantinuum @pytest.mark.live_test def test_plugins_submit_qiskit_to_quantinuum_with_counts_param(self): """ This test verifies that we can pass a "provider-specific" shots number option. Even if the usage of the 'shots' option is encouraged, we should also be able to specify provider's native option ('count' in this case). """ circuit = self._3_qubit_ghz() workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend(name="quantinuum.sim.h1-1e") shots = 10 with pytest.warns( match="Parameter 'count' is subject to change in future versions." ): qiskit_job = backend.run(circuit, count=shots) self._qiskit_wait_to_complete(qiskit_job, provider) self.assertEqual(qiskit_job._azure_job.details.input_params["count"], shots) @pytest.mark.quantinuum @pytest.mark.live_test def test_plugins_submit_qiskit_to_quantinuum_with_explicit_shots_param(self): circuit = self._3_qubit_ghz() workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend(name="quantinuum.sim.h1-1e") shots = 10 qiskit_job = backend.run(circuit, shots=shots) self._qiskit_wait_to_complete(qiskit_job, provider) self.assertEqual(qiskit_job._azure_job.details.input_params["count"], shots) @pytest.mark.quantinuum @pytest.mark.live_test def test_plugins_submit_qiskit_to_quantinuum_with_default_shots_param(self): circuit = self._3_qubit_ghz() workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend(name="quantinuum.sim.h1-1e") qiskit_job = backend.run(circuit) self._qiskit_wait_to_complete(qiskit_job, provider) self.assertEqual(qiskit_job._azure_job.details.input_params["count"], 500) @pytest.mark.quantinuum @pytest.mark.live_test def test_plugins_submit_qiskit_to_quantinuum_with_conflicting_shots_and_count_from_options(self): circuit = self._3_qubit_ghz() workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend(name="quantinuum.sim.h1-1e") shots = 100 with pytest.warns( match="Parameter 'shots' conflicts with the 'count' parameter." 
): qiskit_job = backend.run(circuit, shots=shots, count=10) self._qiskit_wait_to_complete(qiskit_job, provider) self.assertEqual(qiskit_job._azure_job.details.input_params["count"], shots) @pytest.mark.quantinuum @pytest.mark.live_test def test_plugins_submit_qiskit_to_quantinuum_with_count_from_options(self): """ Check that backend also allows to specify shots by using a provider-specific option, but also throws warning with recommndation to use 'shots' """ circuit = self._3_qubit_ghz() workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend(name="quantinuum.sim.h1-1e") shots = 100 with pytest.warns( match="Parameter 'count' is subject to change in future versions. Please, use 'shots' parameter instead." ): qiskit_job = backend.run(circuit, count=shots) self._qiskit_wait_to_complete(qiskit_job, provider) self.assertEqual(qiskit_job._azure_job.details.input_params["count"], shots) def _test_qiskit_submit_quantinuum(self, circuit, target="quantinuum.sim.h1-1e", **kwargs): workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend(target) expected_data_format = ( kwargs["input_data_format"] if "input_data_format" in kwargs else "honeywell.openqasm.v1" ) self.assertIn(target, backend.backend_names) self.assertIn(backend.backend_names[0], [ t.name for t in workspace.get_targets(provider_id="quantinuum") ]) if isinstance(circuit, list): num_qubits = circuit[0].num_qubits circuit[0].metadata = {"some": "data"} else: num_qubits = circuit.num_qubits circuit.metadata = {"some": "data"} qiskit_job = backend.run(circuit, **kwargs) # Check job metadata: self.assertEqual(qiskit_job._azure_job.details.target, target) self.assertEqual(qiskit_job._azure_job.details.provider_id, "quantinuum") self.assertEqual(qiskit_job._azure_job.details.input_data_format, expected_data_format) self.assertEqual(qiskit_job._azure_job.details.output_data_format, "honeywell.quantum-results.v1") self.assertIn("count", qiskit_job._azure_job.details.input_params) self.assertIn("qiskit", qiskit_job._azure_job.details.metadata) self.assertIn("name", qiskit_job._azure_job.details.metadata) self.assertIn("metadata", qiskit_job._azure_job.details.metadata) # Make sure the job is completed before fetching the results self._qiskit_wait_to_complete(qiskit_job, provider) self.assertEqual(JobStatus.DONE, qiskit_job.status()) result = qiskit_job.result() self.assertIn("counts", result.data()) self.assertIn("probabilities", result.data()) self.assertTrue(hasattr(result.results[0].header, "num_qubits")) self.assertEqual(result.results[0].header.num_qubits, str(num_qubits)) self.assertEqual(result.results[0].header.metadata["some"], "data") @pytest.mark.quantinuum def test_translate_quantinuum_qir(self): circuit = self._3_qubit_ghz() workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = QuantinuumEmulatorQirBackend( "quantinuum.sim1h1-1e", provider ) input_params = backend._get_input_params({}) payload = backend._translate_input(circuit, input_params) config = backend.configuration() input_data_format = config.azure["input_data_format"] output_data_format = backend._get_output_data_format() self.assertIsInstance(payload, bytes) self.assertEqual(input_data_format, "qir.v1") self.assertEqual(output_data_format, MICROSOFT_OUTPUT_DATA_FORMAT) self.assertIn("items", input_params) self.assertEqual(len(input_params["items"]), 1) item = input_params["items"][0] 
self.assertIn("entryPoint", item) self.assertIn("arguments", item) @pytest.mark.quantinuum @pytest.mark.live_test def test_configuration_quantinuum_backends(self): workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) # The following backends should have 20 qubits for target_name in [ "quantinuum.qpu.h1-1", "quantinuum.sim.h1-1sc", "quantinuum.sim.h1-1e" ]: config = provider.get_backend(target_name).configuration() # We check for name so the test log includes it when reporting a failure self.assertIsNotNone(target_name) self.assertEqual(20, config.num_qubits) # The following backends should have 32 qubits for target_name in [ "quantinuum.qpu.h2-1", "quantinuum.sim.h2-1sc", "quantinuum.sim.h2-1e", ]: config = provider.get_backend(target_name).configuration() # We check for name so the test log includes it when reporting a failure self.assertIsNotNone(target_name) self.assertEqual(32, config.num_qubits) @pytest.mark.rigetti @pytest.mark.live_test def test_qiskit_submit_to_rigetti(self): from azure.quantum.target.rigetti import RigettiTarget workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) self.assertIn("azure-quantum-qiskit", provider._workspace.user_agent) backend = provider.get_backend(RigettiTarget.QVM.value) self.assertEqual(backend.name(), RigettiTarget.QVM.value) config = backend.configuration() self.assertTrue(config.simulator) self.assertEqual(1, config.max_experiments) self.assertEqual(20, config.num_qubits) self.assertEqual("qir.v1", config.azure["content_type"]) self.assertEqual("rigetti", config.azure["provider_id"]) self.assertEqual("qir.v1", config.azure["input_data_format"]) self.assertEqual("microsoft.quantum-results.v1", backend._get_output_data_format()) shots = 100 circuit = self._3_qubit_ghz() qiskit_job = backend.run(circuit, shots=shots) # Check job metadata: self.assertEqual(qiskit_job._azure_job.details.target, RigettiTarget.QVM.value) self.assertEqual(qiskit_job._azure_job.details.provider_id, "rigetti") self.assertEqual(qiskit_job._azure_job.details.input_data_format, "qir.v1") self.assertEqual(qiskit_job._azure_job.details.output_data_format, "microsoft.quantum-results.v1") self.assertEqual(qiskit_job._azure_job.details.input_params["count"], shots) self.assertEqual(qiskit_job._azure_job.details.input_params["items"][0]["entryPoint"], circuit.name) self.assertEqual(qiskit_job._azure_job.details.input_params["items"][0]["arguments"], []) # Make sure the job is completed before fetching the results self._qiskit_wait_to_complete(qiskit_job, provider) if JobStatus.DONE == qiskit_job.status(): result = qiskit_job.result() # verify we can get the counts with the circuit and without # These will throw if job metadata is incorrect self.assertIsNotNone(result.get_counts(circuit)) self.assertIsNotNone(result.get_counts()) self.assertIsNotNone(result.get_counts(0)) self.assertEqual(sum(result.data()["counts"].values()), shots) self.assertAlmostEqual(result.data()["counts"]["000"], shots // 2, delta=20) self.assertAlmostEqual(result.data()["counts"]["111"], shots // 2, delta=20) counts = result.get_counts() self.assertEqual(counts, result.data()["counts"]) @pytest.mark.rigetti @pytest.mark.live_test def test_qiskit_submit_to_rigetti_with_count_param(self): """ Check that backend also allows to specify shots by using a provider-specific option, but also throws warning with recommndation to use 'shots' """ from azure.quantum.target.rigetti import RigettiTarget workspace = self.create_workspace() provider 
= AzureQuantumProvider(workspace=workspace) backend = provider.get_backend(RigettiTarget.QVM.value) shots = 100 circuit = self._3_qubit_ghz() with pytest.warns( match="Parameter 'count' is subject to change in future versions. Please, use 'shots' parameter instead." ): qiskit_job = backend.run(circuit, count=shots) self._qiskit_wait_to_complete(qiskit_job, provider) self.assertEqual(qiskit_job._azure_job.details.input_params["count"], shots) @pytest.mark.rigetti @pytest.mark.live_test def test_qiskit_submit_to_rigetti_with_explicit_shots_param(self): from azure.quantum.target.rigetti import RigettiTarget workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend(RigettiTarget.QVM.value) shots = 100 circuit = self._3_qubit_ghz() qiskit_job = backend.run(circuit, shots=shots) self._qiskit_wait_to_complete(qiskit_job, provider) self.assertEqual(qiskit_job._azure_job.details.input_params["count"], shots) @pytest.mark.rigetti @pytest.mark.live_test def test_qiskit_submit_to_rigetti_conflicting_shots_and_count_from_options(self): from azure.quantum.target.rigetti import RigettiTarget workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend(RigettiTarget.QVM.value) shots = 100 circuit = self._3_qubit_ghz() with pytest.warns( match="Parameter 'shots' conflicts with the 'count' parameter. Please, provide only one option for setting shots. " "Defaulting to 'shots' parameter." ): qiskit_job = backend.run(circuit, shots=shots, count=10) self._qiskit_wait_to_complete(qiskit_job, provider) self.assertEqual(qiskit_job._azure_job.details.input_params["count"], shots) @pytest.mark.rigetti @pytest.mark.live_test def test_qiskit_submit_to_rigetti_with_count_from_options(self): from azure.quantum.target.rigetti import RigettiTarget workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend(RigettiTarget.QVM.value) shots = 100 circuit = self._3_qubit_ghz() with pytest.warns( match="Parameter 'count' is subject to change in future versions. Please, use 'shots' parameter instead." ): qiskit_job = backend.run(circuit, count=shots) self._qiskit_wait_to_complete(qiskit_job, provider) self.assertEqual(qiskit_job._azure_job.details.input_params["count"], shots) @pytest.mark.rigetti @pytest.mark.live_test def test_qiskit_get_rigetti_qpu_targets(self): from azure.quantum.target.rigetti import RigettiTarget workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) try: backend = provider.get_backend(RigettiTarget.ANKAA_2.value) except QiskitBackendNotFoundError as ex: msg = f"Target {RigettiTarget.ANKAA_2} is not available for workspace {workspace.name}." 
warnings.warn(f"{msg}\nException:\n{QiskitBackendNotFoundError.__name__}\n{ex}") pytest.skip(msg) self.assertEqual(backend.name(), RigettiTarget.ANKAA_2.value) config = backend.configuration() self.assertFalse(config.simulator) self.assertEqual(1, config.max_experiments) self.assertEqual(84, config.num_qubits) self.assertEqual("qir.v1", config.azure["content_type"]) self.assertEqual("rigetti", config.azure["provider_id"]) self.assertEqual("qir.v1", config.azure["input_data_format"]) self.assertEqual("microsoft.quantum-results.v1", backend._get_output_data_format()) @pytest.mark.qci @pytest.mark.live_test def test_qiskit_submit_to_qci(self): workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) self.assertIn("azure-quantum-qiskit", provider._workspace.user_agent) backend = provider.get_backend("qci.simulator") self.assertEqual(backend.name(), "qci.simulator") config = backend.configuration() self.assertTrue(config.simulator) self.assertEqual(1, config.max_experiments) self.assertEqual(29, config.num_qubits) self.assertEqual("qir.v1", config.azure["content_type"]) self.assertEqual("qci", config.azure["provider_id"]) self.assertEqual("qir.v1", config.azure["input_data_format"]) self.assertEqual("microsoft.quantum-results.v1", backend._get_output_data_format()) shots = 100 circuit = self._3_qubit_ghz() qiskit_job = backend.run(circuit, shots=shots) # Check job metadata: self.assertEqual(qiskit_job._azure_job.details.target, "qci.simulator") self.assertEqual(qiskit_job._azure_job.details.provider_id, "qci") self.assertEqual(qiskit_job._azure_job.details.input_data_format, "qir.v1") self.assertEqual(qiskit_job._azure_job.details.output_data_format, "microsoft.quantum-results.v1") self.assertEqual(qiskit_job._azure_job.details.input_params["shots"], shots) self.assertEqual(qiskit_job._azure_job.details.input_params["items"][0]["entryPoint"], circuit.name) self.assertEqual(qiskit_job._azure_job.details.input_params["items"][0]["arguments"], []) # Make sure the job is completed before fetching the results self._qiskit_wait_to_complete(qiskit_job, provider) if JobStatus.DONE == qiskit_job.status(): result = qiskit_job.result() print(result) self.assertEqual(sum(result.data()["counts"].values()), shots) self.assertAlmostEqual(result.data()["counts"]["000"], shots // 2, delta=20) self.assertAlmostEqual(result.data()["counts"]["111"], shots // 2, delta=20) counts = result.get_counts() self.assertEqual(counts, result.data()["counts"]) @pytest.mark.qci @pytest.mark.live_test def test_qiskit_submit_to_qci_with_default_shots(self): workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend("qci.simulator") circuit = self._3_qubit_ghz() qiskit_job = backend.run(circuit) self._qiskit_wait_to_complete(qiskit_job, provider) self.assertEqual(qiskit_job._azure_job.details.input_params["shots"], 500) @pytest.mark.qci @pytest.mark.live_test def test_qiskit_submit_to_qci_with_deprecated_count_param(self): """ Verify that a warning message is printed when the 'count' option is specified. This option was allowed in earlier versions, but now it is accepted only to keep existing user codebase compatible. """ workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend("qci.simulator") shots=10 circuit = self._3_qubit_ghz() with pytest.warns( DeprecationWarning, match="The 'count' parameter will be deprecated. Please, use 'shots' parameter instead." 
): qiskit_job = backend.run(circuit, count=shots) self._qiskit_wait_to_complete(qiskit_job, provider) self.assertEqual(qiskit_job._azure_job.details.input_params["shots"], shots) @pytest.mark.qci @pytest.mark.live_test def test_qiskit_get_qci_qpu_targets(self): workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend("qci.machine1") self.assertEqual(backend.name(), "qci.machine1") config = backend.configuration() self.assertFalse(config.simulator) self.assertEqual(1, config.max_experiments) self.assertEqual(11, config.num_qubits) self.assertEqual("qir.v1", config.azure["content_type"]) self.assertEqual("qci", config.azure["provider_id"]) self.assertEqual("qir.v1", config.azure["input_data_format"]) self.assertEqual("microsoft.quantum-results.v1", backend._get_output_data_format()) # @pytest.mark.parametrize("endian_pos, expectation", # [(0,"000 000 001"), (1,"000 010 000"), (2,"100 000 000")] # ) @pytest.mark.qci @pytest.mark.live_test def test_qiskit_endianness_submit_to_qci( self, endian_pos=0, expectation="000 000 001" ): workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend("qci.simulator") shots = 100 circuit = self._endianness(pos=endian_pos) circuit.metadata = {"some": "data"} qiskit_job = backend.run(circuit, shots=shots) # Make sure the job is completed before fetching the results self._qiskit_wait_to_complete(qiskit_job, provider) if JobStatus.DONE == qiskit_job.status(): result = qiskit_job.result() print(result) self.assertEqual(sum(result.data()["counts"].values()), shots) self.assertEqual(result.data()["counts"][expectation], shots) @pytest.mark.microsoft_qc @pytest.mark.live_test def test_qiskit_controlled_s_to_resource_estimator(self): from pyqir import rt patcher = unittest.mock.patch.object(rt, "initialize") patcher.start() workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend("microsoft.estimator") circuit = self._controlled_s() qiskit_job = backend.run(circuit) # Make sure the job is completed before fetching results self._qiskit_wait_to_complete(qiskit_job, provider) patcher.stop() self.assertEqual(qiskit_job.status(), JobStatus.DONE) if JobStatus.DONE == qiskit_job.status(): result = qiskit_job.result() self.assertEqual(result.data()["logicalCounts"]["numQubits"], 2) self.assertEqual(result.data()["jobParams"]["qubitParams"]["name"], "qubit_gate_ns_e3") self.assertEqual(result.data()["jobParams"]["qecScheme"]["name"], "surface_code") self.assertEqual(result.data()["jobParams"]["errorBudget"], 0.001) @pytest.mark.microsoft_qc @pytest.mark.live_test def test_qiskit_controlled_s_to_resource_estimator_with_high_error_rate(self): from pyqir import rt patcher = unittest.mock.patch.object(rt, "initialize") patcher.start() workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend("microsoft.estimator") circuit = self._controlled_s() qiskit_job = backend.run( circuit, qubitParams={"name": "qubit_gate_ns_e4"}, errorBudget=0.0001 ) # Make sure the job is completed before fetching results self._qiskit_wait_to_complete(qiskit_job, provider) patcher.stop() self.assertEqual(qiskit_job.status(), JobStatus.DONE) if JobStatus.DONE == qiskit_job.status(): result = qiskit_job.result() self.assertEqual(result.data()["logicalCounts"]["numQubits"], 2) self.assertEqual(result.data()["jobParams"]["qubitParams"]["name"], "qubit_gate_ns_e4") 
self.assertEqual(result.data()["jobParams"]["qecScheme"]["name"], "surface_code") self.assertEqual(result.data()["jobParams"]["errorBudget"], 0.0001) @pytest.mark.microsoft_qc @pytest.mark.live_test def test_qiskit_controlled_s_to_resource_estimator_with_items(self): from pyqir import rt patcher = unittest.mock.patch.object(rt, "initialize") patcher.start() workspace = self.create_workspace() provider = AzureQuantumProvider(workspace=workspace) backend = provider.get_backend("microsoft.estimator") circuit = self._controlled_s() item1 = {"qubitParams": {"name": "qubit_gate_ns_e3"}, "errorBudget": 1e-4} item2 = {"qubitParams": {"name": "qubit_gate_ns_e4"}, "errorBudget": 1e-4} qiskit_job = backend.run(circuit, items=[item1, item2]) # Make sure the job is completed before fetching results self._qiskit_wait_to_complete(qiskit_job, provider) patcher.stop() self.assertEqual(qiskit_job.status(), JobStatus.DONE) if JobStatus.DONE == qiskit_job.status(): result = qiskit_job.result() self.assertEqual(result.data(0)["logicalCounts"]["numQubits"], 2) self.assertEqual(result.data(0)["jobParams"]["qubitParams"]["name"], "qubit_gate_ns_e3") self.assertEqual(result.data(0)["jobParams"]["qecScheme"]["name"], "surface_code") self.assertEqual(result.data(0)["jobParams"]["errorBudget"], 0.0001) self.assertEqual(result.data(1)["logicalCounts"]["numQubits"], 2) self.assertEqual(result.data(1)["jobParams"]["qubitParams"]["name"], "qubit_gate_ns_e4") self.assertEqual(result.data(1)["jobParams"]["qecScheme"]["name"], "surface_code") self.assertEqual(result.data(1)["jobParams"]["errorBudget"], 0.0001) def test_backend_without_azure_config_format_defaults_to_ms_format(self): backend = NoopQirBackend(None, "AzureQuantumProvider") output_data_format = backend._get_output_data_format() self.assertEqual(output_data_format, MICROSOFT_OUTPUT_DATA_FORMAT) def test_backend_with_azure_config_format_defaults_to_that_format(self): expected = "test_format" backend = NoopQirBackend( None, "AzureQuantumProvider", output_data_format=expected ) actual = backend._get_output_data_format() self.assertEqual(expected, actual) def test_backend_without_azure_config_format_and_multiple_experiment_support_defaults_to_ms_format_v2( self, ): backend = NoopQirBackend(None, "AzureQuantumProvider", **{"max_experiments": 2}) output_data_format = backend._get_output_data_format() self.assertEqual(output_data_format, MICROSOFT_OUTPUT_DATA_FORMAT_V2) def test_backend_with_azure_config_format_is_overridden_with_explicit_format(self): azure_congfig_value = "test_format" backend = NoopQirBackend( None, "AzureQuantumProvider", output_data_format=azure_congfig_value ) expected = "test_format_v2" options = {"output_data_format": expected} actual = backend._get_output_data_format(options) self.assertNotIn("output_data_format", options) self.assertEqual(expected, actual) def test_specifying_targetCapabilities_with_pass_thru_fails( self, ): from azure.quantum.qiskit.backends.quantinuum import QuantinuumEmulatorBackend backend = QuantinuumEmulatorBackend( "quantinuum.sim.h1-1sc", "AzureQuantumProvider" ) with pytest.raises(ValueError) as exc: # mimic the user passing in targetCapabilities as part of the run options _ = backend._run("", None, {"targetCapability": "BasicExecution"}, {}) actual = str(exc.value) expected = "The targetCapability parameter has been deprecated" self.assertTrue(actual.startswith(expected))
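The resource-estimator tests above show the full call pattern, including multi-item submissions. A rough, self-contained sketch of the same pattern outside the test harness might look like the following; the workspace resource ID and location are placeholders, and the circuit is just an arbitrary small example rather than the `_controlled_s()` helper the tests use.

```python
# Rough sketch of the microsoft.estimator submission pattern exercised by the
# tests above. Workspace resource ID and location are placeholders.
from azure.quantum import Workspace
from azure.quantum.qiskit import AzureQuantumProvider
from qiskit import QuantumCircuit

provider = AzureQuantumProvider(
    workspace=Workspace(
        resource_id="/subscriptions/<id>/resourceGroups/<rg>/providers/Microsoft.Quantum/Workspaces/<name>",
        location="eastus",
    )
)
backend = provider.get_backend("microsoft.estimator")

# Any small circuit works here.
circuit = QuantumCircuit(2)
circuit.h(0)
circuit.cx(0, 1)
circuit.t(1)

# Same keyword arguments the tests pass through backend.run().
item1 = {"qubitParams": {"name": "qubit_gate_ns_e3"}, "errorBudget": 1e-4}
item2 = {"qubitParams": {"name": "qubit_gate_ns_e4"}, "errorBudget": 1e-4}
job = backend.run(circuit, items=[item1, item2])

result = job.result()
for i in range(2):
    print(result.data(i)["jobParams"]["qubitParams"]["name"],
          result.data(i)["logicalCounts"]["numQubits"])
```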
azure-quantum-python/azure-quantum/tests/unit/test_qiskit.py/0
{ "file_path": "azure-quantum-python/azure-quantum/tests/unit/test_qiskit.py", "repo_id": "azure-quantum-python", "token_count": 28220 }
390
---
page_type: sample
author: alchocro
description: Running your first job on the Azure Quantum service
ms.author: alchocro@microsoft.com
ms.date: 08/08/2022
languages:
- python
- qsharp
products:
- azure-quantum
---

# Running your first job on the Azure Quantum service

These are Azure Quantum sample notebooks that illustrate how to use Q#, Qiskit, or Cirq to write a simple program and run it against an Azure Quantum provider. These samples are available as part of the notebook samples gallery in Azure Quantum workspaces in the Azure Portal. For an example of how to run these notebooks in Azure, see [this getting started guide](https://learn.microsoft.com/azure/quantum/get-started-jupyter-notebook).

## Manifest

- [HW-ionq-cirq.ipynb](https://github.com/microsoft/azure-quantum-python/blob/main/samples/hello-world/HW-ionq-cirq.ipynb): Azure Quantum notebook for running a simple Cirq program on the IonQ simulator
- [HW-ionq-qiskit.ipynb](https://github.com/microsoft/azure-quantum-python/blob/main/samples/hello-world/HW-ionq-qiskit.ipynb): Azure Quantum notebook for running a simple Qiskit program on the IonQ simulator
- [HW-ionq-qsharp.ipynb](https://github.com/microsoft/azure-quantum-python/blob/main/samples/hello-world/HW-ionq-qsharp.ipynb): Azure Quantum notebook for running a simple Q# program on the IonQ simulator
- [HW-quantinuum-cirq.ipynb](https://github.com/microsoft/azure-quantum-python/blob/main/samples/hello-world/HW-quantinuum-cirq.ipynb): Azure Quantum notebook for running a simple Cirq program on the Quantinuum syntax checker
- [HW-quantinuum-qiskit.ipynb](https://github.com/microsoft/azure-quantum-python/blob/main/samples/hello-world/HW-quantinuum-qiskit.ipynb): Azure Quantum notebook for running a simple Qiskit program on the Quantinuum syntax checker
- [HW-quantinuum-qsharp.ipynb](https://github.com/microsoft/azure-quantum-python/blob/main/samples/hello-world/HW-quantinuum-qsharp.ipynb): Azure Quantum notebook for running a simple Q# program on the Quantinuum syntax checker
- [HW-rigetti-qiskit.ipynb](https://github.com/microsoft/azure-quantum-python/blob/main/samples/hello-world/HW-rigetti-qiskit.ipynb): Azure Quantum notebook for running a simple Qiskit program on the Rigetti simulator
- [HW-rigetti-qsharp.ipynb](https://github.com/microsoft/azure-quantum-python/blob/main/samples/hello-world/HW-rigetti-qsharp.ipynb): Azure Quantum notebook for running a simple Q# program on the Rigetti simulator
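For readers who want to see the shape of a submission without opening a notebook, the sketch below shows the general call pattern the Qiskit samples follow: connect a workspace, wrap it in `AzureQuantumProvider`, and run a small circuit. The resource ID, location, and target name are placeholders/assumptions — substitute the values and targets available in your own workspace.

```python
# Minimal sketch of the submission pattern the Qiskit notebooks follow.
# The resource ID, location, and target name below are placeholders.
from azure.quantum import Workspace
from azure.quantum.qiskit import AzureQuantumProvider
from qiskit import QuantumCircuit

workspace = Workspace(
    resource_id="/subscriptions/<id>/resourceGroups/<rg>/providers/Microsoft.Quantum/Workspaces/<name>",
    location="eastus",
)
provider = AzureQuantumProvider(workspace=workspace)
backend = provider.get_backend("ionq.simulator")  # assumed target name

# Bell-pair circuit: the canonical "hello world" of quantum computing.
circuit = QuantumCircuit(2, 2)
circuit.h(0)
circuit.cx(0, 1)
circuit.measure([0, 1], [0, 1])

job = backend.run(circuit, shots=100)
print(job.result().get_counts())
```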
azure-quantum-python/samples/hello-world/README.md/0
{ "file_path": "azure-quantum-python/samples/hello-world/README.md", "repo_id": "azure-quantum-python", "token_count": 800 }
391
# Visualization

This folder contains the visualizations for the resource estimation feature.

- react-lib: source code for the visualization components and the D3 integration.
- js-lib: a wrapper JavaScript library that packages the visualization components into a JavaScript package consumable by clients.

Refer to the **build** folder to build and package js-lib and react-lib.

For the react-build project:

```
npm run build        // builds react-lib
npm run tests        // runs all tests and code coverage
npm run testsonly    // runs tests without code coverage
npm run updatetests  // updates test snapshots and runs tests with no coverage
```
azure-quantum-python/visualization/README.md/0
{ "file_path": "azure-quantum-python/visualization/README.md", "repo_id": "azure-quantum-python", "token_count": 147 }
392
/*------------------------------------ Copyright (c) Microsoft Corporation. Licensed under the MIT License. All rights reserved. ------------------------------------ */ import * as React from "react"; import * as d3 from "d3"; import * as d3Format from "d3-format"; import { PieArcDatum } from "d3-shape"; import * as d3Helper from "./D3HelperFunctions"; import { TextStyle } from "./D3HelperFunctions"; export type DonutChartProps = { data: d3Helper.LegendData[]; width: number; height: number; innerRadius: number; outerRadius: number; }; /* Define styles */ const titleStyle: TextStyle = { fontFamily: "Segoe UI", fontStyle: "normal", fontWeight: "600", fontSize: "35", lineHeight: "47", display: "flex", alignItems: "center", textAlign: "center", color: "#201f1e", textAnchor: "middle", }; const donutMiddleTitleStyle: TextStyle = { fontSize: "18", color: "#323130", fontWeight: "400", fontStyle: "normal", textAnchor: "middle", fontFamily: "Segoe UI", lineHeight: "21", alignItems: "center", textAlign: "center", display: "flex", }; const donutMiddleTextStyle: TextStyle = { fontSize: "55", color: "#323130", fontWeight: "400", fontStyle: "normal", textAnchor: "middle", fontFamily: "Segoe UI", lineHeight: "73", alignItems: "center", textAlign: "center", display: "flex", }; function DonutChart({ data, width, height, innerRadius, outerRadius, }: DonutChartProps) { React.useEffect(() => { /* ------------------------------------------------------------ Set up and define constants ------------------------------------------------------------ */ const svg = d3.select("#donutchart"); svg.selectAll("*").remove(); /*------------------------------ Define chart dimensions ------------------------------ */ const innerRadiusHover = innerRadius; const outerRadiusHover = outerRadius + 25; const donutMiddleTitle = "Total physical qubits"; const padAngle = 0.01; const translationValX: number = width / 4; const translationValY: number = height / 4; /* ------------------------------ Define chart styling constants ------------------------------ */ const chartOpacity = 0.75; const chartHoverOpacity = 1; const colorArray = ["#1a5d8c", "#8c1a5c", "#aebac0", "#323130"]; /*------------------------------ Define color ranges ------------------------------ */ const algorithmRunTimeColor = colorArray[1]; const tfactoryLineColor = colorArray[0]; const chartColor = d3 .scaleOrdinal() .domain( d3.extent(data, (d) => { return d.legendTitle; }) as unknown as string, ) .range([algorithmRunTimeColor, tfactoryLineColor]); /* ------------------------------------------------------------ Begin draw chart ------------------------------------------------------------ */ /*------------------------------ Create pie and arc generators ------------------------------ */ const pieGenerator = d3 .pie<d3Helper.LegendData>() .padAngle(padAngle) .value((d) => d.value) .sort(null); const arcGenerator = d3 .arc<PieArcDatum<d3Helper.LegendData>>() .innerRadius(innerRadius) .outerRadius(outerRadius); const pieData = pieGenerator(data); const arcs = svg .selectAll("g") .data(pieData) .enter() .append("g") .attr("class", "arc") .attr( "transform", `translate(${innerRadius + translationValX},${ innerRadius + translationValY })`, ); /*------------------------------ Fill donut chart and apply hover ------------------------------ */ arcs .append("path") .attr("d", arcGenerator) .attr("fill", (d) => { return chartColor(d.data.legendTitle) as string; }) .style("opacity", chartOpacity) .on("mouseover", (d) => { const arcHover = d3 
.arc<PieArcDatum<d3Helper.LegendData>>() .innerRadius(innerRadiusHover) .outerRadius(outerRadiusHover); d3.select(d.target).attr("d", arcHover as any); d3.select(d.target).style("opacity", chartHoverOpacity); }) .on("mouseout", (d) => { d3.select(d.target).attr("d", arcGenerator as any); d3.select(d.target).style("opacity", chartOpacity); }); // Add tooltips arcs .append("title") .text( (d) => `${d.data.title} ${d.data.legendTitle} : ${d3Format.format(",.0f")( d.value, )}`, ); /*------------------------------ Draw Legend ------------------------------ */ const legendY = translationValY + outerRadius * 2 + 10; const midpoint = outerRadius + 10; d3Helper.drawLegend( svg, data, midpoint, legendY, translationValX, chartColor, true, false, ); /*------------------------------ Add text and titles ------------------------------ */ // Add chart title d3Helper.drawText( svg, "Space diagram", innerRadius + translationValX, translationValY - innerRadius, titleStyle, ); // Add middle text const totalQubits = d3.sum(data, (d) => d.value); const totalQubitsStr = d3Format.format(",.0f")(totalQubits); d3Helper.drawText( svg, donutMiddleTitle, innerRadius + translationValX, translationValY + innerRadius - 25, donutMiddleTitleStyle, ); d3Helper.drawText( svg, totalQubitsStr, innerRadius + translationValX, translationValY + innerRadius + 25, donutMiddleTextStyle, ); }, [data, innerRadius, outerRadius]); return ( <div style={{ justifyContent: "center", alignItems: "center", }} > <svg id="donutchart" width={width} height={height}></svg> </div> ); } export default DonutChart;
azure-quantum-python/visualization/react-lib/src/components/d3-visualization-components/DonutChart.tsx/0
{ "file_path": "azure-quantum-python/visualization/react-lib/src/components/d3-visualization-components/DonutChart.tsx", "repo_id": "azure-quantum-python", "token_count": 2275 }
393
/*------------------------------------ Copyright (c) Microsoft Corporation. Licensed under the MIT License. All rights reserved. ------------------------------------ */ export interface JobResults { errorBudget: ErrorBudget; jobParams: JobParams; logicalCounts: LogicalCounts; logicalQubit: LogicalQubit; physicalCounts: PhysicalCounts; physicalCountsFormatted: { [key: string]: string }; reportData: ReportData; status: string; tfactory: Tfactory; } export interface ErrorBudget { logical: number; rotations: number; tstates: number; } export interface JobParams { errorBudget: number; qecScheme: QecScheme; qubitParams: QubitParams; } export interface QecScheme { crossingPrefactor: number; errorCorrectionThreshold: number; logicalCycleTime: string; name: string; physicalQubitsPerLogicalQubit: string; } export interface QubitParams { instructionSet: string; name: string; oneQubitGateErrorRate: number; oneQubitGateTime: string; oneQubitMeasurementErrorRate: number; oneQubitMeasurementTime: string; tGateErrorRate: number; tGateTime: string; twoQubitGateErrorRate: number; twoQubitGateTime: string; } export interface LogicalCounts { ccixCount: number; cczCount: number; measurementCount: number; numQubits: number; rotationCount: number; rotationDepth: number; tCount: number; } export interface LogicalQubit { codeDistance: number; logicalCycleTime: number; logicalErrorRate: number; physicalQubits: number; } export interface PhysicalCounts { breakdown: { [key: string]: number }; physicalQubits: number; runtime: number; } export interface ReportData { assumptions: string[]; groups: Group[]; } export interface Group { alwaysVisible: boolean; entries: Entry[]; title: string; } export interface Entry { description: string; explanation: string; label: string; path: string; } export interface Tfactory { codeDistancePerRound: number[]; logicalErrorRate: number; numInputTstates: number; numRounds: number; numTstates: number; numUnitsPerRound: number[]; physicalQubits: number; physicalQubitsPerRound: number[]; runtime: number; runtimePerRound: number[]; unitNamePerRound: string[]; }
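These interfaces describe the JSON document a resource-estimation job returns. As a rough illustration (not part of the library), the Python sketch below pulls a few headline numbers out of a result with this shape; `result` is assumed to be the already-parsed JSON as a plain dictionary, and the helper name is hypothetical.

```python
# Hypothetical helper: summarize a resource-estimation result whose JSON
# follows the JobResults shape defined above. `result` is assumed to be a
# plain dict parsed from that JSON (e.g. via json.loads).
from typing import Any, Dict


def summarize(result: Dict[str, Any]) -> str:
    qubit_model = result["jobParams"]["qubitParams"]["name"]
    qec_scheme = result["jobParams"]["qecScheme"]["name"]
    error_budget = result["jobParams"]["errorBudget"]
    physical_qubits = result["physicalCounts"]["physicalQubits"]
    runtime = result["physicalCounts"]["runtime"]
    return (
        f"{physical_qubits} physical qubits, runtime {runtime}, "
        f"qubit model {qubit_model}, QEC scheme {qec_scheme}, "
        f"error budget {error_budget}"
    )
```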
azure-quantum-python/visualization/react-lib/src/models/JobResults.ts/0
{ "file_path": "azure-quantum-python/visualization/react-lib/src/models/JobResults.ts", "repo_id": "azure-quantum-python", "token_count": 716 }
394
JavaScript
==========

.. toctree::

    BiString
    BiStringBuilder
    Alignment
    Tokenization
    Tokenizer
bistring/docs/JavaScript/index.rst/0
{ "file_path": "bistring/docs/JavaScript/index.rst", "repo_id": "bistring", "token_count": 44 }
395
/*! * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT license. */ import { BiString, Alignment } from ".."; test("new BiString", () => { expect(() => new BiString(42 as any)).toThrow(TypeError); expect(() => new BiString("fourty-two", 42 as any)).toThrow(TypeError); expect(() => new BiString("fourty-two", "42", 42 as any)).toThrow(TypeError); expect(() => new BiString("fourty-two", "42", new Alignment([ [0, 0], [9, 2], ]))) .toThrow(RangeError); expect(() => new BiString("fourty-two", "42", new Alignment([ [0, 0], [10, 1], ]))) .toThrow(RangeError); new BiString("42"); new BiString("fourty-two", "42"); new BiString("fourty-two", "42", new Alignment([ [0, 0], [6, 1], [7, 1], [10, 2], ])); }); test("BiString.infer", () => { let bs = BiString.infer("test", "test"); expect(bs.equals(new BiString("test"))).toBe(true); bs = BiString.infer("color", "colour"); expect(bs.substring(3, 5).original).toBe("o"); expect(bs.inverse().equals(BiString.infer("colour", "color"))).toBe(true); bs = BiString.infer( "🅃🄷🄴 🅀🅄🄸🄲🄺, 🄱🅁🄾🅆🄽 🦊 🄹🅄🄼🄿🅂 🄾🅅🄴🅁 🅃🄷🄴 🄻🄰🅉🅈 🐶", "the quick brown fox jumps over the lazy dog", ); expect(bs.substring(0, 3).original).toBe("🅃🄷🄴"); expect(bs.substring(0, 3).modified).toBe("the"); expect(bs.substring(4, 9).original).toBe("🅀🅄🄸🄲🄺"); expect(bs.substring(4, 9).modified).toBe("quick"); expect(bs.substring(10, 15).original).toBe("🄱🅁🄾🅆🄽"); expect(bs.substring(10, 15).modified).toBe("brown"); expect(bs.substring(16, 19).original).toBe("🦊"); expect(bs.substring(16, 19).modified).toBe("fox"); expect(bs.substring(20, 25).original).toBe("🄹🅄🄼🄿🅂"); expect(bs.substring(20, 25).modified).toBe("jumps"); expect(bs.substring(40, 43).original).toBe("🐶"); expect(bs.substring(40, 43).modified).toBe("dog"); bs = BiString.infer( "Ṫḧë qüïċḳ, ḅṛöẅṅ 🦊 jüṁṗṡ öṿëṛ ẗḧë ḷäżÿ 🐶", "the quick brown fox jumps over the lazy dog", ); expect(bs.substring(0, 3).equals(new BiString("Ṫḧë", "the", Alignment.identity(3)))).toBe(true); expect(bs.substring(4, 9).equals(new BiString("qüïċḳ", "quick", Alignment.identity(5)))).toBe(true); expect(bs.substring(10, 15).equals(new BiString("ḅṛöẅṅ", "brown", Alignment.identity(5)))).toBe(true); expect(bs.substring(16, 19).original).toBe("🦊"); expect(bs.substring(16, 19).modified).toBe("fox"); expect(bs.substring(20, 25).equals(new BiString("jüṁṗṡ", "jumps", Alignment.identity(5)))).toBe(true); expect(bs.substring(40, 43).original).toBe("🐶"); expect(bs.substring(40, 43).modified).toBe("dog"); bs = BiString.infer("Z̴̡̪̫̖̥̔̿̃̈̏̎͠͝á̸̪̠̖̻̬̖̪̞͙͇̮̠͎̆͋́̐͌̒͆̓l̶͉̭̳̤̬̮̩͎̟̯̜͇̥̠̘͑͐̌͂̄́̀̂̌̈͛̊̄̚͜ģ̸̬̼̞̙͇͕͎̌̾̒̐̿̎̆̿̌̃̏̌́̾̈͘͜o̶̢̭͕͔̩͐ ̴̡̡̜̥̗͔̘̦͉̣̲͚͙̐̈́t̵͈̰̉̀͒̎̈̿̔̄̽͑͝͠ẹ̵̫̲̫̄͜͜x̵͕̳͈̝̤̭̼̼̻͓̿̌̽̂̆̀̀̍̒͐́̈̀̚͝t̸̡̨̥̺̣̟͎̝̬̘̪͔͆́̄̅̚", "Zalgo text"); for (let i = 0; i < bs.length; ++i) { expect(bs.substring(i, i + 1).original.startsWith(bs[i])).toBe(true); } expect(BiString.infer("", "").equals(new BiString(""))).toBe(true); expect(BiString.infer("a", "").equals(new BiString("a", ""))).toBe(true); expect(BiString.infer("", "a").equals(new BiString("", "a"))).toBe(true); }); test("BiString.concat", () => { let bs = new BiString(" ", "").concat( "Hello", new BiString(" ", " "), "world!", new BiString(" ", ""), ); expect(bs.original).toBe(" Hello world! 
"); expect(bs.modified).toBe("Hello world!"); bs = bs.substring(4, 7); expect(bs.original).toBe("o w"); expect(bs.modified).toBe("o w"); bs = bs.substring(1, 2); expect(bs.original).toBe(" "); expect(bs.modified).toBe(" "); }); test("BiString.indexOf", () => { const bs = new BiString("dysfunction"); expect(bs.indexOf("dis")).toBe(-1); expect(bs.indexOf("fun")).toBe(3); expect(bs.indexOf("n")).toBe(5); expect(bs.indexOf("n", 6)).toBe(10); expect(bs.indexOf("n", 11)).toBe(-1); expect(bs.boundsOf("dis")).toEqual([-1, -1]); expect(bs.boundsOf("fun")).toEqual([3, 6]); expect(bs.boundsOf("n")).toEqual([5, 6]); expect(bs.boundsOf("n", 6)).toEqual([10, 11]); expect(bs.boundsOf("n", 11)).toEqual([-1, -1]); }); test("BiString.lastIndexOf", () => { const bs = new BiString("dysfunction"); expect(bs.lastIndexOf("dis")).toBe(-1); expect(bs.lastIndexOf("fun")).toBe(3); expect(bs.lastIndexOf("n")).toBe(10); expect(bs.lastIndexOf("n", 9)).toBe(5); expect(bs.lastIndexOf("n", 4)).toBe(-1); expect(bs.lastBoundsOf("dis")).toEqual([-1, -1]); expect(bs.lastBoundsOf("fun")).toEqual([3, 6]); expect(bs.lastBoundsOf("n")).toEqual([10, 11]); expect(bs.lastBoundsOf("n", 9)).toEqual([5, 6]); expect(bs.lastBoundsOf("n", 4)).toEqual([-1, -1]); }); test("BiString.{starts,ends}With", () => { const bs = new BiString("Beginning, middle, ending"); expect(bs.startsWith("Begin")).toBe(true); expect(bs.endsWith("ing")).toBe(true); expect(bs.startsWith("ending")).toBe(false); expect(bs.endsWith("Beginning")).toBe(false); }); test("BiString.pad*", () => { const bs = new BiString("Hello world!"); expect(bs.padStart(5).equals(bs)).toBe(true); expect(bs.padEnd(5).equals(bs)).toBe(true); let pad = new BiString("", " "); expect(bs.padStart(16).equals(pad.concat(bs))).toBe(true); expect(bs.padEnd(16).equals(bs.concat(pad))).toBe(true); }); test("BiString.split", () => { let bs = new BiString("The quick, brown fox jumps over the lazy dog"); expect(bs.split()).toEqual([bs]); expect(bs.split("").map(s => s.modified)).toEqual(bs.modified.split("")); expect(bs.split(" ").map(s => s.modified)).toEqual(bs.modified.split(" ")); expect(bs.split(/ /).map(s => s.modified)).toEqual(bs.modified.split(/ /)); expect(bs.split(/ /y).map(s => s.modified)).toEqual(bs.modified.split(/ /y)); expect(bs.split("", 0).map(s => s.modified)).toEqual(bs.modified.split("", 0)); expect(bs.split(" ", 0).map(s => s.modified)).toEqual(bs.modified.split(" ", 0)); expect(bs.split(/ /, 0).map(s => s.modified)).toEqual(bs.modified.split(/ /, 0)); expect(bs.split("", 3).map(s => s.modified)).toEqual(bs.modified.split("", 3)); expect(bs.split(" ", 3).map(s => s.modified)).toEqual(bs.modified.split(" ", 3)); expect(bs.split(/ /, 3).map(s => s.modified)).toEqual(bs.modified.split(/ /, 3)); expect(bs.split("", 20).map(s => s.modified)).toEqual(bs.modified.split("", 20)); expect(bs.split(" ", 20).map(s => s.modified)).toEqual(bs.modified.split(" ", 20)); expect(bs.split(/ /, 20).map(s => s.modified)).toEqual(bs.modified.split(/ /, 20)); bs = new BiString(" The quick, brown fox"); expect(bs.split(" ").map(s => s.modified)).toEqual(bs.modified.split(" ")); expect(bs.split(/ /).map(s => s.modified)).toEqual(bs.modified.split(/ /)); bs = new BiString("The quick, brown fox "); expect(bs.split(" ").map(s => s.modified)).toEqual(bs.modified.split(" ")); expect(bs.split(/ /).map(s => s.modified)).toEqual(bs.modified.split(/ /)); bs = new BiString(" The quick, brown fox "); expect(bs.split(" ").map(s => s.modified)).toEqual(bs.modified.split(" ")); expect(bs.split(/ /).map(s => 
s.modified)).toEqual(bs.modified.split(/ /)); }); test("BiString.join", () => { const sep = new BiString(" ", ", "); const chunks = new BiString("The quick brown fox").split(" "); const bs = sep.join(chunks); expect(bs.original).toBe("The quick brown fox"); expect(bs.modified).toBe("The, quick, brown, fox"); }); test("BiString.trim{,Start,End}", () => { let bs = new BiString(" Hello world! "); expect(bs.trim().modified).toBe("Hello world!"); expect(bs.trimStart().modified).toBe("Hello world! "); expect(bs.trimEnd().modified).toBe(" Hello world!"); bs = new BiString(" "); expect(bs.trim().modified).toBe(""); expect(bs.trimStart().modified).toBe(""); expect(bs.trimEnd().modified).toBe(""); }); test("BiString.normalize", () => { // "Héllö" -- é is composed but ö has a combining diaeresis let bs = new BiString("H\u00E9llo\u0308").normalize("NFC"); expect(bs.original).toBe("H\u00E9llo\u0308"); expect(bs.modified).toBe("H\u00E9ll\u00F6"); expect(bs.modified).toBe(bs.original.normalize("NFC")); expect(bs.slice(1, 2).equals(new BiString("\u00E9"))).toBe(true); expect(bs.slice(4, 5).equals(new BiString("o\u0308", "\u00F6"))).toBe(true); bs = new BiString("H\u00E9llo\u0308").normalize("NFD"); expect(bs.original).toBe("H\u00E9llo\u0308"); expect(bs.modified).toBe("He\u0301llo\u0308"); expect(bs.modified).toBe(bs.original.normalize("NFD")); expect(bs.slice(1, 3).equals(new BiString("\u00E9", "e\u0301"))).toBe(true); expect(bs.slice(5, 7).original).toBe("o\u0308"); expect(bs.slice(5, 7).modified).toBe("o\u0308"); expect(bs.slice(5, 7).equals(new BiString("o\u0308"))).toBe(true); }); test("BiString.toLowerCase", () => { let bs = new BiString("Hello World").toLowerCase(); let expected = new BiString("Hello World", "hello world", Alignment.identity(11)); expect(bs.equals(expected)).toBe(true); // Odysseus bs = new BiString("ὈΔΥΣΣΕΎΣ").toLowerCase(); expected = new BiString("ὈΔΥΣΣΕΎΣ", "ὀδυσσεύς", Alignment.identity(8)); expect(bs.equals(expected)).toBe(true); // Examples from The Unicode Standard, Version 12.0, Chapter 3.13 bs = new BiString("ᾼΣͅ").toLowerCase(); expected = new BiString("ᾼΣͅ", "ᾳςͅ", Alignment.identity(4)); expect(bs.equals(expected)).toBe(true); bs = new BiString("ͅΣͅ").toLowerCase(); expected = new BiString("ͅΣͅ", "ͅσͅ", Alignment.identity(3)); expect(bs.equals(expected)).toBe(true); bs = new BiString("ᾼΣᾼ").toLowerCase(); expected = new BiString("ᾼΣᾼ", "ᾳσᾳ", Alignment.identity(5)); expect(bs.equals(expected)).toBe(true); bs = new BiString("Σ").toLowerCase(); expected = new BiString("Σ", "σ"); expect(bs.equals(expected)).toBe(true); }); test("BiString.toUpperCase", () => { let bs = new BiString("Hello World").toUpperCase(); let expected = new BiString("Hello World", "HELLO WORLD", Alignment.identity(11)); expect(bs.equals(expected)).toBe(true); bs = new BiString("straße").toUpperCase(); expected = new BiString("stra", "STRA", Alignment.identity(4)).concat( new BiString("ß", "SS"), new BiString("e", "E"), ); expect(bs.equals(expected)).toBe(true); // Odysseus bs = new BiString("Ὀδυσσεύς").toUpperCase(); expected = new BiString("Ὀδυσσεύς", "ὈΔΥΣΣΕΎΣ", Alignment.identity(8)); expect(bs.equals(expected)).toBe(true); }); test("README", () => { let bs = new BiString("𝕿𝖍𝖊 𝖖𝖚𝖎𝖈𝖐, 𝖇𝖗𝖔𝖜𝖓 🦊 𝖏𝖚𝖒𝖕𝖘 𝖔𝖛𝖊𝖗 𝖙𝖍𝖊 𝖑𝖆𝖟𝖞 🐶"); bs = bs.normalize("NFKD"); bs = bs.toLowerCase(); bs = bs.replace("🦊", "fox") bs = bs.replace("🐶", "dog") bs = bs.replace(/[^\w\s]+/g, ""); bs = bs.slice(0, 19); expect(bs.modified).toBe("the quick brown fox"); expect(bs.original).toBe("𝕿𝖍𝖊 𝖖𝖚𝖎𝖈𝖐, 𝖇𝖗𝖔𝖜𝖓 🦊"); });
bistring/js/tests/bistring.test.ts/0
{ "file_path": "bistring/js/tests/bistring.test.ts", "repo_id": "bistring", "token_count": 5482 }
396
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT license. from __future__ import annotations __all__ = [ 'Token', 'Tokenization', 'Tokenizer', 'RegexTokenizer', 'SplittingTokenizer', 'CharacterTokenizer', 'WordTokenizer', 'SentenceTokenizer', ] from abc import ABC, abstractmethod from dataclasses import dataclass import icu import threading from typing import Callable, Iterable, Iterator, Optional, Sequence, Union, overload from ._alignment import Alignment from ._bistr import bistr, String from ._regex import compile_regex from ._typing import AnyBounds, Bounds, Index, Regex @dataclass(frozen=True) class Token: """ A token extracted from a string. """ text: bistr """ The actual text of the token. """ start: int """ The start position of the token. """ end: int """ The end position of the token. """ def __init__(self, text: String, start: int, end: int): """ :param text: The text of this token. :param start: The starting index of this token. :param end: The ending index of this token. """ super().__setattr__('text', bistr(text)) super().__setattr__('start', start) super().__setattr__('end', end) @property def original(self) -> str: """ The original value of this token. """ return self.text.original @property def modified(self) -> str: """ The modified value of this token. """ return self.text.modified @classmethod def slice(cls, text: String, start: int, end: int) -> Token: """ Create a Token from a slice of a bistr. :param text: The (bi)string to slice. :param start: The starting index of the token. :param end: The ending index of the token. """ return cls(text[start:end], start, end) def __str__(self) -> str: return f'[{self.start}:{self.end}]={self.text}' def __repr__(self) -> str: return f'Token({self.text!r}, start={self.start}, end={self.end})' @dataclass(frozen=True) class Tokenization: """ A string and its tokenization. """ text: bistr """ The text that was tokenized. """ alignment: Alignment """ The alignment from text indices to token indices. """ _tokens: Sequence[Token] def __init__(self, text: String, tokens: Iterable[Token]): """ :param text: The text from which the tokens have been extracted. :param tokens: The tokens extracted from the text. """ text = bistr(text) tokens = tuple(tokens) alignment = [(0, 0)] for i, token in enumerate(tokens): alignment.append((token.start, i)) alignment.append((token.end, i + 1)) alignment.append((len(text), len(tokens))) super().__setattr__('text', text) super().__setattr__('_tokens', tokens) super().__setattr__('alignment', Alignment(alignment)) @classmethod def infer(cls, text: String, tokens: Iterable[str]) -> Tokenization: r""" Infer a `Tokenization` from a sequence of tokens. >>> tokens = Tokenization.infer('hello, world!', ['hello', 'world']) >>> tokens[0] Token(bistr('hello'), start=0, end=5) >>> tokens[1] Token(bistr('world'), start=7, end=12) Due to the possibility of ambiguity, it is much better to use a :class:`Tokenizer` or some other method of producing :class:`Token`\ s with their positions explicitly set. :returns: The inferred tokenization, with token positions found by simple forward search. :raises: :class:`ValueError` if the tokens can't be found in the source string. 
""" text = bistr(text) result = [] start = 0 for token in tokens: start, end = text.index_bounds(token, start) result.append(Token.slice(text, start, end)) start = end return cls(text, result) def __iter__(self) -> Iterator[Token]: return iter(self._tokens) def __len__(self) -> int: return len(self._tokens) @overload def __getitem__(self, index: int) -> Token: ... @overload def __getitem__(self, index: slice) -> Tokenization: ... def __getitem__(self, index: Index) -> Union[Token, Tokenization]: r""" Indexing a `Tokenization` returns the nth token: >>> tokens = Tokenization.infer( ... "The quick, brown fox", ... ["The", "quick", "brown", "fox"], ... ) >>> tokens[0] Token(bistr('The'), start=0, end=3) Slicing a `Tokenization` returns a new one with the requested slice of tokens: >>> tokens = tokens[1:-1] >>> tokens[0] Token(bistr('quick'), start=4, end=9) """ if isinstance(index, slice): return Tokenization(self.text, self._tokens[index]) else: return self._tokens[index] def __str__(self) -> str: tokens = ', '.join(map(str, self)) return f'Tokenization({self.text}, [{tokens}])' def __repr__(self) -> str: return f'Tokenization({self.text!r}, {self._tokens!r})' def substring(self, *args: AnyBounds) -> bistr: """ Map a span of tokens to the corresponding substring. With no arguments, returns the substring from the first to the last token. """ i, j = self.text_bounds(*args) return self.text[i:j] def text_bounds(self, *args: AnyBounds) -> Bounds: """ Map a span of tokens to the bounds of the corresponding text. With no arguments, returns the bounds from the first to the last token. """ if len(args) == 0: args = (0, len(self)) return self.alignment.original_bounds(*args) def original_bounds(self, *args: AnyBounds) -> Bounds: """ Map a span of tokens to the bounds of the corresponding original text. With no arguments, returns the bounds from the first to the last token. """ return self.text.alignment.original_bounds(self.text_bounds(*args)) def bounds_for_text(self, *args: AnyBounds) -> Bounds: """ Map a span of text to the bounds of the corresponding span of tokens. """ return self.alignment.modified_bounds(*args) def bounds_for_original(self, *args: AnyBounds) -> Bounds: """ Map a span of original text to the bounds of the corresponding span of tokens. """ text_bounds = self.text.alignment.modified_bounds(*args) return self.alignment.modified_bounds(text_bounds) def slice_by_text(self, *args: AnyBounds) -> Tokenization: """ Map a span of text to the corresponding span of tokens. """ i, j = self.bounds_for_text(*args) return self[i:j] def slice_by_original(self, *args: AnyBounds) -> Tokenization: """ Map a span of the original text to the corresponding span of tokens. """ i, j = self.bounds_for_original(*args) return self[i:j] def snap_text_bounds(self, *args: AnyBounds) -> Bounds: """ Expand a span of text to align it with token boundaries. """ return self.text_bounds(self.bounds_for_text(*args)) def snap_original_bounds(self, *args: AnyBounds) -> Bounds: """ Expand a span of original text to align it with token boundaries. """ return self.original_bounds(self.bounds_for_original(*args)) class Tokenizer(ABC): """ Abstract base class for tokenizers. """ @abstractmethod def tokenize(self, text: String) -> Tokenization: """ Tokenize some text. :param text: The text to tokenize, as either an `str` or :class:`~bistring.bistr`. A plain `str` should be converted to a `bistr` before processing. :returns: A :class:`~bistring.Tokenization` holding the text and its tokens. 
""" pass class RegexTokenizer(Tokenizer): r""" Breaks text into tokens based on a regex. >>> tokenizer = RegexTokenizer(r'\w+') >>> tokens = tokenizer.tokenize('the quick brown fox jumps over the lazy dog') >>> tokens[0] Token(bistr('the'), start=0, end=3) >>> tokens[1] Token(bistr('quick'), start=4, end=9) """ def __init__(self, regex: Regex): """ :param regex: A (possibly compiled) regular expression that matches tokens to extract. """ self._pattern = compile_regex(regex) def tokenize(self, text: String) -> Tokenization: text = bistr(text) tokens = [] for match in self._pattern.finditer(text.modified): tokens.append(Token.slice(text, match.start(), match.end())) return Tokenization(text, tokens) class SplittingTokenizer(Tokenizer): r""" Splits text into tokens based on a regex. >>> tokenizer = SplittingTokenizer(r'\s+') >>> tokens = tokenizer.tokenize('the quick brown fox jumps over the lazy dog') >>> tokens[0] Token(bistr('the'), start=0, end=3) >>> tokens[1] Token(bistr('quick'), start=4, end=9) """ def __init__(self, regex: Regex): """ :param regex: A (possibly compiled) regular expression that matches the regions between tokens. """ self._pattern = compile_regex(regex) def tokenize(self, text: String) -> Tokenization: text = bistr(text) tokens = [] last = 0 for match in self._pattern.finditer(text.modified): start = match.start() if start > last: tokens.append(Token.slice(text, last, start)) last = match.end() end = len(text.modified) if end > last: tokens.append(Token.slice(text, last, end)) return Tokenization(text, tokens) class _IcuTokenizer(Tokenizer): """ Base class for ICU BreakIterator-based tokenizers. """ def __init__(self, locale: str, constructor: Callable[[icu.Locale], icu.BreakIterator]): # BreakIterator is not a thread-safe API, so store a cache of # thread-local iterators self._locale = icu.Locale(locale) self._constructor = constructor self._local = threading.local() # Eagerly construct one on this thread as an optimization, and to check # for errors self._break_iterator() def _break_iterator(self) -> icu.BreakIterator: bi: Optional[icu.BreakIterator] = getattr(self._local, 'bi', None) if bi is None: bi = self._constructor(self._locale) self._local.bi = bi return bi def tokenize(self, text: String) -> Tokenization: text = bistr(text) tokens = [] bi = self._break_iterator() utext = icu.UnicodeString(text.modified) bi.setText(utext) ui = bi.first() uj = bi.nextBoundary() i = 0 while uj != icu.BreakIterator.DONE: j = i + utext.countChar32(ui, uj - ui) if self._check_token(bi.getRuleStatus()): tokens.append(Token.slice(text, i, j)) ui = uj uj = bi.nextBoundary() i = j return Tokenization(text, tokens) def _check_token(self, tag: int) -> bool: return True class CharacterTokenizer(_IcuTokenizer): """ Splits text into user-perceived characters/grapheme clusters. >>> tokenizer = CharacterTokenizer('th_TH') >>> tokens = tokenizer.tokenize('กำนัล') >>> tokens[0] Token(bistr('กำ'), start=0, end=2) >>> tokens[1] Token(bistr('นั'), start=2, end=4) >>> tokens[2] Token(bistr('ล'), start=4, end=5) """ def __init__(self, locale: str): """ :param locale: The name of the locale to use for computing user-perceived character boundaries. """ super().__init__(locale, icu.BreakIterator.createCharacterInstance) class WordTokenizer(_IcuTokenizer): """ Splits text into words based on Unicode rules. 
>>> tokenizer = WordTokenizer('en_US') >>> tokens = tokenizer.tokenize('the quick brown fox jumps over the lazy dog') >>> tokens[0] Token(bistr('the'), start=0, end=3) >>> tokens[1] Token(bistr('quick'), start=4, end=9) """ def __init__(self, locale: str): """ :param locale: The name of the locale to use for computing word boundaries. """ super().__init__(locale, icu.BreakIterator.createWordInstance) def _check_token(self, tag: int) -> bool: return tag >= 100 # UBRK_WORD_NONE_LIMIT class SentenceTokenizer(_IcuTokenizer): """ Splits text into sentences based on Unicode rules. >>> tokenizer = SentenceTokenizer('en_US') >>> tokens = tokenizer.tokenize( ... 'Word, sentence, etc. boundaries are hard. Luckily, Unicode can help.' ... ) >>> tokens[0] Token(bistr('Word, sentence, etc. boundaries are hard. '), start=0, end=42) >>> tokens[1] Token(bistr('Luckily, Unicode can help.'), start=42, end=68) """ def __init__(self, locale: str): """ :param locale: The name of the locale to use for computing sentence boundaries. """ super().__init__(locale, icu.BreakIterator.createSentenceInstance)
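To tie the module together, here is a rough usage sketch of the word tokenizer and the span-mapping helpers on `Tokenization`. The sample sentence is arbitrary, the top-level import path is an assumption (the names come from this module's `__all__`), and the ICU-backed tokenizers require PyICU to be installed.

```python
# Rough usage sketch for the tokenizers defined above. The sample sentence is
# arbitrary and the top-level imports assume the package re-exports the names
# listed in this module's __all__.
from bistring import bistr, WordTokenizer

tokenizer = WordTokenizer("en_US")
tokens = tokenizer.tokenize(bistr("The quick, brown fox jumps over the lazy dog"))

# Each token carries its text and its [start, end) position in the string.
for token in tokens:
    print(token.modified, token.start, token.end)

# Map a character span back to the tokens that overlap it ...
i, j = tokens.bounds_for_text(4, 12)      # a span touching "quick" and "brown"
print(tokens.substring(i, j).modified)

# ... or widen an arbitrary character span out to token boundaries.
print(tokens.snap_text_bounds(4, 12))
```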
bistring/python/bistring/_token.py/0
{ "file_path": "bistring/python/bistring/_token.py", "repo_id": "bistring", "token_count": 6058 }
397
default_stages: [push]

repos:
  - repo: https://github.com/pre-commit/mirrors-pylint
    rev: v2.4.3
    hooks:
      - id: pylint
        files: libraries
        args: [--rcfile=.pylintrc]
        verbose: true
botbuilder-python/.pre-commit-config.yaml/0
{ "file_path": "botbuilder-python/.pre-commit-config.yaml", "repo_id": "botbuilder-python", "token_count": 108 }
398
# CoreBot

Bot Framework v4 core bot sample.

This bot has been created using [Bot Framework](https://dev.botframework.com); it shows how to:

- Use [LUIS](https://www.luis.ai) to implement core AI capabilities
- Implement a multi-turn conversation using Dialogs
- Handle user interruptions for such things as `Help` or `Cancel`
- Prompt for and validate requests for information from the user

## Prerequisites

This sample **requires** prerequisites in order to run.

### Overview

This bot uses [LUIS](https://www.luis.ai), an AI-based cognitive service, to implement language understanding.

### Install Python 3.6

### Create a LUIS Application to enable language understanding

LUIS language model setup, training, and application configuration steps can be found [here](https://docs.microsoft.com/azure/bot-service/bot-builder-howto-v4-luis?view=azure-bot-service-4.0&tabs=cs). If you wish to create a LUIS application via the CLI, these steps can be found in [README-LUIS.md](README-LUIS.md).

## Running the sample

- Run `pip install -r requirements.txt` to install all dependencies
- Update LuisAppId, LuisAPIKey and LuisAPIHostName in `config.py` with the information retrieved from the [LUIS portal](https://www.luis.ai)
- Run `python app.py`

## Testing the bot using Bot Framework Emulator

[Bot Framework Emulator](https://github.com/microsoft/botframework-emulator) is a desktop application that allows bot developers to test and debug their bots on localhost or remotely through a tunnel.

- Install the Bot Framework Emulator version 4.3.0 or greater from [here](https://github.com/Microsoft/BotFramework-Emulator/releases)

### Connect to the bot using Bot Framework Emulator

- Launch Bot Framework Emulator
- Enter a Bot URL of `http://localhost:3978/api/messages`

## Further reading

- [Bot Framework Documentation](https://docs.botframework.com)
- [Bot Basics](https://docs.microsoft.com/azure/bot-service/bot-builder-basics?view=azure-bot-service-4.0)
- [Dialogs](https://docs.microsoft.com/azure/bot-service/bot-builder-concept-dialog?view=azure-bot-service-4.0)
- [Gathering Input Using Prompts](https://docs.microsoft.com/azure/bot-service/bot-builder-prompts?view=azure-bot-service-4.0&tabs=csharp)
- [Activity processing](https://docs.microsoft.com/en-us/azure/bot-service/bot-builder-concept-activity-processing?view=azure-bot-service-4.0)
- [Azure Bot Service Introduction](https://docs.microsoft.com/azure/bot-service/bot-service-overview-introduction?view=azure-bot-service-4.0)
- [Azure Bot Service Documentation](https://docs.microsoft.com/azure/bot-service/?view=azure-bot-service-4.0)
- [.NET Core CLI tools](https://docs.microsoft.com/dotnet/core/tools/?tabs=netcore2x)
- [Azure CLI](https://docs.microsoft.com/cli/azure/?view=azure-cli-latest)
- [Azure Portal](https://portal.azure.com)
- [Language Understanding using LUIS](https://docs.microsoft.com/azure/cognitive-services/luis/)
- [Channels and Bot Connector Service](https://docs.microsoft.com/azure/bot-service/bot-concepts?view=azure-bot-service-4.0)
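The "Running the sample" section above asks you to update LuisAppId, LuisAPIKey and LuisAPIHostName in `config.py`, but the file itself isn't shown here. As a rough, hypothetical sketch (the class name and environment-variable handling below are assumptions, not taken from this sample), such a configuration module typically looks something like this:

```python
# Hypothetical sketch of a config.py for this sample; the class name and
# environment-variable names are assumptions, not copied from the sample code.
import os


class DefaultConfig:
    """Bot configuration, overridable through environment variables."""

    PORT = 3978
    APP_ID = os.environ.get("MicrosoftAppId", "")
    APP_PASSWORD = os.environ.get("MicrosoftAppPassword", "")

    # LUIS settings referenced in the README.
    LUIS_APP_ID = os.environ.get("LuisAppId", "")
    LUIS_API_KEY = os.environ.get("LuisAPIKey", "")
    # LUIS endpoint host name, e.g. "westus.api.cognitive.microsoft.com".
    LUIS_API_HOST_NAME = os.environ.get("LuisAPIHostName", "")
```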
botbuilder-python/generators/app/templates/core/{{cookiecutter.bot_name}}/README.md/0
{ "file_path": "botbuilder-python/generators/app/templates/core/{{cookiecutter.bot_name}}/README.md", "repo_id": "botbuilder-python", "token_count": 928 }
399