Dataset schema (column, dtype, value range):

    column              dtype      range
    hexsha              string     length 40 to 40
    size                int64      2 to 1.05M
    content             string     length 2 to 1.05M
    avg_line_length     float64    1.33 to 100
    max_line_length     int64      1 to 1k
    alphanum_fraction   float64    0.25 to 1
hexsha: 76e9a812614757a71964f913bf5139aa3dfb29ca | size: 1,948
// Copyright (c) SimpleStaking, Viable Systems and Tezedge Contributors
// SPDX-License-Identifier: MIT

use std::fmt;

use getset::Getters;
use serde::{Deserialize, Serialize};
use tezos_encoding::{enc::BinWriter, encoding::HasEncoding, generator::Generated, nom::NomReader};

use super::limits::CHAIN_NAME_MAX_LENGTH;

use std::hash::{Hash, Hasher};

/// Holds information about chain compatibility, features compatibility...
#[derive(Serialize, Deserialize, Getters, Clone, HasEncoding, NomReader, BinWriter, Generated)]
pub struct NetworkVersion {
    #[get = "pub"]
    #[encoding(string = "CHAIN_NAME_MAX_LENGTH")]
    chain_name: String,
    #[get = "pub"]
    distributed_db_version: u16,
    #[get = "pub"]
    p2p_version: u16,
}

impl fmt::Debug for NetworkVersion {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Version")
            .field("chain_name", &self.chain_name)
            .field("distributed_db_version", &self.distributed_db_version)
            .field("p2p_version", &self.p2p_version)
            .finish()
    }
}

impl NetworkVersion {
    pub fn new(chain_name: String, distributed_db_version: u16, p2p_version: u16) -> Self {
        NetworkVersion {
            chain_name,
            distributed_db_version,
            p2p_version,
        }
    }

    pub fn supports_nack_with_list_and_motive(&self) -> bool {
        self.p2p_version > 0
    }
}

impl Eq for NetworkVersion {}

impl PartialEq for NetworkVersion {
    fn eq(&self, other: &Self) -> bool {
        self.chain_name == other.chain_name
            && self.distributed_db_version == other.distributed_db_version
            && self.p2p_version == other.p2p_version
    }
}

impl Hash for NetworkVersion {
    fn hash<H: Hasher>(&self, state: &mut H) {
        state.write(self.chain_name.as_bytes());
        state.write_u16(self.distributed_db_version);
        state.write_u16(self.p2p_version);
    }
}
avg_line_length: 29.515152 | max_line_length: 98 | alphanum_fraction: 0.658624
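A minimal usage sketch for the type above, assuming NetworkVersion is in scope (for example from a sibling test module); "TEST_CHAIN" is a made-up placeholder, not a real Tezos chain name.

    #[test]
    fn network_version_sketch() {
        // Placeholder chain name, distributed_db_version = 2, p2p_version = 1.
        let local = NetworkVersion::new("TEST_CHAIN".to_string(), 2, 1);

        // p2p_version > 0 means NACKs may carry a peer list and a motive.
        assert!(local.supports_nack_with_list_and_motive());

        // The getset-derived getters return references to the fields.
        assert_eq!(local.chain_name(), "TEST_CHAIN");
        assert_eq!(*local.p2p_version(), 1);

        // Equality compares all three fields, matching the manual PartialEq impl.
        let remote = NetworkVersion::new("TEST_CHAIN".to_string(), 2, 1);
        assert_eq!(local, remote);
    }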
hexsha: fe112cc3887b11f53d5afac2da6569958ebf9976 | size: 20,706
use linked_hash_map::LinkedHashMap; use parser::*; use scanner::{Marker, ScanError, TScalarStyle, TokenType}; use std::collections::BTreeMap; use std::f64; use std::i64; use std::mem; use std::ops::Index; use std::string; use std::vec; /// A YAML node is stored as this `Yaml` enumeration, which provides an easy way to /// access your YAML document. /// /// # Examples /// /// ``` /// use yaml_rust::Yaml; /// let foo = Yaml::from_str("-123"); // convert the string to the appropriate YAML type /// assert_eq!(foo.as_i64().unwrap(), -123); /// /// // iterate over an Array /// let vec = Yaml::Array(vec![Yaml::Integer(1), Yaml::Integer(2)]); /// for v in vec.as_vec().unwrap() { /// assert!(v.as_i64().is_some()); /// } /// ``` #[derive(Clone, PartialEq, PartialOrd, Debug, Eq, Ord, Hash)] pub enum Yaml { /// Float types are stored as String and parsed on demand. /// Note that f64 does NOT implement Eq trait and can NOT be stored in BTreeMap. Real(string::String), /// YAML int is stored as i64. Integer(i64), /// YAML scalar. String(string::String), /// YAML bool, e.g. `true` or `false`. Boolean(bool), /// YAML array, can be accessed as a `Vec`. Array(self::Array), /// YAML hash, can be accessed as a `LinkedHashMap`. /// /// Insertion order will match the order of insertion into the map. Hash(self::Hash), /// Alias, not fully supported yet. Alias(usize), /// YAML null, e.g. `null` or `~`. Null, /// Accessing a nonexistent node via the Index trait returns `BadValue`. This /// simplifies error handling in the calling code. Invalid type conversion also /// returns `BadValue`. BadValue, } pub type Array = Vec<Yaml>; pub type Hash = LinkedHashMap<Yaml, Yaml>; // parse f64 as Core schema // See: https://github.com/chyh1990/yaml-rust/issues/51 fn parse_f64(v: &str) -> Option<f64> { match v { ".inf" | ".Inf" | ".INF" | "+.inf" | "+.Inf" | "+.INF" => Some(f64::INFINITY), "-.inf" | "-.Inf" | "-.INF" => Some(f64::NEG_INFINITY), ".nan" | "NaN" | ".NAN" => Some(f64::NAN), _ => v.parse::<f64>().ok(), } } pub struct YamlLoader { docs: Vec<Yaml>, // states // (current node, anchor_id) tuple doc_stack: Vec<(Yaml, usize)>, key_stack: Vec<Yaml>, anchor_map: BTreeMap<usize, Yaml>, } impl MarkedEventReceiver for YamlLoader { fn on_event(&mut self, ev: Event, _: Marker) { // println!("EV {:?}", ev); match ev { Event::DocumentStart => { // do nothing } Event::DocumentEnd => { match self.doc_stack.len() { // empty document 0 => self.docs.push(Yaml::BadValue), 1 => self.docs.push(self.doc_stack.pop().unwrap().0), _ => unreachable!(), } } Event::SequenceStart(aid) => { self.doc_stack.push((Yaml::Array(Vec::new()), aid)); } Event::SequenceEnd => { let node = self.doc_stack.pop().unwrap(); self.insert_new_node(node); } Event::MappingStart(aid) => { self.doc_stack.push((Yaml::Hash(Hash::new()), aid)); self.key_stack.push(Yaml::BadValue); } Event::MappingEnd => { self.key_stack.pop().unwrap(); let node = self.doc_stack.pop().unwrap(); self.insert_new_node(node); } Event::Scalar(v, style, aid, tag) => { let node = if style != TScalarStyle::Plain { Yaml::String(v) } else if let Some(TokenType::Tag(ref handle, ref suffix)) = tag { // XXX tag:yaml.org,2002: if handle == "!!" 
{ match suffix.as_ref() { "bool" => { // "true" or "false" match v.parse::<bool>() { Err(_) => Yaml::BadValue, Ok(v) => Yaml::Boolean(v), } } "int" => match v.parse::<i64>() { Err(_) => Yaml::BadValue, Ok(v) => Yaml::Integer(v), }, "float" => match parse_f64(&v) { Some(_) => Yaml::Real(v), None => Yaml::BadValue, }, "null" => match v.as_ref() { "~" | "null" => Yaml::Null, _ => Yaml::BadValue, }, _ => Yaml::String(v), } } else { Yaml::String(v) } } else { // Datatype is not specified, or unrecognized Yaml::from_str(&v) }; self.insert_new_node((node, aid)); } Event::Alias(id) => { let n = match self.anchor_map.get(&id) { Some(v) => v.clone(), None => Yaml::BadValue, }; self.insert_new_node((n, 0)); } _ => { /* ignore */ } } // println!("DOC {:?}", self.doc_stack); } } impl YamlLoader { fn insert_new_node(&mut self, node: (Yaml, usize)) { // valid anchor id starts from 1 if node.1 > 0 { self.anchor_map.insert(node.1, node.0.clone()); } if self.doc_stack.is_empty() { self.doc_stack.push(node); } else { let parent = self.doc_stack.last_mut().unwrap(); match *parent { (Yaml::Array(ref mut v), _) => v.push(node.0), (Yaml::Hash(ref mut h), _) => { let cur_key = self.key_stack.last_mut().unwrap(); // current node is a key if cur_key.is_badvalue() { *cur_key = node.0; // current node is a value } else { let mut newkey = Yaml::BadValue; mem::swap(&mut newkey, cur_key); h.insert(newkey, node.0); } } _ => unreachable!(), } } } pub fn load_from_str(source: &str) -> Result<Vec<Yaml>, ScanError> { let mut loader = YamlLoader { docs: Vec::new(), doc_stack: Vec::new(), key_stack: Vec::new(), anchor_map: BTreeMap::new(), }; let mut parser = Parser::new(source.chars()); parser.load(&mut loader, true)?; Ok(loader.docs) } } macro_rules! define_as ( ($name:ident, $t:ident, $yt:ident) => ( pub fn $name(&self) -> Option<$t> { match *self { Yaml::$yt(v) => Some(v), _ => None } } ); ); macro_rules! define_as_ref ( ($name:ident, $t:ty, $yt:ident) => ( pub fn $name(&self) -> Option<$t> { match *self { Yaml::$yt(ref v) => Some(v), _ => None } } ); ); macro_rules! define_into ( ($name:ident, $t:ty, $yt:ident) => ( pub fn $name(self) -> Option<$t> { match self { Yaml::$yt(v) => Some(v), _ => None } } ); ); impl Yaml { define_as!(as_bool, bool, Boolean); define_as!(as_i64, i64, Integer); define_as_ref!(as_str, &str, String); define_as_ref!(as_hash, &Hash, Hash); define_as_ref!(as_vec, &Array, Array); define_into!(into_bool, bool, Boolean); define_into!(into_i64, i64, Integer); define_into!(into_string, String, String); define_into!(into_hash, Hash, Hash); define_into!(into_vec, Array, Array); pub fn is_null(&self) -> bool { match *self { Yaml::Null => true, _ => false, } } pub fn is_badvalue(&self) -> bool { match *self { Yaml::BadValue => true, _ => false, } } pub fn is_array(&self) -> bool { match *self { Yaml::Array(_) => true, _ => false, } } pub fn as_f64(&self) -> Option<f64> { match *self { Yaml::Real(ref v) => parse_f64(v), _ => None, } } pub fn into_f64(self) -> Option<f64> { match self { Yaml::Real(ref v) => parse_f64(v), _ => None, } } } #[cfg_attr(feature = "cargo-clippy", allow(should_implement_trait))] impl Yaml { // Not implementing FromStr because there is no possibility of Error. // This function falls back to Yaml::String if nothing else matches. 
pub fn from_str(v: &str) -> Yaml { if v.starts_with("0x") { let n = i64::from_str_radix(&v[2..], 16); if n.is_ok() { return Yaml::Integer(n.unwrap()); } } if v.starts_with("0o") { let n = i64::from_str_radix(&v[2..], 8); if n.is_ok() { return Yaml::Integer(n.unwrap()); } } if v.starts_with('+') && v[1..].parse::<i64>().is_ok() { return Yaml::Integer(v[1..].parse::<i64>().unwrap()); } match v { "~" | "null" => Yaml::Null, "true" => Yaml::Boolean(true), "false" => Yaml::Boolean(false), _ if v.parse::<i64>().is_ok() => Yaml::Integer(v.parse::<i64>().unwrap()), // try parsing as f64 _ if parse_f64(v).is_some() => Yaml::Real(v.to_owned()), _ => Yaml::String(v.to_owned()), } } } static BAD_VALUE: Yaml = Yaml::BadValue; impl<'a> Index<&'a str> for Yaml { type Output = Yaml; fn index(&self, idx: &'a str) -> &Yaml { let key = Yaml::String(idx.to_owned()); match self.as_hash() { Some(h) => h.get(&key).unwrap_or(&BAD_VALUE), None => &BAD_VALUE, } } } impl Index<usize> for Yaml { type Output = Yaml; fn index(&self, idx: usize) -> &Yaml { if let Some(v) = self.as_vec() { v.get(idx).unwrap_or(&BAD_VALUE) } else if let Some(v) = self.as_hash() { let key = Yaml::Integer(idx as i64); v.get(&key).unwrap_or(&BAD_VALUE) } else { &BAD_VALUE } } } impl IntoIterator for Yaml { type Item = Yaml; type IntoIter = YamlIter; fn into_iter(self) -> Self::IntoIter { YamlIter { yaml: self.into_vec().unwrap_or_else(Vec::new).into_iter(), } } } pub struct YamlIter { yaml: vec::IntoIter<Yaml>, } impl Iterator for YamlIter { type Item = Yaml; fn next(&mut self) -> Option<Yaml> { self.yaml.next() } } #[cfg(test)] mod test { use std::f64; use yaml::*; #[test] fn test_coerce() { let s = "--- a: 1 b: 2.2 c: [1, 2] "; let out = YamlLoader::load_from_str(&s).unwrap(); let doc = &out[0]; assert_eq!(doc["a"].as_i64().unwrap(), 1i64); assert_eq!(doc["b"].as_f64().unwrap(), 2.2f64); assert_eq!(doc["c"][1].as_i64().unwrap(), 2i64); assert!(doc["d"][0].is_badvalue()); } #[test] fn test_empty_doc() { let s: String = "".to_owned(); YamlLoader::load_from_str(&s).unwrap(); let s: String = "---".to_owned(); assert_eq!(YamlLoader::load_from_str(&s).unwrap()[0], Yaml::Null); } #[test] fn test_parser() { let s: String = " # comment a0 bb: val a1: b1: 4 b2: d a2: 4 # i'm comment a3: [1, 2, 3] a4: - - a1 - a2 - 2 a5: 'single_quoted' a6: \"double_quoted\" a7: 你好 " .to_owned(); let out = YamlLoader::load_from_str(&s).unwrap(); let doc = &out[0]; assert_eq!(doc["a7"].as_str().unwrap(), "你好"); } #[test] fn test_multi_doc() { let s = " 'a scalar' --- 'a scalar' --- 'a scalar' "; let out = YamlLoader::load_from_str(&s).unwrap(); assert_eq!(out.len(), 3); } #[test] fn test_anchor() { let s = " a1: &DEFAULT b1: 4 b2: d a2: *DEFAULT "; let out = YamlLoader::load_from_str(&s).unwrap(); let doc = &out[0]; assert_eq!(doc["a2"]["b1"].as_i64().unwrap(), 4); } #[test] fn test_bad_anchor() { let s = " a1: &DEFAULT b1: 4 b2: *DEFAULT "; let out = YamlLoader::load_from_str(&s).unwrap(); let doc = &out[0]; assert_eq!(doc["a1"]["b2"], Yaml::BadValue); } #[test] fn test_github_27() { // https://github.com/chyh1990/yaml-rust/issues/27 let s = "&a"; let out = YamlLoader::load_from_str(&s).unwrap(); let doc = &out[0]; assert_eq!(doc.as_str().unwrap(), ""); } #[test] fn test_plain_datatype() { let s = " - 'string' - \"string\" - string - 123 - -321 - 1.23 - -1e4 - ~ - null - true - false - !!str 0 - !!int 100 - !!float 2 - !!null ~ - !!bool true - !!bool false - 0xFF # bad values - !!int string - !!float string - !!bool null - !!null val - 0o77 - [ 0xF, 0xF ] - +12345 - 
[ true, false ] "; let out = YamlLoader::load_from_str(&s).unwrap(); let doc = &out[0]; assert_eq!(doc[0].as_str().unwrap(), "string"); assert_eq!(doc[1].as_str().unwrap(), "string"); assert_eq!(doc[2].as_str().unwrap(), "string"); assert_eq!(doc[3].as_i64().unwrap(), 123); assert_eq!(doc[4].as_i64().unwrap(), -321); assert_eq!(doc[5].as_f64().unwrap(), 1.23); assert_eq!(doc[6].as_f64().unwrap(), -1e4); assert!(doc[7].is_null()); assert!(doc[8].is_null()); assert_eq!(doc[9].as_bool().unwrap(), true); assert_eq!(doc[10].as_bool().unwrap(), false); assert_eq!(doc[11].as_str().unwrap(), "0"); assert_eq!(doc[12].as_i64().unwrap(), 100); assert_eq!(doc[13].as_f64().unwrap(), 2.0); assert!(doc[14].is_null()); assert_eq!(doc[15].as_bool().unwrap(), true); assert_eq!(doc[16].as_bool().unwrap(), false); assert_eq!(doc[17].as_i64().unwrap(), 255); assert!(doc[18].is_badvalue()); assert!(doc[19].is_badvalue()); assert!(doc[20].is_badvalue()); assert!(doc[21].is_badvalue()); assert_eq!(doc[22].as_i64().unwrap(), 63); assert_eq!(doc[23][0].as_i64().unwrap(), 15); assert_eq!(doc[23][1].as_i64().unwrap(), 15); assert_eq!(doc[24].as_i64().unwrap(), 12345); assert!(doc[25][0].as_bool().unwrap()); assert!(!doc[25][1].as_bool().unwrap()); } #[test] fn test_bad_hypen() { // See: https://github.com/chyh1990/yaml-rust/issues/23 let s = "{-"; assert!(YamlLoader::load_from_str(&s).is_err()); } #[test] fn test_issue_65() { // See: https://github.com/chyh1990/yaml-rust/issues/65 let b = "\n\"ll\\\"ll\\\r\n\"ll\\\"ll\\\r\r\r\rU\r\r\rU"; assert!(YamlLoader::load_from_str(&b).is_err()); } #[test] fn test_bad_docstart() { assert!(YamlLoader::load_from_str("---This used to cause an infinite loop").is_ok()); assert_eq!( YamlLoader::load_from_str("----"), Ok(vec![Yaml::String(String::from("----"))]) ); assert_eq!( YamlLoader::load_from_str("--- #here goes a comment"), Ok(vec![Yaml::Null]) ); assert_eq!( YamlLoader::load_from_str("---- #here goes a comment"), Ok(vec![Yaml::String(String::from("----"))]) ); } #[test] fn test_plain_datatype_with_into_methods() { let s = " - 'string' - \"string\" - string - 123 - -321 - 1.23 - -1e4 - true - false - !!str 0 - !!int 100 - !!float 2 - !!bool true - !!bool false - 0xFF - 0o77 - +12345 - -.INF - .NAN - !!float .INF "; let mut out = YamlLoader::load_from_str(&s).unwrap().into_iter(); let mut doc = out.next().unwrap().into_iter(); assert_eq!(doc.next().unwrap().into_string().unwrap(), "string"); assert_eq!(doc.next().unwrap().into_string().unwrap(), "string"); assert_eq!(doc.next().unwrap().into_string().unwrap(), "string"); assert_eq!(doc.next().unwrap().into_i64().unwrap(), 123); assert_eq!(doc.next().unwrap().into_i64().unwrap(), -321); assert_eq!(doc.next().unwrap().into_f64().unwrap(), 1.23); assert_eq!(doc.next().unwrap().into_f64().unwrap(), -1e4); assert_eq!(doc.next().unwrap().into_bool().unwrap(), true); assert_eq!(doc.next().unwrap().into_bool().unwrap(), false); assert_eq!(doc.next().unwrap().into_string().unwrap(), "0"); assert_eq!(doc.next().unwrap().into_i64().unwrap(), 100); assert_eq!(doc.next().unwrap().into_f64().unwrap(), 2.0); assert_eq!(doc.next().unwrap().into_bool().unwrap(), true); assert_eq!(doc.next().unwrap().into_bool().unwrap(), false); assert_eq!(doc.next().unwrap().into_i64().unwrap(), 255); assert_eq!(doc.next().unwrap().into_i64().unwrap(), 63); assert_eq!(doc.next().unwrap().into_i64().unwrap(), 12345); assert_eq!(doc.next().unwrap().into_f64().unwrap(), f64::NEG_INFINITY); assert!(doc.next().unwrap().into_f64().is_some()); 
assert_eq!(doc.next().unwrap().into_f64().unwrap(), f64::INFINITY); } #[test] fn test_hash_order() { let s = "--- b: ~ a: ~ c: ~ "; let out = YamlLoader::load_from_str(&s).unwrap(); let first = out.into_iter().next().unwrap(); let mut iter = first.into_hash().unwrap().into_iter(); assert_eq!( Some((Yaml::String("b".to_owned()), Yaml::Null)), iter.next() ); assert_eq!( Some((Yaml::String("a".to_owned()), Yaml::Null)), iter.next() ); assert_eq!( Some((Yaml::String("c".to_owned()), Yaml::Null)), iter.next() ); assert_eq!(None, iter.next()); } #[test] fn test_integer_key() { let s = " 0: important: true 1: important: false "; let out = YamlLoader::load_from_str(&s).unwrap(); let first = out.into_iter().next().unwrap(); assert_eq!(first[0]["important"].as_bool().unwrap(), true); } #[test] fn test_indentation_equality() { let four_spaces = YamlLoader::load_from_str( r#" hash: with: indentations "#, ) .unwrap() .into_iter() .next() .unwrap(); let two_spaces = YamlLoader::load_from_str( r#" hash: with: indentations "#, ) .unwrap() .into_iter() .next() .unwrap(); let one_space = YamlLoader::load_from_str( r#" hash: with: indentations "#, ) .unwrap() .into_iter() .next() .unwrap(); let mixed_spaces = YamlLoader::load_from_str( r#" hash: with: indentations "#, ) .unwrap() .into_iter() .next() .unwrap(); assert_eq!(four_spaces, two_spaces); assert_eq!(two_spaces, one_space); assert_eq!(four_spaces, mixed_spaces); } #[test] fn test_two_space_indentations() { // https://github.com/kbknapp/clap-rs/issues/965 let s = r#" subcommands: - server: about: server related commands subcommands2: - server: about: server related commands subcommands3: - server: about: server related commands "#; let out = YamlLoader::load_from_str(&s).unwrap(); let doc = &out.into_iter().next().unwrap(); println!("{:#?}", doc); assert_eq!(doc["subcommands"][0]["server"], Yaml::Null); assert!(doc["subcommands2"][0]["server"].as_hash().is_some()); assert!(doc["subcommands3"][0]["server"].as_hash().is_some()); } #[test] fn test_recursion_depth_check_objects() { let s = "{a:".repeat(10_000) + &"}".repeat(10_000); assert!(YamlLoader::load_from_str(&s).is_err()); } #[test] fn test_recursion_depth_check_arrays() { let s = "[".repeat(10_000) + &"]".repeat(10_000); assert!(YamlLoader::load_from_str(&s).is_err()); } }
avg_line_length: 27.981081 | max_line_length: 93 | alphanum_fraction: 0.495364
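The file above already carries doc examples and tests; the short sketch below is purely illustrative (not part of the original source) and uses only the API quoted above: YamlLoader::load_from_str, the Index<&str> impl, as_i64, and is_badvalue.

    #[test]
    fn yaml_loader_sketch() {
        // Load a single-document YAML string.
        let docs = YamlLoader::load_from_str("a: 1").unwrap();
        let doc = &docs[0];

        // Plain scalars are coerced on load, so "1" becomes Yaml::Integer(1).
        assert_eq!(doc["a"].as_i64(), Some(1));

        // Indexing a missing key yields Yaml::BadValue instead of panicking.
        assert!(doc["missing"].is_badvalue());
    }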
hexsha: bb00fc5d2d5d5bf3e5f206b2db4963ca9bc5bc41 | size: 4,863
use super::{Contract, ContractVariable, Symbol}; use output::Output; use parser::ast; use resolver::cfg::{ControlFlowGraph, Instr, Storage, Vartable}; use resolver::expression::{cast, expression, Expression}; use resolver::ContractVariableType; pub fn contract_variables( def: &ast::ContractDefinition, ns: &mut Contract, errors: &mut Vec<Output>, ) -> bool { let mut broken = false; let mut vartab = Vartable::new(); let mut cfg = ControlFlowGraph::new(); for parts in &def.parts { if let ast::ContractPart::ContractVariableDefinition(ref s) = parts { if !var_decl(s, ns, &mut cfg, &mut vartab, errors) { broken = true; } } } cfg.add(&mut vartab, Instr::Return { value: Vec::new() }); cfg.vars = vartab.drain(); ns.initializer = cfg; broken } fn var_decl( s: &ast::ContractVariableDefinition, ns: &mut Contract, cfg: &mut ControlFlowGraph, vartab: &mut Vartable, errors: &mut Vec<Output>, ) -> bool { let ty = match ns.resolve_type(&s.ty, errors) { Ok(s) => s, Err(()) => { return false; } }; let mut is_constant = false; let mut visibility: Option<ast::Visibility> = None; for attr in &s.attrs { match &attr { ast::VariableAttribute::Constant(loc) => { if is_constant { errors.push(Output::warning( *loc, "duplicate constant attribute".to_string(), )); } is_constant = true; } ast::VariableAttribute::Visibility(ast::Visibility::External(loc)) => { errors.push(Output::error( *loc, "variable cannot be declared external".to_string(), )); return false; } ast::VariableAttribute::Visibility(v) => { if let Some(e) = &visibility { errors.push(Output::error_with_note( v.loc(), format!("variable visibility redeclared `{}'", v.to_string()), e.loc(), format!("location of previous declaration of `{}'", e.to_string()), )); return false; } visibility = Some(v.clone()); } } } let visibility = match visibility { Some(v) => v, None => ast::Visibility::Private(ast::Loc(0, 0)), }; let var = if !is_constant { let storage = ns.top_of_contract_storage.clone(); ns.top_of_contract_storage += ty.storage_slots(ns); ContractVariableType::Storage(storage) } else { ContractVariableType::Constant(ns.constants.len()) }; let initializer = if let Some(initializer) = &s.initializer { let expr = if is_constant { expression(&initializer, cfg, &ns, &mut None, errors) } else { expression(&initializer, cfg, &ns, &mut Some(vartab), errors) }; let (res, resty) = match expr { Ok((res, ty)) => (res, ty), Err(()) => return false, }; // implicityly conversion to correct ty let res = match cast(&s.loc, res, &resty, &ty, true, &ns, errors) { Ok(res) => res, Err(_) => return false, }; Some(res) } else { if is_constant { errors.push(Output::decl_error( s.loc, "missing initializer for constant".to_string(), )); return false; } None }; let sdecl = ContractVariable { name: s.name.name.to_string(), doc: s.doc.clone(), visibility, ty: ty.clone(), var, }; let pos = ns.variables.len(); ns.variables.push(sdecl); if !ns.add_symbol(&s.name, Symbol::Variable(s.loc, pos), errors) { return false; } if let Some(res) = initializer { if is_constant { ns.constants.push(res); } else { let var = vartab.find(&s.name, ns, errors).unwrap(); let loc = res.loc(); cfg.add( vartab, Instr::Set { res: var.pos, expr: res, }, ); if let Storage::Contract(offset) = &var.storage { cfg.add( vartab, Instr::SetStorage { ty, local: var.pos, storage: Expression::NumberLiteral(loc, 256, offset.clone()), }, ); } } } true }
avg_line_length: 27.788571 | max_line_length: 91 | alphanum_fraction: 0.481596
hexsha: 08b58c523b5e3c618653d42b16b00445b66de38f | size: 23,758
use anyhow::Result; use bevy_asset::{ AssetIoError, AssetLoader, AssetPath, BoxedFuture, Handle, LoadContext, LoadedAsset, }; use bevy_core::Name; use bevy_ecs::world::World; use bevy_math::Mat4; use bevy_pbr::prelude::{PbrBundle, StandardMaterial}; use bevy_render::{ camera::{ Camera, CameraProjection, OrthographicProjection, PerspectiveProjection, VisibleEntities, }, mesh::{Indices, Mesh, VertexAttributeValues}, pipeline::PrimitiveTopology, prelude::{Color, Texture}, render_graph::base, texture::{AddressMode, FilterMode, ImageType, SamplerDescriptor, TextureError}, }; use bevy_scene::Scene; use bevy_transform::{ hierarchy::{BuildWorldChildren, WorldChildBuilder}, prelude::{GlobalTransform, Transform}, }; use gltf::{ mesh::Mode, texture::{MagFilter, MinFilter, WrappingMode}, Material, Primitive, }; use std::{collections::HashMap, path::Path}; use thiserror::Error; use crate::{Gltf, GltfNode}; /// An error that occurs when loading a GLTF file #[derive(Error, Debug)] pub enum GltfError { #[error("unsupported primitive mode")] UnsupportedPrimitive { mode: Mode }, #[error("invalid GLTF file")] Gltf(#[from] gltf::Error), #[error("binary blob is missing")] MissingBlob, #[error("failed to decode base64 mesh data")] Base64Decode(#[from] base64::DecodeError), #[error("unsupported buffer format")] BufferFormatUnsupported, #[error("invalid image mime type")] InvalidImageMimeType(String), #[error("failed to load an image")] ImageError(#[from] TextureError), #[error("failed to load an asset path")] AssetIoError(#[from] AssetIoError), } /// Loads meshes from GLTF files into Mesh assets #[derive(Default)] pub struct GltfLoader; impl AssetLoader for GltfLoader { fn load<'a>( &'a self, bytes: &'a [u8], load_context: &'a mut LoadContext, ) -> BoxedFuture<'a, Result<()>> { Box::pin(async move { Ok(load_gltf(bytes, load_context).await?) 
}) } fn extensions(&self) -> &[&str] { &["gltf", "glb"] } } async fn load_gltf<'a, 'b>( bytes: &'a [u8], load_context: &'a mut LoadContext<'b>, ) -> Result<(), GltfError> { let gltf = gltf::Gltf::from_slice(bytes)?; let buffer_data = load_buffers(&gltf, load_context, load_context.path()).await?; let mut materials = vec![]; let mut named_materials = HashMap::new(); for material in gltf.materials() { let handle = load_material(&material, load_context); if let Some(name) = material.name() { named_materials.insert(name.to_string(), handle.clone()); } materials.push(handle); } let mut meshes = vec![]; let mut named_meshes = HashMap::new(); for mesh in gltf.meshes() { let mut primitives = vec![]; for primitive in mesh.primitives() { let primitive_label = primitive_label(&mesh, &primitive); let reader = primitive.reader(|buffer| Some(&buffer_data[buffer.index()])); let primitive_topology = get_primitive_topology(primitive.mode())?; let mut mesh = Mesh::new(primitive_topology); if let Some(vertex_attribute) = reader .read_positions() .map(|v| VertexAttributeValues::Float3(v.collect())) { mesh.set_attribute(Mesh::ATTRIBUTE_POSITION, vertex_attribute); } if let Some(vertex_attribute) = reader .read_normals() .map(|v| VertexAttributeValues::Float3(v.collect())) { mesh.set_attribute(Mesh::ATTRIBUTE_NORMAL, vertex_attribute); } if let Some(vertex_attribute) = reader .read_tex_coords(0) .map(|v| VertexAttributeValues::Float2(v.into_f32().collect())) { mesh.set_attribute(Mesh::ATTRIBUTE_UV_0, vertex_attribute); } if let Some(indices) = reader.read_indices() { mesh.set_indices(Some(Indices::U32(indices.into_u32().collect()))); }; let mesh = load_context.set_labeled_asset(&primitive_label, LoadedAsset::new(mesh)); primitives.push(super::GltfPrimitive { mesh, material: primitive .material() .index() .and_then(|i| materials.get(i).cloned()), }); } let handle = load_context.set_labeled_asset( &mesh_label(&mesh), LoadedAsset::new(super::GltfMesh { primitives }), ); if let Some(name) = mesh.name() { named_meshes.insert(name.to_string(), handle.clone()); } meshes.push(handle); } let mut nodes_intermediate = vec![]; let mut named_nodes_intermediate = HashMap::new(); for node in gltf.nodes() { let node_label = node_label(&node); nodes_intermediate.push(( node_label, GltfNode { children: vec![], mesh: node .mesh() .map(|mesh| mesh.index()) .and_then(|i| meshes.get(i).cloned()), transform: match node.transform() { gltf::scene::Transform::Matrix { matrix } => { Transform::from_matrix(bevy_math::Mat4::from_cols_array_2d(&matrix)) } gltf::scene::Transform::Decomposed { translation, rotation, scale, } => Transform { translation: bevy_math::Vec3::from(translation), rotation: bevy_math::Quat::from(rotation), scale: bevy_math::Vec3::from(scale), }, }, }, node.children() .map(|child| child.index()) .collect::<Vec<_>>(), )); if let Some(name) = node.name() { named_nodes_intermediate.insert(name, node.index()); } } let nodes = resolve_node_hierarchy(nodes_intermediate) .into_iter() .map(|(label, node)| load_context.set_labeled_asset(&label, LoadedAsset::new(node))) .collect::<Vec<bevy_asset::Handle<GltfNode>>>(); let named_nodes = named_nodes_intermediate .into_iter() .filter_map(|(name, index)| { nodes .get(index) .map(|handle| (name.to_string(), handle.clone())) }) .collect(); for gltf_texture in gltf.textures() { if let gltf::image::Source::View { view, mime_type } = gltf_texture.source().source() { let start = view.offset() as usize; let end = (view.offset() + view.length()) as usize; let buffer = 
&buffer_data[view.buffer().index()][start..end]; let texture_label = texture_label(&gltf_texture); let mut texture = Texture::from_buffer(buffer, ImageType::MimeType(mime_type))?; texture.sampler = texture_sampler(&gltf_texture); load_context.set_labeled_asset::<Texture>(&texture_label, LoadedAsset::new(texture)); } } let mut scenes = vec![]; let mut named_scenes = HashMap::new(); for scene in gltf.scenes() { let mut err = None; let mut world = World::default(); world .spawn() .insert_bundle((Transform::identity(), GlobalTransform::identity())) .with_children(|parent| { for node in scene.nodes() { let result = load_node(&node, parent, load_context, &buffer_data); if result.is_err() { err = Some(result); return; } } }); if let Some(Err(err)) = err { return Err(err); } let scene_handle = load_context .set_labeled_asset(&scene_label(&scene), LoadedAsset::new(Scene::new(world))); if let Some(name) = scene.name() { named_scenes.insert(name.to_string(), scene_handle.clone()); } scenes.push(scene_handle); } load_context.set_default_asset(LoadedAsset::new(Gltf { default_scene: gltf .default_scene() .and_then(|scene| scenes.get(scene.index())) .cloned(), scenes, named_scenes, meshes, named_meshes, materials, named_materials, nodes, named_nodes, })); Ok(()) } fn load_material(material: &Material, load_context: &mut LoadContext) -> Handle<StandardMaterial> { let material_label = material_label(&material); let pbr = material.pbr_metallic_roughness(); let mut dependencies = Vec::new(); let texture_handle = if let Some(info) = pbr.base_color_texture() { match info.texture().source().source() { gltf::image::Source::View { .. } => { let label = texture_label(&info.texture()); let path = AssetPath::new_ref(load_context.path(), Some(&label)); Some(load_context.get_handle(path)) } gltf::image::Source::Uri { uri, .. 
} => { let parent = load_context.path().parent().unwrap(); let image_path = parent.join(uri); let asset_path = AssetPath::new(image_path, None); let handle = load_context.get_handle(asset_path.clone()); dependencies.push(asset_path); Some(handle) } } } else { None }; let color = pbr.base_color_factor(); load_context.set_labeled_asset( &material_label, LoadedAsset::new(StandardMaterial { base_color: Color::rgba(color[0], color[1], color[2], color[3]), base_color_texture: texture_handle, roughness: pbr.roughness_factor(), metallic: pbr.metallic_factor(), unlit: material.unlit(), ..Default::default() }) .with_dependencies(dependencies), ) } fn load_node( gltf_node: &gltf::Node, world_builder: &mut WorldChildBuilder, load_context: &mut LoadContext, buffer_data: &[Vec<u8>], ) -> Result<(), GltfError> { let transform = gltf_node.transform(); let mut gltf_error = None; let mut node = world_builder.spawn_bundle(( Transform::from_matrix(Mat4::from_cols_array_2d(&transform.matrix())), GlobalTransform::identity(), )); if let Some(name) = gltf_node.name() { node.insert(Name::new(name.to_string())); } // create camera node if let Some(camera) = gltf_node.camera() { node.insert(VisibleEntities { ..Default::default() }); match camera.projection() { gltf::camera::Projection::Orthographic(orthographic) => { let xmag = orthographic.xmag(); let ymag = orthographic.ymag(); let orthographic_projection: OrthographicProjection = OrthographicProjection { left: -xmag, right: xmag, top: ymag, bottom: -ymag, far: orthographic.zfar(), near: orthographic.znear(), ..Default::default() }; node.insert(Camera { name: Some(base::camera::CAMERA_2D.to_owned()), projection_matrix: orthographic_projection.get_projection_matrix(), ..Default::default() }); node.insert(orthographic_projection); } gltf::camera::Projection::Perspective(perspective) => { let mut perspective_projection: PerspectiveProjection = PerspectiveProjection { fov: perspective.yfov(), near: perspective.znear(), ..Default::default() }; if let Some(zfar) = perspective.zfar() { perspective_projection.far = zfar; } if let Some(aspect_ratio) = perspective.aspect_ratio() { perspective_projection.aspect_ratio = aspect_ratio; } node.insert(Camera { name: Some(base::camera::CAMERA_3D.to_owned()), projection_matrix: perspective_projection.get_projection_matrix(), ..Default::default() }); node.insert(perspective_projection); } } } node.with_children(|parent| { if let Some(mesh) = gltf_node.mesh() { // append primitives for primitive in mesh.primitives() { let material = primitive.material(); let material_label = material_label(&material); // This will make sure we load the default material now since it would not have been // added when iterating over all the gltf materials (since the default material is // not explicitly listed in the gltf). 
if !load_context.has_labeled_asset(&material_label) { load_material(&material, load_context); } let primitive_label = primitive_label(&mesh, &primitive); let mesh_asset_path = AssetPath::new_ref(load_context.path(), Some(&primitive_label)); let material_asset_path = AssetPath::new_ref(load_context.path(), Some(&material_label)); parent.spawn_bundle(PbrBundle { mesh: load_context.get_handle(mesh_asset_path), material: load_context.get_handle(material_asset_path), ..Default::default() }); } } // append other nodes for child in gltf_node.children() { if let Err(err) = load_node(&child, parent, load_context, buffer_data) { gltf_error = Some(err); return; } } }); if let Some(err) = gltf_error { Err(err) } else { Ok(()) } } fn mesh_label(mesh: &gltf::Mesh) -> String { format!("Mesh{}", mesh.index()) } fn primitive_label(mesh: &gltf::Mesh, primitive: &Primitive) -> String { format!("Mesh{}/Primitive{}", mesh.index(), primitive.index()) } fn material_label(material: &gltf::Material) -> String { if let Some(index) = material.index() { format!("Material{}", index) } else { "MaterialDefault".to_string() } } fn texture_label(texture: &gltf::Texture) -> String { format!("Texture{}", texture.index()) } fn node_label(node: &gltf::Node) -> String { format!("Node{}", node.index()) } fn scene_label(scene: &gltf::Scene) -> String { format!("Scene{}", scene.index()) } fn texture_sampler(texture: &gltf::Texture) -> SamplerDescriptor { let gltf_sampler = texture.sampler(); SamplerDescriptor { address_mode_u: texture_address_mode(&gltf_sampler.wrap_s()), address_mode_v: texture_address_mode(&gltf_sampler.wrap_t()), mag_filter: gltf_sampler .mag_filter() .map(|mf| match mf { MagFilter::Nearest => FilterMode::Nearest, MagFilter::Linear => FilterMode::Linear, }) .unwrap_or(SamplerDescriptor::default().mag_filter), min_filter: gltf_sampler .min_filter() .map(|mf| match mf { MinFilter::Nearest | MinFilter::NearestMipmapNearest | MinFilter::NearestMipmapLinear => FilterMode::Nearest, MinFilter::Linear | MinFilter::LinearMipmapNearest | MinFilter::LinearMipmapLinear => FilterMode::Linear, }) .unwrap_or(SamplerDescriptor::default().min_filter), mipmap_filter: gltf_sampler .min_filter() .map(|mf| match mf { MinFilter::Nearest | MinFilter::Linear | MinFilter::NearestMipmapNearest | MinFilter::LinearMipmapNearest => FilterMode::Nearest, MinFilter::NearestMipmapLinear | MinFilter::LinearMipmapLinear => { FilterMode::Linear } }) .unwrap_or(SamplerDescriptor::default().mipmap_filter), ..Default::default() } } fn texture_address_mode(gltf_address_mode: &gltf::texture::WrappingMode) -> AddressMode { match gltf_address_mode { WrappingMode::ClampToEdge => AddressMode::ClampToEdge, WrappingMode::Repeat => AddressMode::Repeat, WrappingMode::MirroredRepeat => AddressMode::MirrorRepeat, } } fn get_primitive_topology(mode: Mode) -> Result<PrimitiveTopology, GltfError> { match mode { Mode::Points => Ok(PrimitiveTopology::PointList), Mode::Lines => Ok(PrimitiveTopology::LineList), Mode::LineStrip => Ok(PrimitiveTopology::LineStrip), Mode::Triangles => Ok(PrimitiveTopology::TriangleList), Mode::TriangleStrip => Ok(PrimitiveTopology::TriangleStrip), mode => Err(GltfError::UnsupportedPrimitive { mode }), } } async fn load_buffers( gltf: &gltf::Gltf, load_context: &LoadContext<'_>, asset_path: &Path, ) -> Result<Vec<Vec<u8>>, GltfError> { const OCTET_STREAM_URI: &str = "data:application/octet-stream;base64,"; let mut buffer_data = Vec::new(); for buffer in gltf.buffers() { match buffer.source() { gltf::buffer::Source::Uri(uri) => { if 
uri.starts_with("data:") { buffer_data.push(base64::decode( uri.strip_prefix(OCTET_STREAM_URI) .ok_or(GltfError::BufferFormatUnsupported)?, )?); } else { // TODO: Remove this and add dep let buffer_path = asset_path.parent().unwrap().join(uri); let buffer_bytes = load_context.read_asset_bytes(buffer_path).await?; buffer_data.push(buffer_bytes); } } gltf::buffer::Source::Bin => { if let Some(blob) = gltf.blob.as_deref() { buffer_data.push(blob.into()); } else { return Err(GltfError::MissingBlob); } } } } Ok(buffer_data) } fn resolve_node_hierarchy( nodes_intermediate: Vec<(String, GltfNode, Vec<usize>)>, ) -> Vec<(String, GltfNode)> { let mut max_steps = nodes_intermediate.len(); let mut nodes_step = nodes_intermediate .into_iter() .enumerate() .map(|(i, (label, node, children))| (i, label, node, children)) .collect::<Vec<_>>(); let mut nodes = std::collections::HashMap::<usize, (String, GltfNode)>::new(); while max_steps > 0 && !nodes_step.is_empty() { if let Some((index, label, node, _)) = nodes_step .iter() .find(|(_, _, _, children)| children.is_empty()) .cloned() { nodes.insert(index, (label, node)); for (_, _, node, children) in nodes_step.iter_mut() { if let Some((i, _)) = children .iter() .enumerate() .find(|(_, child_index)| **child_index == index) { children.remove(i); if let Some((_, child_node)) = nodes.get(&index) { node.children.push(child_node.clone()) } } } nodes_step = nodes_step .into_iter() .filter(|(i, _, _, _)| *i != index) .collect() } max_steps -= 1; } let mut nodes_to_sort = nodes.into_iter().collect::<Vec<_>>(); nodes_to_sort.sort_by_key(|(i, _)| *i); nodes_to_sort .into_iter() .map(|(_, resolved)| resolved) .collect() } #[cfg(test)] mod test { use super::resolve_node_hierarchy; use crate::GltfNode; impl GltfNode { fn empty() -> Self { GltfNode { children: vec![], mesh: None, transform: bevy_transform::prelude::Transform::identity(), } } } #[test] fn node_hierarchy_single_node() { let result = resolve_node_hierarchy(vec![("l1".to_string(), GltfNode::empty(), vec![])]); assert_eq!(result.len(), 1); assert_eq!(result[0].0, "l1"); assert_eq!(result[0].1.children.len(), 0); } #[test] fn node_hierarchy_no_hierarchy() { let result = resolve_node_hierarchy(vec![ ("l1".to_string(), GltfNode::empty(), vec![]), ("l2".to_string(), GltfNode::empty(), vec![]), ]); assert_eq!(result.len(), 2); assert_eq!(result[0].0, "l1"); assert_eq!(result[0].1.children.len(), 0); assert_eq!(result[1].0, "l2"); assert_eq!(result[1].1.children.len(), 0); } #[test] fn node_hierarchy_simple_hierarchy() { let result = resolve_node_hierarchy(vec![ ("l1".to_string(), GltfNode::empty(), vec![1]), ("l2".to_string(), GltfNode::empty(), vec![]), ]); assert_eq!(result.len(), 2); assert_eq!(result[0].0, "l1"); assert_eq!(result[0].1.children.len(), 1); assert_eq!(result[1].0, "l2"); assert_eq!(result[1].1.children.len(), 0); } #[test] fn node_hierarchy_hierarchy() { let result = resolve_node_hierarchy(vec![ ("l1".to_string(), GltfNode::empty(), vec![1]), ("l2".to_string(), GltfNode::empty(), vec![2]), ("l3".to_string(), GltfNode::empty(), vec![3, 4, 5]), ("l4".to_string(), GltfNode::empty(), vec![6]), ("l5".to_string(), GltfNode::empty(), vec![]), ("l6".to_string(), GltfNode::empty(), vec![]), ("l7".to_string(), GltfNode::empty(), vec![]), ]); assert_eq!(result.len(), 7); assert_eq!(result[0].0, "l1"); assert_eq!(result[0].1.children.len(), 1); assert_eq!(result[1].0, "l2"); assert_eq!(result[1].1.children.len(), 1); assert_eq!(result[2].0, "l3"); assert_eq!(result[2].1.children.len(), 3); 
assert_eq!(result[3].0, "l4"); assert_eq!(result[3].1.children.len(), 1); assert_eq!(result[4].0, "l5"); assert_eq!(result[4].1.children.len(), 0); assert_eq!(result[5].0, "l6"); assert_eq!(result[5].1.children.len(), 0); assert_eq!(result[6].0, "l7"); assert_eq!(result[6].1.children.len(), 0); } #[test] fn node_hierarchy_cyclic() { let result = resolve_node_hierarchy(vec![ ("l1".to_string(), GltfNode::empty(), vec![1]), ("l2".to_string(), GltfNode::empty(), vec![0]), ]); assert_eq!(result.len(), 0); } #[test] fn node_hierarchy_missing_node() { let result = resolve_node_hierarchy(vec![ ("l1".to_string(), GltfNode::empty(), vec![2]), ("l2".to_string(), GltfNode::empty(), vec![]), ]); assert_eq!(result.len(), 1); assert_eq!(result[0].0, "l2"); assert_eq!(result[0].1.children.len(), 0); } }
avg_line_length: 35.197037 | max_line_length: 100 | alphanum_fraction: 0.54529
hexsha: f7e3c73d5bc1ee8245aceeec055cc0667e01f36f | size: 4,313
use crate::type_name::PrettyType; use crate::value::primitive::Primitive; use crate::value::{UntaggedValue, Value}; use nu_source::{DbgDocBldr, DebugDocBuilder, PrettyDebug}; impl PrettyDebug for &Value { /// Get a borrowed Value ready to be pretty-printed fn pretty(&self) -> DebugDocBuilder { PrettyDebug::pretty(*self) } } impl PrettyDebug for Value { /// Get a Value ready to be pretty-printed fn pretty(&self) -> DebugDocBuilder { match &self.value { UntaggedValue::Primitive(p) => p.pretty(), UntaggedValue::Row(row) => row.pretty_builder().nest(1).group().into(), UntaggedValue::Table(table) => DbgDocBldr::delimit( "[", DbgDocBldr::intersperse(table, DbgDocBldr::space()), "]", ) .nest(), UntaggedValue::Error(_) => DbgDocBldr::error("error"), UntaggedValue::Block(_) => DbgDocBldr::opaque("block"), } } } impl PrettyType for Primitive { /// Find the type of the Value and prepare it for pretty-printing fn pretty_type(&self) -> DebugDocBuilder { match self { Primitive::Nothing => ty("nothing"), Primitive::Int(_) => ty("integer"), Primitive::Range(_) => ty("range"), Primitive::Decimal(_) => ty("decimal"), Primitive::Filesize(_) => ty("filesize"), Primitive::String(_) => ty("string"), Primitive::ColumnPath(_) => ty("column-path"), Primitive::GlobPattern(_) => ty("pattern"), Primitive::Boolean(_) => ty("boolean"), Primitive::Date(_) => ty("date"), Primitive::Duration(_) => ty("duration"), Primitive::FilePath(_) => ty("path"), Primitive::Binary(_) => ty("binary"), Primitive::BeginningOfStream => DbgDocBldr::keyword("beginning-of-stream"), Primitive::EndOfStream => DbgDocBldr::keyword("end-of-stream"), } } } impl PrettyDebug for Primitive { /// Get a Primitive value ready to be pretty-printed fn pretty(&self) -> DebugDocBuilder { match self { Primitive::Nothing => DbgDocBldr::primitive("nothing"), Primitive::Int(int) => prim(format_args!("{}", int)), Primitive::Decimal(decimal) => prim(format_args!("{}", decimal)), Primitive::Range(range) => { let (left, left_inclusion) = &range.from; let (right, right_inclusion) = &range.to; DbgDocBldr::typed( "range", (left_inclusion.debug_left_bracket() + left.pretty() + DbgDocBldr::operator(",") + DbgDocBldr::space() + right.pretty() + right_inclusion.debug_right_bracket()) .group(), ) } Primitive::Filesize(bytes) => primitive_doc(bytes, "filesize"), Primitive::String(string) => prim(string), Primitive::ColumnPath(path) => path.pretty(), Primitive::GlobPattern(pattern) => primitive_doc(pattern, "pattern"), Primitive::Boolean(boolean) => match boolean { true => DbgDocBldr::primitive("$yes"), false => DbgDocBldr::primitive("$no"), }, Primitive::Date(date) => primitive_doc(date, "date"), Primitive::Duration(duration) => primitive_doc(duration, "nanoseconds"), Primitive::FilePath(path) => primitive_doc(path, "path"), Primitive::Binary(_) => DbgDocBldr::opaque("binary"), Primitive::BeginningOfStream => DbgDocBldr::keyword("beginning-of-stream"), Primitive::EndOfStream => DbgDocBldr::keyword("end-of-stream"), } } } fn prim(name: impl std::fmt::Debug) -> DebugDocBuilder { DbgDocBldr::primitive(format!("{:?}", name)) } fn primitive_doc(name: impl std::fmt::Debug, ty: impl Into<String>) -> DebugDocBuilder { DbgDocBldr::primitive(format!("{:?}", name)) + DbgDocBldr::delimit("(", DbgDocBldr::kind(ty.into()), ")") } fn ty(name: impl std::fmt::Debug) -> DebugDocBuilder { DbgDocBldr::kind(format!("{:?}", name)) }
avg_line_length: 40.688679 | max_line_length: 88 | alphanum_fraction: 0.553443
hexsha: 11a027a13f7e317f442d5ae5c91332ebbcc01ac6 | size: 655
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![crate_name = "foo"]
#![feature(optin_builtin_traits)]

pub struct Foo;

// @has foo/struct.Foo.html
// @!has - 'Auto Trait Implementations'
impl !Send for Foo {}
impl !Sync for Foo {}
avg_line_length: 31.190476 | max_line_length: 68 | alphanum_fraction: 0.723664
hexsha: d7b32a8484e60074b848828ed978e3c4eb26aba0 | size: 1,555
#![allow(clippy::use_self)]

extern crate clap;
extern crate env_logger;
extern crate ruma;

use clap::{App, AppSettings, Arg, SubCommand};
use ruma::config::Config;
use ruma::server::Server;

fn main() {
    if let Err(error) = env_logger::try_init() {
        eprintln!("Failed to initialize logger: {}", error);
    }

    let matches = App::new("ruma-extra-server")
        .version(env!("CARGO_PKG_VERSION"))
        .about("Extra APIs for Ruma.")
        .setting(AppSettings::GlobalVersion)
        .setting(AppSettings::SubcommandRequiredElseHelp)
        .subcommand(
            SubCommand::with_name("run").about("Runs the server").arg(
                Arg::with_name("config")
                    .short("c")
                    .long("config")
                    .value_name("PATH")
                    .help("Path to a configuration file")
                    .takes_value(true),
            ),
        )
        .get_matches();

    match matches.subcommand() {
        ("run", Some(submatches)) => {
            let config = match Config::from_file(submatches.value_of("config")) {
                Ok(config) => config,
                Err(error) => {
                    eprintln!("Failed to load configuration file: {}", error);
                    return;
                }
            };

            let server = Server::new(&config).mount_extra();

            if let Err(error) = server.run() {
                eprintln!("Server failed: {}", error);
            }
        }
        _ => println!("{}", matches.usage()),
    };
}
avg_line_length: 28.796296 | max_line_length: 81 | alphanum_fraction: 0.507395
hexsha: d92c0a2731fd7b7d8e9672d98a961dfcd71b5790 | size: 3,155
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub struct Config {
    pub(crate) make_token: Box<dyn crate::idempotency_token::MakeIdempotencyToken>,
    pub(crate) endpoint_resolver: ::std::sync::Arc<dyn aws_endpoint::ResolveAwsEndpoint>,
    pub(crate) region: Option<aws_types::region::Region>,
    pub(crate) credentials_provider: std::sync::Arc<dyn aws_auth::ProvideCredentials>,
}
impl std::fmt::Debug for Config {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut config = f.debug_struct("Config");
        config.finish()
    }
}
impl Config {
    pub fn builder() -> Builder {
        Builder::default()
    }
    /// The signature version 4 service signing name to use in the credential scope when signing requests.
    ///
    /// The signing service may be overridden by the `Endpoint`, or by specifying a custom
    /// [`SigningService`](aws_types::SigningService) during operation construction
    pub fn signing_service(&self) -> &'static str {
        "ssm"
    }
}
#[derive(Default)]
pub struct Builder {
    make_token: Option<Box<dyn crate::idempotency_token::MakeIdempotencyToken>>,
    endpoint_resolver: Option<::std::sync::Arc<dyn aws_endpoint::ResolveAwsEndpoint>>,
    region: Option<aws_types::region::Region>,
    credentials_provider: Option<std::sync::Arc<dyn aws_auth::ProvideCredentials>>,
}
impl Builder {
    pub fn new() -> Self {
        Self::default()
    }
    pub fn make_token(
        mut self,
        make_token: impl crate::idempotency_token::MakeIdempotencyToken + 'static,
    ) -> Self {
        self.make_token = Some(Box::new(make_token));
        self
    }
    pub fn endpoint_resolver(
        mut self,
        endpoint_resolver: impl aws_endpoint::ResolveAwsEndpoint + 'static,
    ) -> Self {
        self.endpoint_resolver = Some(::std::sync::Arc::new(endpoint_resolver));
        self
    }
    pub fn region(mut self, region_provider: impl aws_types::region::ProvideRegion) -> Self {
        self.region = region_provider.region();
        self
    }
    /// Set the credentials provider for this service
    pub fn credentials_provider(
        mut self,
        credentials_provider: impl aws_auth::ProvideCredentials + 'static,
    ) -> Self {
        self.credentials_provider = Some(std::sync::Arc::new(credentials_provider));
        self
    }
    pub fn build(self) -> Config {
        Config {
            make_token: self
                .make_token
                .unwrap_or_else(|| Box::new(crate::idempotency_token::default_provider())),
            endpoint_resolver: self.endpoint_resolver.unwrap_or_else(|| {
                ::std::sync::Arc::new(aws_endpoint::DefaultAwsEndpointResolver::for_service("ssm"))
            }),
            region: {
                use aws_types::region::ProvideRegion;
                self.region
                    .or_else(|| aws_types::region::default_provider().region())
            },
            credentials_provider: self
                .credentials_provider
                .unwrap_or_else(|| std::sync::Arc::new(aws_auth::default_provider())),
        }
    }
}
avg_line_length: 38.47561 | max_line_length: 142 | alphanum_fraction: 0.63233
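A minimal usage sketch for the generated builder above (illustrative, not generated code): every setter is optional, and build() falls back to the defaults shown in the source, i.e. the default idempotency-token provider, the "ssm" endpoint resolver, and the default region and credentials chains.

    fn config_sketch() {
        // All fields fall back to crate defaults when not set explicitly.
        let config = Config::builder().build();

        // The SigV4 signing name for this generated client is fixed to "ssm".
        assert_eq!(config.signing_service(), "ssm");
    }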
hexsha: 29c4d3898da39b590718b4bc4d1a00bf80fed001 | size: 40,196
#![cfg(target_os = "windows")] use parking_lot::Mutex; use raw_window_handle::{RawWindowHandle, Win32Handle}; use std::{ cell::Cell, ffi::c_void, io, mem, panic, ptr, sync::{mpsc::channel, Arc}, }; use windows_sys::Win32::{ Foundation::{ HINSTANCE, HWND, LPARAM, OLE_E_WRONGCOMPOBJ, POINT, POINTS, RECT, RPC_E_CHANGED_MODE, S_OK, WPARAM, }, Graphics::{ Dwm::{DwmEnableBlurBehindWindow, DWM_BB_BLURREGION, DWM_BB_ENABLE, DWM_BLURBEHIND}, Gdi::{ ChangeDisplaySettingsExW, ClientToScreen, CreateRectRgn, DeleteObject, InvalidateRgn, RedrawWindow, CDS_FULLSCREEN, DISP_CHANGE_BADFLAGS, DISP_CHANGE_BADMODE, DISP_CHANGE_BADPARAM, DISP_CHANGE_FAILED, DISP_CHANGE_SUCCESSFUL, RDW_INTERNALPAINT, }, }, System::{ Com::{ CoCreateInstance, CoInitializeEx, CoUninitialize, CLSCTX_ALL, COINIT_APARTMENTTHREADED, }, Ole::{OleInitialize, RegisterDragDrop}, }, UI::{ Input::{ KeyboardAndMouse::{ EnableWindow, GetActiveWindow, MapVirtualKeyW, ReleaseCapture, SendInput, INPUT, INPUT_0, INPUT_KEYBOARD, KEYBDINPUT, KEYEVENTF_EXTENDEDKEY, KEYEVENTF_KEYUP, VK_LMENU, VK_MENU, }, Touch::{RegisterTouchWindow, TWF_WANTPALM}, }, WindowsAndMessaging::{ CreateWindowExW, FlashWindowEx, GetClientRect, GetCursorPos, GetForegroundWindow, GetSystemMetrics, GetWindowPlacement, IsWindowVisible, LoadCursorW, PeekMessageW, PostMessageW, RegisterClassExW, SetCursor, SetCursorPos, SetForegroundWindow, SetWindowPlacement, SetWindowPos, SetWindowTextW, CS_HREDRAW, CS_VREDRAW, CW_USEDEFAULT, FLASHWINFO, FLASHW_ALL, FLASHW_STOP, FLASHW_TIMERNOFG, FLASHW_TRAY, GWLP_HINSTANCE, HTCAPTION, MAPVK_VK_TO_VSC, NID_READY, PM_NOREMOVE, SM_DIGITIZER, SWP_ASYNCWINDOWPOS, SWP_NOACTIVATE, SWP_NOSIZE, SWP_NOZORDER, WM_NCLBUTTONDOWN, WNDCLASSEXW, }, }, }; use crate::{ dpi::{PhysicalPosition, PhysicalSize, Position, Size}, error::{ExternalError, NotSupportedError, OsError as RootOsError}, icon::Icon, monitor::MonitorHandle as RootMonitorHandle, platform_impl::platform::{ dark_mode::try_theme, definitions::{ CLSID_TaskbarList, IID_ITaskbarList, IID_ITaskbarList2, ITaskbarList, ITaskbarList2, }, dpi::{dpi_to_scale_factor, enable_non_client_dpi_scaling, hwnd_dpi}, drop_handler::FileDropHandler, event_loop::{self, EventLoopWindowTarget, DESTROY_MSG_ID}, icon::{self, IconType}, ime::ImeContext, monitor, util, window_state::{CursorFlags, SavedWindow, WindowFlags, WindowState}, Parent, PlatformSpecificWindowBuilderAttributes, WindowId, }, window::{CursorIcon, Fullscreen, Theme, UserAttentionType, WindowAttributes}, }; /// The Win32 implementation of the main `Window` object. pub struct Window { /// Main handle for the window. window: WindowWrapper, /// The current window state. window_state: Arc<Mutex<WindowState>>, // The events loop proxy. thread_executor: event_loop::EventLoopThreadExecutor, } impl Window { pub fn new<T: 'static>( event_loop: &EventLoopWindowTarget<T>, w_attr: WindowAttributes, pl_attr: PlatformSpecificWindowBuilderAttributes, ) -> Result<Window, RootOsError> { // We dispatch an `init` function because of code style. // First person to remove the need for cloning here gets a cookie! // // done. 
you owe me -- ossi unsafe { init(w_attr, pl_attr, event_loop) } } pub fn set_title(&self, text: &str) { let wide_text = util::encode_wide(text); unsafe { SetWindowTextW(self.hwnd(), wide_text.as_ptr()); } } #[inline] pub fn set_visible(&self, visible: bool) { let window = self.window.clone(); let window_state = Arc::clone(&self.window_state); self.thread_executor.execute_in_thread(move || { let _ = &window; WindowState::set_window_flags(window_state.lock(), window.0, |f| { f.set(WindowFlags::VISIBLE, visible) }); }); } #[inline] pub fn is_visible(&self) -> Option<bool> { Some(unsafe { IsWindowVisible(self.window.0) == 1 }) } #[inline] pub fn request_redraw(&self) { unsafe { RedrawWindow(self.hwnd(), ptr::null(), 0, RDW_INTERNALPAINT); } } #[inline] pub fn outer_position(&self) -> Result<PhysicalPosition<i32>, NotSupportedError> { util::get_window_rect(self.hwnd()) .map(|rect| Ok(PhysicalPosition::new(rect.left as i32, rect.top as i32))) .expect("Unexpected GetWindowRect failure; please report this error to https://github.com/rust-windowing/winit") } #[inline] pub fn inner_position(&self) -> Result<PhysicalPosition<i32>, NotSupportedError> { let mut position: POINT = unsafe { mem::zeroed() }; if unsafe { ClientToScreen(self.hwnd(), &mut position) } == false.into() { panic!("Unexpected ClientToScreen failure: please report this error to https://github.com/rust-windowing/winit") } Ok(PhysicalPosition::new(position.x as i32, position.y as i32)) } #[inline] pub fn set_outer_position(&self, position: Position) { let (x, y): (i32, i32) = position.to_physical::<i32>(self.scale_factor()).into(); let window_state = Arc::clone(&self.window_state); let window = self.window.clone(); self.thread_executor.execute_in_thread(move || { let _ = &window; WindowState::set_window_flags(window_state.lock(), window.0, |f| { f.set(WindowFlags::MAXIMIZED, false) }); }); unsafe { SetWindowPos( self.hwnd(), 0, x, y, 0, 0, SWP_ASYNCWINDOWPOS | SWP_NOZORDER | SWP_NOSIZE | SWP_NOACTIVATE, ); InvalidateRgn(self.hwnd(), 0, false.into()); } } #[inline] pub fn inner_size(&self) -> PhysicalSize<u32> { let mut rect: RECT = unsafe { mem::zeroed() }; if unsafe { GetClientRect(self.hwnd(), &mut rect) } == false.into() { panic!("Unexpected GetClientRect failure: please report this error to https://github.com/rust-windowing/winit") } PhysicalSize::new( (rect.right - rect.left) as u32, (rect.bottom - rect.top) as u32, ) } #[inline] pub fn outer_size(&self) -> PhysicalSize<u32> { util::get_window_rect(self.hwnd()) .map(|rect| { PhysicalSize::new( (rect.right - rect.left) as u32, (rect.bottom - rect.top) as u32, ) }) .unwrap() } #[inline] pub fn set_inner_size(&self, size: Size) { let scale_factor = self.scale_factor(); let (width, height) = size.to_physical::<u32>(scale_factor).into(); let window_state = Arc::clone(&self.window_state); let window = self.window.clone(); self.thread_executor.execute_in_thread(move || { let _ = &window; WindowState::set_window_flags(window_state.lock(), window.0, |f| { f.set(WindowFlags::MAXIMIZED, false) }); }); util::set_inner_size_physical(self.hwnd(), width, height); } #[inline] pub fn set_min_inner_size(&self, size: Option<Size>) { self.window_state.lock().min_size = size; // Make windows re-check the window size bounds. let size = self.inner_size(); self.set_inner_size(size.into()); } #[inline] pub fn set_max_inner_size(&self, size: Option<Size>) { self.window_state.lock().max_size = size; // Make windows re-check the window size bounds. 
let size = self.inner_size(); self.set_inner_size(size.into()); } #[inline] pub fn set_resizable(&self, resizable: bool) { let window = self.window.clone(); let window_state = Arc::clone(&self.window_state); self.thread_executor.execute_in_thread(move || { let _ = &window; WindowState::set_window_flags(window_state.lock(), window.0, |f| { f.set(WindowFlags::RESIZABLE, resizable) }); }); } #[inline] pub fn is_resizable(&self) -> bool { let window_state = self.window_state.lock(); window_state.window_flags.contains(WindowFlags::RESIZABLE) } /// Returns the `hwnd` of this window. #[inline] pub fn hwnd(&self) -> HWND { self.window.0 } #[inline] pub fn hinstance(&self) -> HINSTANCE { unsafe { super::get_window_long(self.hwnd(), GWLP_HINSTANCE) } } #[inline] pub fn raw_window_handle(&self) -> RawWindowHandle { let mut handle = Win32Handle::empty(); handle.hwnd = self.window.0 as *mut _; handle.hinstance = self.hinstance() as *mut _; RawWindowHandle::Win32(handle) } #[inline] pub fn set_cursor_icon(&self, cursor: CursorIcon) { self.window_state.lock().mouse.cursor = cursor; self.thread_executor.execute_in_thread(move || unsafe { let cursor = LoadCursorW(0, cursor.to_windows_cursor()); SetCursor(cursor); }); } #[inline] pub fn set_cursor_grab(&self, grab: bool) -> Result<(), ExternalError> { let window = self.window.clone(); let window_state = Arc::clone(&self.window_state); let (tx, rx) = channel(); self.thread_executor.execute_in_thread(move || { let _ = &window; let result = window_state .lock() .mouse .set_cursor_flags(window.0, |f| f.set(CursorFlags::GRABBED, grab)) .map_err(|e| ExternalError::Os(os_error!(e))); let _ = tx.send(result); }); rx.recv().unwrap() } #[inline] pub fn set_cursor_visible(&self, visible: bool) { let window = self.window.clone(); let window_state = Arc::clone(&self.window_state); let (tx, rx) = channel(); self.thread_executor.execute_in_thread(move || { let _ = &window; let result = window_state .lock() .mouse .set_cursor_flags(window.0, |f| f.set(CursorFlags::HIDDEN, !visible)) .map_err(|e| e.to_string()); let _ = tx.send(result); }); rx.recv().unwrap().ok(); } #[inline] pub fn scale_factor(&self) -> f64 { self.window_state.lock().scale_factor } #[inline] pub fn set_cursor_position(&self, position: Position) -> Result<(), ExternalError> { let scale_factor = self.scale_factor(); let (x, y) = position.to_physical::<i32>(scale_factor).into(); let mut point = POINT { x, y }; unsafe { if ClientToScreen(self.hwnd(), &mut point) == false.into() { return Err(ExternalError::Os(os_error!(io::Error::last_os_error()))); } if SetCursorPos(point.x, point.y) == false.into() { return Err(ExternalError::Os(os_error!(io::Error::last_os_error()))); } } Ok(()) } #[inline] pub fn drag_window(&self) -> Result<(), ExternalError> { unsafe { let points = { let mut pos = mem::zeroed(); GetCursorPos(&mut pos); pos }; let points = POINTS { x: points.x as i16, y: points.y as i16, }; ReleaseCapture(); PostMessageW( self.hwnd(), WM_NCLBUTTONDOWN, HTCAPTION as WPARAM, &points as *const _ as LPARAM, ); } Ok(()) } #[inline] pub fn set_cursor_hittest(&self, hittest: bool) -> Result<(), ExternalError> { let window = self.window.clone(); let window_state = Arc::clone(&self.window_state); self.thread_executor.execute_in_thread(move || { WindowState::set_window_flags(window_state.lock(), window.0, |f| { f.set(WindowFlags::IGNORE_CURSOR_EVENT, !hittest) }); }); Ok(()) } #[inline] pub fn id(&self) -> WindowId { WindowId(self.hwnd()) } #[inline] pub fn set_minimized(&self, minimized: bool) { let window = 
self.window.clone(); let window_state = Arc::clone(&self.window_state); self.thread_executor.execute_in_thread(move || { let _ = &window; WindowState::set_window_flags(window_state.lock(), window.0, |f| { f.set(WindowFlags::MINIMIZED, minimized) }); }); } #[inline] pub fn set_maximized(&self, maximized: bool) { let window = self.window.clone(); let window_state = Arc::clone(&self.window_state); self.thread_executor.execute_in_thread(move || { let _ = &window; WindowState::set_window_flags(window_state.lock(), window.0, |f| { f.set(WindowFlags::MAXIMIZED, maximized) }); }); } #[inline] pub fn is_maximized(&self) -> bool { let window_state = self.window_state.lock(); window_state.window_flags.contains(WindowFlags::MAXIMIZED) } #[inline] pub fn fullscreen(&self) -> Option<Fullscreen> { let window_state = self.window_state.lock(); window_state.fullscreen.clone() } #[inline] pub fn set_fullscreen(&self, fullscreen: Option<Fullscreen>) { let window = self.window.clone(); let window_state = Arc::clone(&self.window_state); let mut window_state_lock = window_state.lock(); let old_fullscreen = window_state_lock.fullscreen.clone(); if window_state_lock.fullscreen == fullscreen { return; } window_state_lock.fullscreen = fullscreen.clone(); drop(window_state_lock); self.thread_executor.execute_in_thread(move || { let _ = &window; // Change video mode if we're transitioning to or from exclusive // fullscreen match (&old_fullscreen, &fullscreen) { (_, Some(Fullscreen::Exclusive(video_mode))) => { let monitor = video_mode.monitor(); let monitor_info = monitor::get_monitor_info(monitor.inner.hmonitor()).unwrap(); let res = unsafe { ChangeDisplaySettingsExW( monitor_info.szDevice.as_ptr(), &*video_mode.video_mode.native_video_mode, 0, CDS_FULLSCREEN, ptr::null(), ) }; debug_assert!(res != DISP_CHANGE_BADFLAGS); debug_assert!(res != DISP_CHANGE_BADMODE); debug_assert!(res != DISP_CHANGE_BADPARAM); debug_assert!(res != DISP_CHANGE_FAILED); assert_eq!(res, DISP_CHANGE_SUCCESSFUL); } (Some(Fullscreen::Exclusive(_)), _) => { let res = unsafe { ChangeDisplaySettingsExW( ptr::null(), ptr::null(), 0, CDS_FULLSCREEN, ptr::null(), ) }; debug_assert!(res != DISP_CHANGE_BADFLAGS); debug_assert!(res != DISP_CHANGE_BADMODE); debug_assert!(res != DISP_CHANGE_BADPARAM); debug_assert!(res != DISP_CHANGE_FAILED); assert_eq!(res, DISP_CHANGE_SUCCESSFUL); } _ => (), } unsafe { // There are some scenarios where calling `ChangeDisplaySettingsExW` takes long // enough to execute that the DWM thinks our program has frozen and takes over // our program's window. When that happens, the `SetWindowPos` call below gets // eaten and the window doesn't get set to the proper fullscreen position. // // Calling `PeekMessageW` here notifies Windows that our process is still running // fine, taking control back from the DWM and ensuring that the `SetWindowPos` call // below goes through. 
let mut msg = mem::zeroed(); PeekMessageW(&mut msg, 0, 0, 0, PM_NOREMOVE); } // Update window style WindowState::set_window_flags(window_state.lock(), window.0, |f| { f.set( WindowFlags::MARKER_EXCLUSIVE_FULLSCREEN, matches!(fullscreen, Some(Fullscreen::Exclusive(_))), ); f.set( WindowFlags::MARKER_BORDERLESS_FULLSCREEN, matches!(fullscreen, Some(Fullscreen::Borderless(_))), ); }); // Mark as fullscreen window wrt to z-order // // this needs to be called before the below fullscreen SetWindowPos as this itself // will generate WM_SIZE messages of the old window size that can race with what we set below unsafe { taskbar_mark_fullscreen(window.0, fullscreen.is_some()); } // Update window bounds match &fullscreen { Some(fullscreen) => { // Save window bounds before entering fullscreen let placement = unsafe { let mut placement = mem::zeroed(); GetWindowPlacement(window.0, &mut placement); placement }; window_state.lock().saved_window = Some(SavedWindow { placement }); let monitor = match &fullscreen { Fullscreen::Exclusive(video_mode) => video_mode.monitor(), Fullscreen::Borderless(Some(monitor)) => monitor.clone(), Fullscreen::Borderless(None) => RootMonitorHandle { inner: monitor::current_monitor(window.0), }, }; let position: (i32, i32) = monitor.position().into(); let size: (u32, u32) = monitor.size().into(); unsafe { SetWindowPos( window.0, 0, position.0, position.1, size.0 as i32, size.1 as i32, SWP_ASYNCWINDOWPOS | SWP_NOZORDER, ); InvalidateRgn(window.0, 0, false.into()); } } None => { let mut window_state_lock = window_state.lock(); if let Some(SavedWindow { placement }) = window_state_lock.saved_window.take() { drop(window_state_lock); unsafe { SetWindowPlacement(window.0, &placement); InvalidateRgn(window.0, 0, false.into()); } } } } }); } #[inline] pub fn set_decorations(&self, decorations: bool) { let window = self.window.clone(); let window_state = Arc::clone(&self.window_state); self.thread_executor.execute_in_thread(move || { let _ = &window; WindowState::set_window_flags(window_state.lock(), window.0, |f| { f.set(WindowFlags::DECORATIONS, decorations) }); }); } #[inline] pub fn is_decorated(&self) -> bool { let window_state = self.window_state.lock(); window_state.window_flags.contains(WindowFlags::DECORATIONS) } #[inline] pub fn set_always_on_top(&self, always_on_top: bool) { let window = self.window.clone(); let window_state = Arc::clone(&self.window_state); self.thread_executor.execute_in_thread(move || { let _ = &window; WindowState::set_window_flags(window_state.lock(), window.0, |f| { f.set(WindowFlags::ALWAYS_ON_TOP, always_on_top) }); }); } #[inline] pub fn current_monitor(&self) -> Option<RootMonitorHandle> { Some(RootMonitorHandle { inner: monitor::current_monitor(self.hwnd()), }) } #[inline] pub fn set_window_icon(&self, window_icon: Option<Icon>) { if let Some(ref window_icon) = window_icon { window_icon .inner .set_for_window(self.hwnd(), IconType::Small); } else { icon::unset_for_window(self.hwnd(), IconType::Small); } self.window_state.lock().window_icon = window_icon; } #[inline] pub fn set_enable(&self, enabled: bool) { unsafe { EnableWindow(self.hwnd(), enabled.into()) }; } #[inline] pub fn set_taskbar_icon(&self, taskbar_icon: Option<Icon>) { if let Some(ref taskbar_icon) = taskbar_icon { taskbar_icon .inner .set_for_window(self.hwnd(), IconType::Big); } else { icon::unset_for_window(self.hwnd(), IconType::Big); } self.window_state.lock().taskbar_icon = taskbar_icon; } #[inline] pub fn set_ime_position(&self, spot: Position) { unsafe { 
ImeContext::current(self.hwnd()).set_ime_position(spot, self.scale_factor()); } } #[inline] pub fn set_ime_allowed(&self, allowed: bool) { self.window_state.lock().ime_allowed = allowed; unsafe { ImeContext::set_ime_allowed(self.hwnd(), allowed); } } #[inline] pub fn request_user_attention(&self, request_type: Option<UserAttentionType>) { let window = self.window.clone(); let active_window_handle = unsafe { GetActiveWindow() }; if window.0 == active_window_handle { return; } self.thread_executor.execute_in_thread(move || unsafe { let _ = &window; let (flags, count) = request_type .map(|ty| match ty { UserAttentionType::Critical => (FLASHW_ALL | FLASHW_TIMERNOFG, u32::MAX), UserAttentionType::Informational => (FLASHW_TRAY | FLASHW_TIMERNOFG, 0), }) .unwrap_or((FLASHW_STOP, 0)); let flash_info = FLASHWINFO { cbSize: mem::size_of::<FLASHWINFO>() as u32, hwnd: window.0, dwFlags: flags, uCount: count, dwTimeout: 0, }; FlashWindowEx(&flash_info); }); } #[inline] pub fn theme(&self) -> Theme { self.window_state.lock().current_theme } #[inline] pub fn set_skip_taskbar(&self, skip: bool) { com_initialized(); unsafe { TASKBAR_LIST.with(|task_bar_list_ptr| { let mut task_bar_list = task_bar_list_ptr.get(); if task_bar_list.is_null() { let hr = CoCreateInstance( &CLSID_TaskbarList, ptr::null_mut(), CLSCTX_ALL, &IID_ITaskbarList, &mut task_bar_list as *mut _ as *mut _, ); let hr_init = (*(*task_bar_list).lpVtbl).HrInit; if hr != S_OK || hr_init(task_bar_list.cast()) != S_OK { // In some old windows, the taskbar object could not be created, we just ignore it return; } task_bar_list_ptr.set(task_bar_list) } task_bar_list = task_bar_list_ptr.get(); if skip { let delete_tab = (*(*task_bar_list).lpVtbl).DeleteTab; delete_tab(task_bar_list, self.window.0); } else { let add_tab = (*(*task_bar_list).lpVtbl).AddTab; add_tab(task_bar_list, self.window.0); } }); } } #[inline] pub fn focus_window(&self) { let window = self.window.clone(); let window_flags = self.window_state.lock().window_flags(); let is_visible = window_flags.contains(WindowFlags::VISIBLE); let is_minimized = window_flags.contains(WindowFlags::MINIMIZED); let is_foreground = window.0 == unsafe { GetForegroundWindow() }; if is_visible && !is_minimized && !is_foreground { unsafe { force_window_active(window.0) }; } } } impl Drop for Window { #[inline] fn drop(&mut self) { unsafe { // The window must be destroyed from the same thread that created it, so we send a // custom message to be handled by our callback to do the actual work. PostMessageW(self.hwnd(), *DESTROY_MSG_ID, 0, 0); } } } /// A simple non-owning wrapper around a window. #[doc(hidden)] #[derive(Clone)] pub struct WindowWrapper(HWND); // Send and Sync are not implemented for HWND and HDC, we have to wrap it and implement them manually. 
// For more info see: // https://github.com/retep998/winapi-rs/issues/360 // https://github.com/retep998/winapi-rs/issues/396 unsafe impl Sync for WindowWrapper {} unsafe impl Send for WindowWrapper {} pub(super) struct InitData<'a, T: 'static> { // inputs pub event_loop: &'a EventLoopWindowTarget<T>, pub attributes: WindowAttributes, pub pl_attribs: PlatformSpecificWindowBuilderAttributes, pub window_flags: WindowFlags, // outputs pub window: Option<Window>, } impl<'a, T: 'static> InitData<'a, T> { unsafe fn create_window(&self, window: HWND) -> Window { // Register for touch events if applicable { let digitizer = GetSystemMetrics(SM_DIGITIZER) as u32; if digitizer & NID_READY != 0 { RegisterTouchWindow(window, TWF_WANTPALM); } } let dpi = hwnd_dpi(window); let scale_factor = dpi_to_scale_factor(dpi); // If the system theme is dark, we need to set the window theme now // before we update the window flags (and possibly show the // window for the first time). let current_theme = try_theme(window, self.pl_attribs.preferred_theme); let window_state = { let window_state = WindowState::new( &self.attributes, self.pl_attribs.taskbar_icon.clone(), scale_factor, current_theme, self.pl_attribs.preferred_theme, ); let window_state = Arc::new(Mutex::new(window_state)); WindowState::set_window_flags(window_state.lock(), window, |f| *f = self.window_flags); window_state }; enable_non_client_dpi_scaling(window); ImeContext::set_ime_allowed(window, false); Window { window: WindowWrapper(window), window_state, thread_executor: self.event_loop.create_thread_executor(), } } unsafe fn create_window_data(&self, win: &Window) -> event_loop::WindowData<T> { let file_drop_handler = if self.pl_attribs.drag_and_drop { let ole_init_result = OleInitialize(ptr::null_mut()); // It is ok if the initialize result is `S_FALSE` because it might happen that // multiple windows are created on the same thread. if ole_init_result == OLE_E_WRONGCOMPOBJ { panic!("OleInitialize failed! Result was: `OLE_E_WRONGCOMPOBJ`"); } else if ole_init_result == RPC_E_CHANGED_MODE { panic!( "OleInitialize failed! Result was: `RPC_E_CHANGED_MODE`. \ Make sure other crates are not using multithreaded COM library \ on the same thread or disable drag and drop support." ); } let file_drop_runner = self.event_loop.runner_shared.clone(); let file_drop_handler = FileDropHandler::new( win.window.0, Box::new(move |event| { if let Ok(e) = event.map_nonuser_event() { file_drop_runner.send_event(e) } }), ); let handler_interface_ptr = &mut (*file_drop_handler.data).interface as *mut _ as *mut c_void; assert_eq!(RegisterDragDrop(win.window.0, handler_interface_ptr), S_OK); Some(file_drop_handler) } else { None }; self.event_loop.runner_shared.register_window(win.window.0); event_loop::WindowData { window_state: win.window_state.clone(), event_loop_runner: self.event_loop.runner_shared.clone(), _file_drop_handler: file_drop_handler, userdata_removed: Cell::new(false), recurse_depth: Cell::new(0), } } // Returns a pointer to window user data on success. // The user data will be registered for the window and can be accessed within the window event callback. 
pub unsafe fn on_nccreate(&mut self, window: HWND) -> Option<isize> { let runner = self.event_loop.runner_shared.clone(); let result = runner.catch_unwind(|| { let window = self.create_window(window); let window_data = self.create_window_data(&window); (window, window_data) }); result.map(|(win, userdata)| { self.window = Some(win); let userdata = Box::into_raw(Box::new(userdata)); userdata as _ }) } pub unsafe fn on_create(&mut self) { let win = self.window.as_mut().expect("failed window creation"); // making the window transparent if self.attributes.transparent && !self.pl_attribs.no_redirection_bitmap { // Empty region for the blur effect, so the window is fully transparent let region = CreateRectRgn(0, 0, -1, -1); let bb = DWM_BLURBEHIND { dwFlags: DWM_BB_ENABLE | DWM_BB_BLURREGION, fEnable: true.into(), hRgnBlur: region, fTransitionOnMaximized: false.into(), }; let hr = DwmEnableBlurBehindWindow(win.hwnd(), &bb); if hr < 0 { warn!( "Setting transparent window is failed. HRESULT Code: 0x{:X}", hr ); } DeleteObject(region); } win.set_skip_taskbar(self.pl_attribs.skip_taskbar); let attributes = self.attributes.clone(); // Set visible before setting the size to ensure the // attribute is correctly applied. win.set_visible(attributes.visible); if attributes.fullscreen.is_some() { win.set_fullscreen(attributes.fullscreen); force_window_active(win.window.0); } else { let dimensions = attributes .inner_size .unwrap_or_else(|| PhysicalSize::new(800, 600).into()); win.set_inner_size(dimensions); if attributes.maximized { // Need to set MAXIMIZED after setting `inner_size` as // `Window::set_inner_size` changes MAXIMIZED to false. win.set_maximized(true); } } if let Some(position) = attributes.position { win.set_outer_position(position); } } } unsafe fn init<T>( attributes: WindowAttributes, pl_attribs: PlatformSpecificWindowBuilderAttributes, event_loop: &EventLoopWindowTarget<T>, ) -> Result<Window, RootOsError> where T: 'static, { let title = util::encode_wide(&attributes.title); let class_name = register_window_class::<T>(&attributes.window_icon, &pl_attribs.taskbar_icon); let mut window_flags = WindowFlags::empty(); window_flags.set(WindowFlags::DECORATIONS, attributes.decorations); window_flags.set(WindowFlags::ALWAYS_ON_TOP, attributes.always_on_top); window_flags.set( WindowFlags::NO_BACK_BUFFER, pl_attribs.no_redirection_bitmap, ); window_flags.set(WindowFlags::TRANSPARENT, attributes.transparent); // WindowFlags::VISIBLE and MAXIMIZED are set down below after the window has been configured. 
window_flags.set(WindowFlags::RESIZABLE, attributes.resizable); let parent = match pl_attribs.parent { Parent::ChildOf(parent) => { window_flags.set(WindowFlags::CHILD, true); if pl_attribs.menu.is_some() { warn!("Setting a menu on a child window is unsupported"); } Some(parent) } Parent::OwnedBy(parent) => { window_flags.set(WindowFlags::POPUP, true); Some(parent) } Parent::None => { window_flags.set(WindowFlags::ON_TASKBAR, true); None } }; let mut initdata = InitData { event_loop, attributes, pl_attribs: pl_attribs.clone(), window_flags, window: None, }; let (style, ex_style) = window_flags.to_window_styles(); let handle = CreateWindowExW( ex_style, class_name.as_ptr(), title.as_ptr(), style, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, parent.unwrap_or(0), pl_attribs.menu.unwrap_or(0), util::get_instance_handle(), &mut initdata as *mut _ as *mut _, ); // If the window creation in `InitData` panicked, then should resume panicking here if let Err(panic_error) = event_loop.runner_shared.take_panic_error() { panic::resume_unwind(panic_error) } if handle == 0 { return Err(os_error!(io::Error::last_os_error())); } // If the handle is non-null, then window creation must have succeeded, which means // that we *must* have populated the `InitData.window` field. Ok(initdata.window.unwrap()) } unsafe fn register_window_class<T: 'static>( window_icon: &Option<Icon>, taskbar_icon: &Option<Icon>, ) -> Vec<u16> { let class_name = util::encode_wide("Window Class"); let h_icon = taskbar_icon .as_ref() .map(|icon| icon.inner.as_raw_handle()) .unwrap_or(0); let h_icon_small = window_icon .as_ref() .map(|icon| icon.inner.as_raw_handle()) .unwrap_or(0); let class = WNDCLASSEXW { cbSize: mem::size_of::<WNDCLASSEXW>() as u32, style: CS_HREDRAW | CS_VREDRAW, lpfnWndProc: Some(super::event_loop::public_window_callback::<T>), cbClsExtra: 0, cbWndExtra: 0, hInstance: util::get_instance_handle(), hIcon: h_icon, hCursor: 0, // must be null in order for cursor state to work properly hbrBackground: 0, lpszMenuName: ptr::null(), lpszClassName: class_name.as_ptr(), hIconSm: h_icon_small, }; // We ignore errors because registering the same window class twice would trigger // an error, and because errors here are detected during CreateWindowEx anyway. // Also since there is no weird element in the struct, there is no reason for this // call to fail. RegisterClassExW(&class); class_name } struct ComInitialized(*mut ()); impl Drop for ComInitialized { fn drop(&mut self) { unsafe { CoUninitialize() }; } } thread_local! { static COM_INITIALIZED: ComInitialized = { unsafe { CoInitializeEx(ptr::null(), COINIT_APARTMENTTHREADED); ComInitialized(ptr::null_mut()) } }; static TASKBAR_LIST: Cell<*mut ITaskbarList> = Cell::new(ptr::null_mut()); static TASKBAR_LIST2: Cell<*mut ITaskbarList2> = Cell::new(ptr::null_mut()); } pub fn com_initialized() { COM_INITIALIZED.with(|_| {}); } // Reference Implementation: // https://github.com/chromium/chromium/blob/f18e79d901f56154f80eea1e2218544285e62623/ui/views/win/fullscreen_handler.cc // // As per MSDN marking the window as fullscreen should ensure that the // taskbar is moved to the bottom of the Z-order when the fullscreen window // is activated. If the window is not fullscreen, the Shell falls back to // heuristics to determine how the window should be treated, which means // that it could still consider the window as fullscreen. 
:( unsafe fn taskbar_mark_fullscreen(handle: HWND, fullscreen: bool) { com_initialized(); TASKBAR_LIST2.with(|task_bar_list2_ptr| { let mut task_bar_list2 = task_bar_list2_ptr.get(); if task_bar_list2.is_null() { let hr = CoCreateInstance( &CLSID_TaskbarList, ptr::null_mut(), CLSCTX_ALL, &IID_ITaskbarList2, &mut task_bar_list2 as *mut _ as *mut _, ); let hr_init = (*(*task_bar_list2).lpVtbl).parent.HrInit; if hr != S_OK || hr_init(task_bar_list2.cast()) != S_OK { // In some old windows, the taskbar object could not be created, we just ignore it return; } task_bar_list2_ptr.set(task_bar_list2) } task_bar_list2 = task_bar_list2_ptr.get(); let mark_fullscreen_window = (*(*task_bar_list2).lpVtbl).MarkFullscreenWindow; mark_fullscreen_window(task_bar_list2, handle, if fullscreen { 1 } else { 0 }); }) } unsafe fn force_window_active(handle: HWND) { // In some situation, calling SetForegroundWindow could not bring up the window, // This is a little hack which can "steal" the foreground window permission // We only call this function in the window creation, so it should be fine. // See : https://stackoverflow.com/questions/10740346/setforegroundwindow-only-working-while-visual-studio-is-open let alt_sc = MapVirtualKeyW(VK_MENU as u32, MAPVK_VK_TO_VSC); let inputs = [ INPUT { r#type: INPUT_KEYBOARD, Anonymous: INPUT_0 { ki: KEYBDINPUT { wVk: VK_LMENU, wScan: alt_sc as u16, dwFlags: KEYEVENTF_EXTENDEDKEY, dwExtraInfo: 0, time: 0, }, }, }, INPUT { r#type: INPUT_KEYBOARD, Anonymous: INPUT_0 { ki: KEYBDINPUT { wVk: VK_LMENU, wScan: alt_sc as u16, dwFlags: KEYEVENTF_EXTENDEDKEY | KEYEVENTF_KEYUP, dwExtraInfo: 0, time: 0, }, }, }, ]; // Simulate a key press and release SendInput( inputs.len() as u32, inputs.as_ptr(), mem::size_of::<INPUT>() as i32, ); SetForegroundWindow(handle); }
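Many of the Window methods above follow the same cross-thread pattern: they hand a closure to `thread_executor.execute_in_thread`, and when a result must come back (as in `set_cursor_grab` and `set_cursor_visible`) they block on an mpsc channel until the window's own thread has answered. The sketch below reproduces that pattern with only the standard library; `ThreadExecutor` and the closures here are made-up stand-ins, not winit's actual types.

use std::sync::mpsc::{channel, Sender};
use std::thread;

// A job is just a closure that must run on the thread owning the window.
type Job = Box<dyn FnOnce() + Send + 'static>;

struct ThreadExecutor {
    sender: Sender<Job>,
}

impl ThreadExecutor {
    // Spawn a stand-in "window thread" that drains jobs until every sender is dropped.
    fn new() -> (Self, thread::JoinHandle<()>) {
        let (sender, receiver) = channel::<Job>();
        let handle = thread::spawn(move || {
            for job in receiver {
                job();
            }
        });
        (ThreadExecutor { sender }, handle)
    }

    fn execute_in_thread<F: FnOnce() + Send + 'static>(&self, f: F) {
        self.sender.send(Box::new(f)).expect("window thread has exited");
    }
}

fn main() {
    let (executor, handle) = ThreadExecutor::new();

    // Fire-and-forget mutation, like set_visible or set_maximized.
    executor.execute_in_thread(|| println!("toggling a window flag"));

    // Mutation that reports back, like set_cursor_grab: block on a one-shot
    // channel until the window thread has produced the result.
    let (tx, rx) = channel();
    executor.execute_in_thread(move || {
        let result: Result<(), String> = Ok(()); // pretend the cursor was grabbed
        let _ = tx.send(result);
    });
    println!("cursor grab result: {:?}", rx.recv().unwrap());

    drop(executor); // close the channel so the worker loop ends
    handle.join().unwrap();
}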
35.352682
124
0.564708
eb77a39ecb3c1c5e34f7747fc5bfb2c402df87a0
4,534
//! //! # Delete Topic Request //! //! Delete topic request handler. Lookup topic in local metadata, grab its K8 context //! and send K8 a delete message. //! use dataplane::ErrorCode; use tracing::{debug, instrument, trace}; use std::io::Error; use dataplane::api::{RequestMessage, ResponseMessage}; use fluvio_sc_schema::{Status}; use fluvio_sc_schema::objects::{ObjectApiDeleteRequest}; use fluvio_auth::{AuthContext}; use crate::services::auth::AuthServiceContext; /// Handler for delete topic request #[instrument(skip(request, auth_ctx))] pub async fn handle_delete_request<AC: AuthContext>( request: RequestMessage<ObjectApiDeleteRequest>, auth_ctx: &AuthServiceContext<AC>, ) -> Result<ResponseMessage<Status>, Error> { let (header, del_req) = request.get_header_request(); debug!("del request: {:#?}", del_req); let status = match del_req { ObjectApiDeleteRequest::Topic(req) => { super::topic::handle_delete_topic(req.key(), auth_ctx).await? } ObjectApiDeleteRequest::CustomSpu(req) => { super::spu::handle_un_register_custom_spu_request(req.key(), auth_ctx).await? } ObjectApiDeleteRequest::SpuGroup(req) => { super::spg::handle_delete_spu_group(req.key(), auth_ctx).await? } ObjectApiDeleteRequest::ManagedConnector(req) => { super::connector::handle_delete_managed_connector(req.key(), auth_ctx).await? } ObjectApiDeleteRequest::SmartModule(req) => { super::smartmodule::handle_delete_smartmodule(req.key(), auth_ctx).await? } ObjectApiDeleteRequest::TableFormat(req) => { super::tableformat::handle_delete_tableformat(req.key(), auth_ctx).await? } ObjectApiDeleteRequest::DerivedStream(req) => { let name = req.key(); delete_handler::process( name.clone(), auth_ctx, auth_ctx.global_ctx.derivedstreams(), |_| ErrorCode::DerivedStreamObjectError, || ErrorCode::SmartModuleNotFound { name }, ) .await? 
} }; trace!("flv delete topics resp {:#?}", status); Ok(ResponseMessage::from_header(&header, status)) } mod delete_handler { use std::{ convert::{TryFrom, TryInto}, io::{Error, ErrorKind}, }; use dataplane::ErrorCode; use fluvio_stream_dispatcher::store::StoreContext; use tracing::{debug, trace, instrument}; use fluvio_sc_schema::{AdminSpec, Status}; use fluvio_auth::{AuthContext, InstanceAction}; use fluvio_controlplane_metadata::{core::Spec, extended::SpecExt}; use crate::services::auth::AuthServiceContext; /// Handler for object delete #[instrument(skip(auth_ctx, object_ctx, error_code, not_found_code))] pub async fn process<AC: AuthContext, S, F, G>( name: String, auth_ctx: &AuthServiceContext<AC>, object_ctx: &StoreContext<S>, error_code: F, not_found_code: G, ) -> Result<Status, Error> where S: AdminSpec + SpecExt, <S as Spec>::IndexKey: TryFrom<String>, F: FnOnce(Error) -> ErrorCode, G: FnOnce() -> ErrorCode, { use dataplane::ErrorCode; debug!(ty = %S::LABEL,%name,"deleting"); if let Ok(authorized) = auth_ctx .auth .allow_instance_action(S::OBJECT_TYPE, InstanceAction::Delete, &name) .await { if !authorized { trace!("authorization failed"); return Ok(Status::new( name.clone(), ErrorCode::PermissionDenied, Some(String::from("permission denied")), )); } } else { return Err(Error::new(ErrorKind::Interrupted, "authorization io error")); } let key = name .clone() .try_into() .map_err(|_err| Error::new(ErrorKind::InvalidData, "not convertible"))?; let status = if object_ctx.store().value(&key).await.is_some() { if let Err(err) = object_ctx.delete(key).await { let err_string = err.to_string(); Status::new(name.clone(), error_code(err), Some(err_string)) } else { Status::new_ok(name) } } else { Status::new(name, not_found_code(), Some("not found".to_owned())) }; Ok(status) } }
33.585185
89
0.595721
fb197d99de4a68543bf874f3f2d6e203c25b73e9
151
mod builder;
pub mod http;
pub mod middleware;

pub use self::builder::NewServiceExt;

#[doc(no_inline)]
pub use tower_service::{NewService, Service};
16.777778
45
0.754967
e8f3c8124b682a35c77eb88313f106757bd0fc03
14,438
#![cfg_attr(not(feature = "std"), no_std)] use sp_std::{fmt::Debug, prelude::*}; use sp_runtime::{ RuntimeDebug, Percent, traits::{ Hash, AtLeast32BitUnsigned, Zero, // Saturating, CheckedSub, CheckedAdd, }, }; use frame_support::{ traits::{ Currency, ReservableCurrency, ExistenceRequirement::{KeepAlive}, }, }; use codec::{Encode, Decode, HasCompact, FullCodec}; use mc_support::{ primitives::{ DungeonReportState }, traits::{ ManagerAccessor, FeaturedAssets, RandomNumber, RandomHash, }, }; pub use pallet::*; type BalanceOf<T> = <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance; type AssetBalance<T> = <<T as Config>::FeaturedAssets as FeaturedAssets<<T as frame_system::Config>::AccountId>>::Balance; type AssetAmountPair<T> = ( <<T as Config>::FeaturedAssets as FeaturedAssets<<T as frame_system::Config>::AccountId>>::AssetId, AssetBalance<T>, ); #[frame_support::pallet] pub mod pallet { use frame_system::pallet_prelude::*; use frame_support::{ pallet_prelude::*, weights::{DispatchClass, Pays}, dispatch::DispatchResultWithPostInfo, }; use super::*; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] pub struct Pallet<T>(_); /// The module configuration trait. #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>; /// The arithmetic type of dungeon identifier. type DungeonId: Member + Parameter + Default + Copy + HasCompact + FullCodec; /// The units in which we record balances. type Balance: Member + Parameter + AtLeast32BitUnsigned + Default + Copy; /// The currency mechanism. type Currency: ReservableCurrency<Self::AccountId>; /// The manager origin. type ManagerOrigin: EnsureOrigin<Self::Origin>; /// Asset Admin is outer module type AssetAdmin: ManagerAccessor<Self::AccountId>; /// Something that provides randomness number in the runtime. type RandomNumber: RandomNumber<u32>; /// Something that provides randomness hash in the runtime. 
type RandomHash: RandomHash<Self::Hash>; /// The featured asset module type FeaturedAssets: FeaturedAssets<Self::AccountId>; /// blocks for closing after ticket bought type TicketClosingGap: Get<Self::BlockNumber>; /// blocks for closing after playing type TicketPlayingGap: Get<Self::BlockNumber>; /// percent for asset distribution type AssetDistributionPercent: Get<Percent>; } #[pallet::hooks] impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> { // TODO on finalized } #[pallet::call] impl<T: Config> Pallet<T> { /// create new dungeon #[pallet::weight((10_000 + T::DbWeight::get().writes(1), DispatchClass::Normal, Pays::No))] pub(super) fn create( origin: OriginFor<T>, #[pallet::compact] id: T::DungeonId, ticket_price: BalanceOf<T>, provide_assets: Vec<AssetAmountPair<T>>, ) -> DispatchResultWithPostInfo { // T::ManagerOrigin::ensure_origin(origin)?; let origin = ensure_signed(origin)?; ensure!(T::AssetAdmin::is_admin(&origin), Error::<T>::NoPermission); ensure!(!Dungeons::<T>::contains_key(id), Error::<T>::DungeonExists); let all_asset_in_using = provide_assets.iter().all(|one| T::FeaturedAssets::is_in_using(one.0)); ensure!(all_asset_in_using, Error::<T>::AssetNotUsed); // create dungeon Dungeons::<T>::insert(id, DungeonInfo { ticket_price: ticket_price, provide_assets: provide_assets, report_ranks: Vec::new(), }); Self::deposit_event(Event::DungeonCreated(id, ticket_price)); Ok(().into()) } /// modify dungeon price #[pallet::weight((10_000 + T::DbWeight::get().writes(1), DispatchClass::Normal, Pays::No))] pub(super) fn modify_price( origin: OriginFor<T>, #[pallet::compact] id: T::DungeonId, ticket_price: BalanceOf<T>, ) -> DispatchResultWithPostInfo { // T::ManagerOrigin::ensure_origin(origin)?; let origin = ensure_signed(origin)?; ensure!(T::AssetAdmin::is_admin(&origin), Error::<T>::NoPermission); Dungeons::<T>::try_mutate(id, |maybe_dungeon| { let dungeon = maybe_dungeon.as_mut().ok_or(Error::<T>::UnknownDungeon)?; let old_ticket_price = dungeon.ticket_price; dungeon.ticket_price = ticket_price; Self::deposit_event(Event::DungeonTicketModified(id, old_ticket_price, ticket_price)); Ok(().into()) }) } /// modify assets supply #[pallet::weight((10_000 + T::DbWeight::get().writes(1), DispatchClass::Normal, Pays::No))] pub(super) fn modify_assets_supply( origin: OriginFor<T>, #[pallet::compact] id: T::DungeonId, provide_assets: Vec<AssetAmountPair<T>>, ) -> DispatchResultWithPostInfo { // T::ManagerOrigin::ensure_origin(origin)?; let origin = ensure_signed(origin)?; ensure!(T::AssetAdmin::is_admin(&origin), Error::<T>::NoPermission); Dungeons::<T>::try_mutate(id, |maybe_dungeon| { let dungeon = maybe_dungeon.as_mut().ok_or(Error::<T>::UnknownDungeon)?; dungeon.provide_assets = provide_assets; Self::deposit_event(Event::DungeonInfoModified(id)); Ok(().into()) }) } /// modify final distribution #[pallet::weight((10_000 + T::DbWeight::get().writes(1), DispatchClass::Normal, Pays::No))] pub(super) fn modify_distribution_ratio( origin: OriginFor<T>, #[pallet::compact] id: T::DungeonId, report_ranks: Vec<(DungeonReportState, Percent)>, ) -> DispatchResultWithPostInfo { // T::ManagerOrigin::ensure_origin(origin)?; let origin = ensure_signed(origin)?; ensure!(T::AssetAdmin::is_admin(&origin), Error::<T>::NoPermission); Dungeons::<T>::try_mutate(id, |maybe_dungeon| { let dungeon = maybe_dungeon.as_mut().ok_or(Error::<T>::UnknownDungeon)?; dungeon.report_ranks = report_ranks; Self::deposit_event(Event::DungeonReportRanksModified(id)); Ok(().into()) }) } /// buy dungeon ticket 
#[pallet::weight((10_000 + T::DbWeight::get().writes(1), DispatchClass::Normal, Pays::No))] pub(super) fn buy_ticket( origin: OriginFor<T>, #[pallet::compact] id: T::DungeonId, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; ensure!(!T::AssetAdmin::is_admin(&who), Error::<T>::NoPermission); ensure!(Dungeons::<T>::contains_key(id), Error::<T>::UnknownDungeon); let dungeon = Dungeons::<T>::get(id).ok_or(Error::<T>::UnknownDungeon)?; // ensure ticket price T::Currency::reserve(&who, dungeon.ticket_price)?; // now let current_block = frame_system::Module::<T>::block_number(); // build instance let ins = DungeonInstance { id: id, player: who.clone(), created_at: current_block, status: DungeonInstanceStatus::Booked{ close_due: current_block + T::TicketClosingGap::get() }, }; let ticket_id = T::Hashing::hash_of(&(id.encode(), &ins.player, &ins.created_at)); // insert new instance DungeonInstances::<T>::insert(ticket_id, ins); Self::deposit_event(Event::DungeonTicketBought(id, who, ticket_id)); Ok(().into()) } /// begin a dungeon instance /// transfer balance, issue assets, update status #[pallet::weight((10_000 + T::DbWeight::get().writes(1), DispatchClass::Normal, Pays::No))] pub(super) fn start( origin: OriginFor<T>, ticket_id: T::Hash, ) -> DispatchResultWithPostInfo { let server = ensure_signed(origin)?; ensure!(T::AssetAdmin::is_admin(&server), Error::<T>::NoPermission); // ensure dungeon instance exists DungeonInstances::<T>::try_mutate_exists(ticket_id, |maybe_instance| -> DispatchResultWithPostInfo { let ins = maybe_instance.as_mut().ok_or(Error::<T>::UnknownInstance)?; let dungeon = Dungeons::<T>::get(ins.id).ok_or(Error::<T>::UnknownDungeon)?; // now block let current_block = frame_system::Module::<T>::block_number(); // ensure current status is booked match ins.status { DungeonInstanceStatus::Booked{ close_due } => { ensure!(close_due > current_block, Error::<T>::InstanceIsClosed); // TODO 自动关闭过期的 dungeon instance }, _ => return Err(Error::<T>::InstanceStatusShouldBeBooked.into()), }; // Step.1 unreserve player's balance T::Currency::unreserve(&ins.player, dungeon.ticket_price); // Step.2 transfer player's balance to server let _ = T::Currency::transfer(&ins.player, &server, dungeon.ticket_price, KeepAlive)?; // Step.3 server mint asset to it self. 
for (asset_id, amount) in dungeon.provide_assets.iter() { T::FeaturedAssets::mint(*asset_id, &server, *amount)?; } // Step.4 set instance status ins.status = DungeonInstanceStatus::Started { server: server.clone(), close_due: current_block + T::TicketPlayingGap::get(), }; // send started event Self::deposit_event(Event::DungeonStarted(ins.id, ins.player.clone(), server, ticket_id)); Ok(().into()) }) } /// end a dungeon instance #[pallet::weight((10_000 + T::DbWeight::get().writes(1), DispatchClass::Normal, Pays::No))] pub(super) fn end( origin: OriginFor<T>, ticket_id: T::Hash, result: DungeonReportState, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; ensure!(T::AssetAdmin::is_admin(&who), Error::<T>::NoPermission); // ensure dungeon instance exists DungeonInstances::<T>::try_mutate_exists(ticket_id, |maybe_instance| -> DispatchResultWithPostInfo { let ins = maybe_instance.as_mut().ok_or(Error::<T>::UnknownInstance)?; let dungeon = Dungeons::<T>::get(ins.id).ok_or(Error::<T>::UnknownDungeon)?; // now block let current_block = frame_system::Module::<T>::block_number(); // ensure current status is started let server_id = match ins.status.clone() { DungeonInstanceStatus::Started{ server, close_due, } => { // 自动关闭过期的 dungeon instance ensure!(close_due > current_block, Error::<T>::InstanceIsClosed); ensure!(server.clone() == who, Error::<T>::InstanceServerShouldBeSame); server }, _ => return Err(Error::<T>::InstanceStatusShouldBeStarted.into()), }; // Step.1 get percent by result let percent = match result { DungeonReportState::Lose => Percent::from_percent(0), DungeonReportState::PerfectWin => Percent::from_percent(100), DungeonReportState::ScoredWin(score) => score, }; // Step.2 distribute asset to players according to result let distribute_percent = T::AssetDistributionPercent::get(); for (asset_id, amount) in dungeon.provide_assets.iter() { let player_amount: AssetBalance<T> = distribute_percent.mul_ceil(percent.mul_ceil(*amount)); let treasury_amount: AssetBalance<T> = distribute_percent.mul_ceil(*amount - player_amount); // FIXME 需要确保转账成功 if !player_amount.is_zero() { T::FeaturedAssets::transfer(*asset_id, &server_id, &ins.player, player_amount)?; } if !treasury_amount.is_zero() { T::FeaturedAssets::transfer(*asset_id, &server_id, &T::AssetAdmin::get_owner_id(), treasury_amount)?; } } // Step.2 set instance status ins.status = DungeonInstanceStatus::Ended { server: server_id.clone(), report_at: current_block, report_state: result, }; // send started event Self::deposit_event(Event::DungeonEnded(ins.id, ins.player.clone(), server_id, ticket_id, percent)); Ok(().into()) }) } } #[pallet::storage] #[pallet::getter(fn dungeons)] /// dungeon definations pub(super) type Dungeons<T: Config> = StorageMap< _, Blake2_128Concat, T::DungeonId, DungeonInfo<BalanceOf<T>, AssetAmountPair<T>> >; #[pallet::storage] #[pallet::getter(fn dungeon_instances)] /// dungeon instances pub(super) type DungeonInstances<T: Config> = StorageMap< _, Blake2_128Concat, T::Hash, DungeonInstance<T::DungeonId, T::AccountId, T::BlockNumber> >; #[pallet::event] #[pallet::metadata(T::AccountId = "AccountId", T::Balance = "Balance", T::DungeonId = "DungeonId")] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event<T: Config> { /// Some dungeon were created. \[dungeon_id, ticket_price\] DungeonCreated(T::DungeonId, BalanceOf<T>), /// Some dungeon's price were modified. 
\[dungeon_id, old_ticket_price, new_ticket_price\] DungeonTicketModified(T::DungeonId, BalanceOf<T>, BalanceOf<T>), /// Some dungeon's info were modified. \[dungeon_id\] DungeonInfoModified(T::DungeonId), /// Some dungeon's report ranks were modified. \[dungeon_id\] DungeonReportRanksModified(T::DungeonId), /// a dungeon instance ticket bought. \[dungeon_id, player_id, ticket_id\] DungeonTicketBought(T::DungeonId, T::AccountId, T::Hash), /// a dungeon started. \[dungeon_id, player_id, server_id, ticket_id\] DungeonStarted(T::DungeonId, T::AccountId, T::AccountId, T::Hash), /// a dungeon ended. \[dungeon_id, player_id, server_id, ticket_id, score\] DungeonEnded(T::DungeonId, T::AccountId, T::AccountId, T::Hash, Percent), } #[pallet::error] pub enum Error<T> { NoPermission, DungeonExists, AssetNotUsed, UnknownDungeon, UnknownInstance, InstanceIsClosed, InstanceStatusShouldBeBooked, InstanceStatusShouldBeStarted, InstanceServerShouldBeSame, } } #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default)] pub struct DungeonInfo< Balance: Encode + Decode + Clone + Debug + Eq + PartialEq, AssetAmountPair, > { /// The balance ticket_price: Balance, provide_assets: Vec<AssetAmountPair>, report_ranks: Vec<(DungeonReportState, Percent)>, } /// The status of a dungeon instance #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] pub enum DungeonInstanceStatus<AccountId, BlockNumber> { Booked { close_due: BlockNumber, }, Started { server: AccountId, close_due: BlockNumber, }, Ended { server: AccountId, report_at: BlockNumber, report_state: DungeonReportState, }, Closed, } /// The info of a dungeon instance #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] pub struct DungeonInstance< DungeonId: Encode + Decode + Clone + Debug + Eq + PartialEq, AccountId: Encode + Decode + Clone + Eq + PartialEq, BlockNumber: Encode + Decode + Clone + Eq + PartialEq, > { /// the id of dungeon id: DungeonId, player: AccountId, created_at: BlockNumber, status: DungeonInstanceStatus<AccountId, BlockNumber>, } // The main implementation block for the module. impl<T: Config> Pallet<T> { // Public immutables // TODO }
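In `end()` the minted assets are split with two ceiling multiplications: the player receives the distribution share of the score share, the treasury receives the distribution share of whatever the player left behind, and the remainder stays with the server account. The snippet below reruns that arithmetic with plain integers and made-up numbers; `percent_mul_ceil` is only a stand-in for `Percent::mul_ceil` from sp_runtime.

/// Stand-in for Percent::mul_ceil: ceil(amount * pct / 100).
fn percent_mul_ceil(pct: u128, amount: u128) -> u128 {
    (amount * pct + 99) / 100
}

fn main() {
    // Assumed numbers, purely for illustration.
    let minted: u128 = 1_000;        // assets minted to the server at `start`
    let score_pct: u128 = 60;        // DungeonReportState::ScoredWin(60%)
    let distribution_pct: u128 = 80; // T::AssetDistributionPercent

    // Mirrors the two lines in `end()`:
    //   player_amount   = distribute_percent.mul_ceil(percent.mul_ceil(amount))
    //   treasury_amount = distribute_percent.mul_ceil(amount - player_amount)
    let player_amount = percent_mul_ceil(distribution_pct, percent_mul_ceil(score_pct, minted));
    let treasury_amount = percent_mul_ceil(distribution_pct, minted - player_amount);
    let kept_by_server = minted - player_amount - treasury_amount;

    assert_eq!(player_amount, 480);   // 80% of (60% of 1000)
    assert_eq!(treasury_amount, 416); // 80% of the remaining 520
    assert_eq!(kept_by_server, 104);
    println!("player: {player_amount}, treasury: {treasury_amount}, server keeps: {kept_by_server}");
}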
32.96347
122
0.691855
033ff8c6c67a9d62f873df8a7b4356e2aa5e1fee
36
/// Software key impl
pub mod soft;
12
21
0.694444
1eb59889a8674111ef393566ef48a485bacf3801
2,662
#[derive(Copy, Clone, Debug, PartialEq)] pub struct PermutationElement<'a> { direction: bool, value: &'a str, } pub type PermutationElements<'a> = Vec<PermutationElement<'a>>; pub type PermutationElementRefs<'a> = Vec<&'a mut PermutationElement<'a>>; #[derive(Debug, PartialEq)] pub struct Permuter<'a> { current: &'a mut PermutationElementRefs<'a>, done: bool, } impl Permuter<'_> { /** * A Permuter iterates over all possible permutations of the given array * of elements. * */ pub fn new<'a>(current: &'a mut PermutationElementRefs<'a>) -> Permuter<'a> { // indicates whether there are more permutations let done = false; Permuter { current, done } } pub fn permutation_elements<'a>(list: &mut Vec<&'a str>) -> PermutationElements<'a> { // original array list.sort_unstable(); let mut elements = Vec::with_capacity(list.len()); for value in list.iter() { elements.push(PermutationElement { direction: true, value, }) } elements } } impl Iterator for Permuter<'_> { type Item = Vec<String>; fn next(&mut self) -> Option<Self::Item> { if self.done { return None; } // copy current permutation to return it let current = &mut self.current[..]; let rval = current.iter().map(|x| x.value.to_string()).collect(); /* Calculate the next permutation using the Steinhaus-Johnson-Trotter permutation algorithm. */ // get largest mobile element k // (mobile: element is greater than the one it is looking at) let mut k: Option<PermutationElement> = None; let mut k_is_none = true; let mut pos = 0; let length = current.len(); for (i, permutator_element) in current.iter().enumerate() { let element = permutator_element.value; let left = permutator_element.direction; k_is_none = k.is_none(); if (k_is_none || element > k.unwrap().value) && ((left && i > 0 && element > current[i - 1].value) || (!left && i < (length - 1) && element > current[i + 1].value)) { k = Some(*current[i]); pos = i; } } // no more permutations if k_is_none { self.done = true; } else { let k = k.unwrap(); // swap k and the element it is looking at let swap = if k.direction { pos - 1 } else { pos + 1 }; current.swap(pos, swap); // reverse the direction of all elements larger than k for permutator_element in current.iter_mut() { if permutator_element.value > k.value { permutator_element.direction = !permutator_element.direction; } } } Some(rval) } }
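The iterator above documents itself as the Steinhaus-Johnson-Trotter scheme: repeatedly move the largest "mobile" element one step in its direction, then flip the direction of everything larger. As an independent cross-check, here is a self-contained sketch of that stepping rule over plain integers; it does not touch the `Permuter` type or its lifetimes.

// Generate every permutation of 1..=n by the Steinhaus-Johnson-Trotter rule.
fn sjt_permutations(n: usize) -> Vec<Vec<usize>> {
    let mut vals: Vec<usize> = (1..=n).collect();
    let mut dirs: Vec<i32> = vec![-1; n]; // -1 = looking left, +1 = looking right
    let mut out = vec![vals.clone()];

    loop {
        // A value is mobile when it is larger than the neighbour it looks at;
        // pick the largest mobile value.
        let mut mobile: Option<usize> = None;
        for i in 0..n {
            let j = i as i64 + dirs[i] as i64;
            if j < 0 || j >= n as i64 {
                continue;
            }
            if vals[i] > vals[j as usize] && mobile.map_or(true, |m| vals[i] > vals[m]) {
                mobile = Some(i);
            }
        }

        let i = match mobile {
            Some(i) => i,
            None => break, // nothing is mobile any more: all permutations visited
        };

        // Swap the mobile value with the neighbour it is looking at...
        let largest = vals[i];
        let j = (i as i64 + dirs[i] as i64) as usize;
        vals.swap(i, j);
        dirs.swap(i, j);

        // ...then reverse the direction of every value larger than it.
        for idx in 0..n {
            if vals[idx] > largest {
                dirs[idx] = -dirs[idx];
            }
        }
        out.push(vals.clone());
    }
    out
}

fn main() {
    let perms = sjt_permutations(3);
    assert_eq!(perms.len(), 6); // 3! orderings, consecutive ones differ by one swap
    for p in &perms {
        println!("{:?}", p);
    }
}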
28.021053
87
0.613449
2f2bf0b72cde60b0254382941afc0e74bd73ce39
759
1 1.7830037230933333 0
2 1.7830037230933333 17
3 1.7830037230933333 49
4 1.7830037230933333 19
5 1.7830037230933333 51
6 0.45367267733333344 101
7 0.24822168499199995 107
8 0.2998809452444444 103
9 0.27078669279999995 87
10 0.35009989324800006 106
11 0.36153334124088893 110
12 0.5044219813333333 102
13 0.4228977980444445 86
14 0.21972759904000003 93
15 0.2447894504888889 85
16 0.33897751608888893 78
17 0.23103606041600003 91
18 0.3188559254755556 96
19 0.4603000783111112 88
20 0.42095855593244447 116
21 0.5620364772693333 108
22 0.38974332839822223 76
23 0.33955455559111114 80
24 0.22977211564444444 69
25 0.6469147968 113
26 0.2116657717777778 68
27 0.36168292727466667 94
28 0.6957446353351112 115
29 0.7475009231075557 109
30 0.38924082827377776 79
24.483871
26
0.841897
ebb198b8179cf31413cbdc9d74e0e335d04d234d
634
//! The `std::ops` module.

use crate::{ContextError, Module, Protocol, Range, Value};

/// Construct the `std::ops` module.
pub fn module() -> Result<Module, ContextError> {
    let mut module = Module::with_crate_item("std", &["ops"]);
    module.ty::<Range>()?;

    module.inst_fn("contains_int", Range::contains_int)?;
    module.field_fn(Protocol::SET, "start", range_set_start)?;
    module.field_fn(Protocol::SET, "end", range_set_end)?;
    Ok(module)
}

fn range_set_start(range: &mut Range, start: Option<Value>) {
    range.start = start;
}

fn range_set_end(range: &mut Range, end: Option<Value>) {
    range.end = end;
}
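The two free functions above exist only to be registered as field setters, so that a script-side assignment to `start` or `end` on a `Range` is routed through them via the `SET` protocol. The sketch below illustrates just that dispatch idea with a toy lookup table; it does not use rune's actual `Module` machinery, and the `Range` here is a local stand-in.

use std::collections::HashMap;

// Local stand-in for the value type whose fields are being exposed.
#[derive(Debug, Default)]
struct Range {
    start: Option<i64>,
    end: Option<i64>,
}

// A field setter takes the receiver and the new value.
type FieldSetter = fn(&mut Range, Option<i64>);

fn range_set_start(range: &mut Range, start: Option<i64>) {
    range.start = start;
}

fn range_set_end(range: &mut Range, end: Option<i64>) {
    range.end = end;
}

fn main() {
    // The registry keyed by field name plays the role of the
    // module.field_fn(Protocol::SET, "start", ...) registrations.
    let mut setters: HashMap<&str, FieldSetter> = HashMap::new();
    setters.insert("start", range_set_start);
    setters.insert("end", range_set_end);

    // A scripted `range.start = 2` would be dispatched through the setter
    // registered for that field.
    let mut range = Range::default();
    let set_start = setters["start"];
    set_start(&mut range, Some(2));
    let set_end = setters["end"];
    set_end(&mut range, Some(10));
    println!("{:?}", range); // Range { start: Some(2), end: Some(10) }
}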
28.818182
62
0.662461
d7f78dca1fb5a7fd1ee5fa5896644cf2987cce36
38,741
use std::collections::hash_map::{Entry, HashMap}; use std::fs::File; use std::path::Path; use std::sync::{Arc, RwLock}; use std::{fs, thread}; use crossbeam_channel::{Receiver, Sender}; use heed::types::{Str, Unit}; use heed::{CompactionOption, Result as ZResult}; use log::debug; use meilisearch_schema::Schema; use crate::{store, update, Index, MResult}; pub type BoxUpdateFn = Box<dyn Fn(&str, update::ProcessedUpdateResult) + Send + Sync + 'static>; type ArcSwapFn = arc_swap::ArcSwapOption<BoxUpdateFn>; pub struct MainT; pub struct UpdateT; pub struct Database { env: heed::Env, update_env: heed::Env, common_store: heed::PolyDatabase, indexes_store: heed::Database<Str, Unit>, indexes: RwLock<HashMap<String, (Index, thread::JoinHandle<MResult<()>>)>>, update_fn: Arc<ArcSwapFn>, } pub struct DatabaseOptions { pub main_map_size: usize, pub update_map_size: usize, } impl Default for DatabaseOptions { fn default() -> DatabaseOptions { DatabaseOptions { main_map_size: 100 * 1024 * 1024 * 1024, //100Gb update_map_size: 100 * 1024 * 1024 * 1024, //100Gb } } } macro_rules! r#break_try { ($expr:expr, $msg:tt) => { match $expr { core::result::Result::Ok(val) => val, core::result::Result::Err(err) => { log::error!(concat!($msg, ": {}"), err); break; } } }; } pub enum UpdateEvent { NewUpdate, MustClear, } pub type UpdateEvents = Receiver<UpdateEvent>; pub type UpdateEventsEmitter = Sender<UpdateEvent>; fn update_awaiter( receiver: UpdateEvents, env: heed::Env, update_env: heed::Env, index_uid: &str, update_fn: Arc<ArcSwapFn>, index: Index, ) -> MResult<()> { let mut receiver = receiver.into_iter(); while let Some(event) = receiver.next() { // if we receive a *MustClear* event, clear the index and break the loop if let UpdateEvent::MustClear = event { let mut writer = env.typed_write_txn::<MainT>()?; let mut update_writer = update_env.typed_write_txn::<UpdateT>()?; store::clear(&mut writer, &mut update_writer, &index)?; writer.commit()?; update_writer.commit()?; debug!("store {} cleared", index_uid); break } loop { // We instantiate a *write* transaction to *block* the thread // until the *other*, notifiying, thread commits let result = update_env.typed_write_txn::<UpdateT>(); let update_reader = break_try!(result, "LMDB read transaction (update) begin failed"); // retrieve the update that needs to be processed let result = index.updates.first_update(&update_reader); let (update_id, update) = match break_try!(result, "pop front update failed") { Some(value) => value, None => { debug!("no more updates"); break; } }; // do not keep the reader for too long update_reader.abort(); // instantiate a transaction to touch to the main env let result = env.typed_write_txn::<MainT>(); let mut main_writer = break_try!(result, "LMDB nested write transaction failed"); // try to apply the update to the database using the main transaction let result = update::update_task(&mut main_writer, &index, update_id, update); let status = break_try!(result, "update task failed"); // commit the main transaction if the update was successful, abort it otherwise if status.error.is_none() { break_try!(main_writer.commit(), "commit nested transaction failed"); } else { main_writer.abort() } // now that the update has been processed we can instantiate // a transaction to move the result to the updates-results store let result = update_env.typed_write_txn::<UpdateT>(); let mut update_writer = break_try!(result, "LMDB write transaction begin failed"); // definitely remove the update from the updates store index.updates.del_update(&mut 
update_writer, update_id)?; // write the result of the updates-results store let updates_results = index.updates_results; let result = updates_results.put_update_result(&mut update_writer, update_id, &status); // always commit the main transaction, even if the update was unsuccessful break_try!(result, "update result store commit failed"); break_try!(update_writer.commit(), "update transaction commit failed"); // call the user callback when the update and the result are written consistently if let Some(ref callback) = *update_fn.load() { (callback)(index_uid, status); } } } debug!("update loop system stopped"); Ok(()) } impl Database { pub fn open_or_create(path: impl AsRef<Path>, options: DatabaseOptions) -> MResult<Database> { let main_path = path.as_ref().join("main"); let update_path = path.as_ref().join("update"); fs::create_dir_all(&main_path)?; let env = heed::EnvOpenOptions::new() .map_size(options.main_map_size) .max_dbs(3000) .open(main_path)?; fs::create_dir_all(&update_path)?; let update_env = heed::EnvOpenOptions::new() .map_size(options.update_map_size) .max_dbs(3000) .open(update_path)?; let common_store = env.create_poly_database(Some("common"))?; let indexes_store = env.create_database::<Str, Unit>(Some("indexes"))?; let update_fn = Arc::new(ArcSwapFn::empty()); // list all indexes that needs to be opened let mut must_open = Vec::new(); let reader = env.read_txn()?; for result in indexes_store.iter(&reader)? { let (index_uid, _) = result?; must_open.push(index_uid.to_owned()); } reader.abort(); // open the previously aggregated indexes let mut indexes = HashMap::new(); for index_uid in must_open { let (sender, receiver) = crossbeam_channel::unbounded(); let index = match store::open(&env, &update_env, &index_uid, sender.clone())? { Some(index) => index, None => { log::warn!( "the index {} doesn't exist or has not all the databases", index_uid ); continue; } }; let env_clone = env.clone(); let update_env_clone = update_env.clone(); let index_clone = index.clone(); let name_clone = index_uid.clone(); let update_fn_clone = update_fn.clone(); let handle = thread::spawn(move || { update_awaiter( receiver, env_clone, update_env_clone, &name_clone, update_fn_clone, index_clone, ) }); // send an update notification to make sure that // possible pre-boot updates are consumed sender.send(UpdateEvent::NewUpdate).unwrap(); let result = indexes.insert(index_uid, (index, handle)); assert!( result.is_none(), "The index should not have been already open" ); } Ok(Database { env, update_env, common_store, indexes_store, indexes: RwLock::new(indexes), update_fn, }) } pub fn open_index(&self, name: impl AsRef<str>) -> Option<Index> { let indexes_lock = self.indexes.read().unwrap(); match indexes_lock.get(name.as_ref()) { Some((index, ..)) => Some(index.clone()), None => None, } } pub fn create_index(&self, name: impl AsRef<str>) -> MResult<Index> { let name = name.as_ref(); let mut indexes_lock = self.indexes.write().unwrap(); match indexes_lock.entry(name.to_owned()) { Entry::Occupied(_) => Err(crate::Error::IndexAlreadyExists), Entry::Vacant(entry) => { let (sender, receiver) = crossbeam_channel::unbounded(); let index = store::create(&self.env, &self.update_env, name, sender)?; let mut writer = self.env.typed_write_txn::<MainT>()?; self.indexes_store.put(&mut writer, name, &())?; index.main.put_name(&mut writer, name)?; index.main.put_created_at(&mut writer)?; index.main.put_updated_at(&mut writer)?; index.main.put_schema(&mut writer, &Schema::new())?; let env_clone = self.env.clone(); let 
update_env_clone = self.update_env.clone(); let index_clone = index.clone(); let name_clone = name.to_owned(); let update_fn_clone = self.update_fn.clone(); let handle = thread::spawn(move || { update_awaiter( receiver, env_clone, update_env_clone, &name_clone, update_fn_clone, index_clone, ) }); writer.commit()?; entry.insert((index.clone(), handle)); Ok(index) } } } pub fn delete_index(&self, name: impl AsRef<str>) -> MResult<bool> { let name = name.as_ref(); let mut indexes_lock = self.indexes.write().unwrap(); match indexes_lock.remove_entry(name) { Some((name, (index, handle))) => { // remove the index name from the list of indexes // and clear all the LMDB dbi let mut writer = self.env.write_txn()?; self.indexes_store.delete(&mut writer, &name)?; writer.commit()?; // send a stop event to the update loop of the index index.updates_notifier.send(UpdateEvent::MustClear).unwrap(); drop(indexes_lock); // join the update loop thread to ensure it is stopped handle.join().unwrap()?; Ok(true) } None => Ok(false), } } pub fn set_update_callback(&self, update_fn: BoxUpdateFn) { let update_fn = Some(Arc::new(update_fn)); self.update_fn.swap(update_fn); } pub fn unset_update_callback(&self) { self.update_fn.swap(None); } pub fn main_read_txn(&self) -> heed::Result<heed::RoTxn<MainT>> { self.env.typed_read_txn::<MainT>() } pub fn main_write_txn(&self) -> heed::Result<heed::RwTxn<MainT>> { self.env.typed_write_txn::<MainT>() } pub fn update_read_txn(&self) -> heed::Result<heed::RoTxn<UpdateT>> { self.update_env.typed_read_txn::<UpdateT>() } pub fn update_write_txn(&self) -> heed::Result<heed::RwTxn<UpdateT>> { self.update_env.typed_write_txn::<UpdateT>() } pub fn copy_and_compact_to_path<P: AsRef<Path>>(&self, path: P) -> ZResult<(File, File)> { let path = path.as_ref(); let env_path = path.join("main"); let env_update_path = path.join("update"); fs::create_dir(&env_path)?; fs::create_dir(&env_update_path)?; let env_path = env_path.join("data.mdb"); let env_file = self.env.copy_to_path(&env_path, CompactionOption::Enabled)?; let env_update_path = env_update_path.join("data.mdb"); match self.update_env.copy_to_path(env_update_path, CompactionOption::Enabled) { Ok(update_env_file) => Ok((env_file, update_env_file)), Err(e) => { fs::remove_file(env_path)?; Err(e) }, } } pub fn indexes_uids(&self) -> Vec<String> { let indexes = self.indexes.read().unwrap(); indexes.keys().cloned().collect() } pub fn common_store(&self) -> heed::PolyDatabase { self.common_store } } #[cfg(test)] mod tests { use super::*; use crate::bucket_sort::SortResult; use crate::criterion::{self, CriteriaBuilder}; use crate::update::{ProcessedUpdateResult, UpdateStatus}; use crate::settings::Settings; use crate::{Document, DocumentId}; use serde::de::IgnoredAny; use std::sync::mpsc; #[test] fn valid_updates() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() }; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let settings = { let data = r#" { "searchableAttributes": ["name", "description"], "displayedAttributes": ["name", 
"description"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.into_update().unwrap() }; let mut update_writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut update_writer, settings).unwrap(); update_writer.commit().unwrap(); let mut additions = index.documents_addition(); let doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", }); let doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "My name is Kevin", }); additions.update_document(doc1); additions.update_document(doc2); let mut update_writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut update_writer).unwrap(); update_writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.into_iter().find(|id| *id == update_id); let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); } #[test] fn invalid_updates() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() }; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name", "description"], "displayedAttributes": ["name", "description"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.into_update().unwrap() }; let mut update_writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut update_writer, settings).unwrap(); update_writer.commit().unwrap(); let mut additions = index.documents_addition(); let doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", }); let doc2 = serde_json::json!({ "name": "Kevin", "description": "My name is Kevin", }); additions.update_document(doc1); additions.update_document(doc2); let mut update_writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut update_writer).unwrap(); update_writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.into_iter().find(|id| *id == update_id); let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Failed { content }) if content.error.is_some()); } #[test] fn ignored_words_too_long() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() }; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name"], "displayedAttributes": ["name"] } "#; let 
settings: Settings = serde_json::from_str(data).unwrap(); settings.into_update().unwrap() }; let mut update_writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut update_writer, settings).unwrap(); update_writer.commit().unwrap(); let mut additions = index.documents_addition(); let doc1 = serde_json::json!({ "id": 123, "name": "s̷̡̢̡̧̺̜̞͕͉͉͕̜͔̟̼̥̝͍̟̖͔͔̪͉̲̹̝̣̖͎̞̤̥͓͎̭̩͕̙̩̿̀̋̅̈́̌́̏̍̄̽͂̆̾̀̿̕̚̚͜͠͠ͅͅļ̵̨̨̨̰̦̻̳̖̳͚̬̫͚̦͖͈̲̫̣̩̥̻̙̦̱̼̠̖̻̼̘̖͉̪̜̠̙͖̙̩͔̖̯̩̲̿̽͋̔̿̍̓͂̍̿͊͆̃͗̔̎͐͌̾̆͗́̆̒̔̾̅̚̚͜͜ͅͅī̵̛̦̅̔̓͂͌̾́͂͛̎̋͐͆̽̂̋̋́̾̀̉̓̏̽́̑̀͒̇͋͛̈́̃̉̏͊̌̄̽̿̏̇͘̕̚̕p̶̧̛̛̖̯̗͕̝̗̭̱͙̖̗̟̟̐͆̊̂͐̋̓̂̈́̓͊̆͌̾̾͐͋͗͌̆̿̅͆̈́̈́̉͋̍͊͗̌̓̅̈̎̇̃̎̈́̉̐̋͑̃͘̕͘d̴̢̨̛͕̘̯͖̭̮̝̝̐̊̈̅̐̀͒̀́̈́̀͌̽͛͆͑̀̽̿͛̃̋̇̎̀́̂́͘͠͝ǫ̵̨̛̮̩̘͚̬̯̖̱͍̼͑͑̓̐́̑̿̈́̔͌̂̄͐͝ģ̶̧̜͇̣̭̺̪̺̖̻͖̮̭̣̙̻͒͊͗̓̓͒̀̀ͅ", }); additions.update_document(doc1); let mut update_writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut update_writer).unwrap(); update_writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.into_iter().find(|id| *id == update_id); let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); } #[test] fn add_schema_attributes_at_end() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() }; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name", "description"], "displayedAttributes": ["name", "description"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.into_update().unwrap() }; let mut update_writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut update_writer, settings).unwrap(); update_writer.commit().unwrap(); let mut additions = index.documents_addition(); let doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", }); let doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "My name is Kevin", }); additions.update_document(doc1); additions.update_document(doc2); let mut update_writer = db.update_write_txn().unwrap(); let _update_id = additions.finalize(&mut update_writer).unwrap(); update_writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name", "description", "age", "sex"], "displayedAttributes": ["name", "description", "age", "sex"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.into_update().unwrap() }; let mut writer = db.update_write_txn().unwrap(); let update_id = index.settings_update(&mut writer, settings).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.iter().find(|id| *id == update_id); // check if it has been accepted let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); update_reader.abort(); let mut additions = index.documents_addition(); let doc1 = 
serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", "age": 21, "sex": "Male", }); let doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "My name is Kevin", "age": 23, "sex": "Male", }); additions.update_document(doc1); additions.update_document(doc2); let mut writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut writer).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.iter().find(|id| *id == update_id); // check if it has been accepted let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); update_reader.abort(); // even try to search for a document let reader = db.main_read_txn().unwrap(); let SortResult {documents, .. } = index.query_builder().query(&reader, "21 ", 0..20).unwrap(); assert_matches!(documents.len(), 1); reader.abort(); // try to introduce attributes in the middle of the schema let settings = { let data = r#" { "searchableAttributes": ["name", "description", "city", "age", "sex"], "displayedAttributes": ["name", "description", "city", "age", "sex"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.into_update().unwrap() }; let mut writer = db.update_write_txn().unwrap(); let update_id = index.settings_update(&mut writer, settings).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.iter().find(|id| *id == update_id); // check if it has been accepted let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); } #[test] fn deserialize_documents() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() }; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name", "description"], "displayedAttributes": ["name", "description"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.into_update().unwrap() }; let mut writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut writer, settings).unwrap(); writer.commit().unwrap(); let mut additions = index.documents_addition(); // DocumentId(7900334843754999545) let doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", }); // DocumentId(8367468610878465872) let doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "My name is Kevin", }); additions.update_document(doc1); additions.update_document(doc2); let mut writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut writer).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.into_iter().find(|id| *id == update_id); let update_reader = db.update_read_txn().unwrap(); let result = 
index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); update_reader.abort(); let reader = db.main_read_txn().unwrap(); let document: Option<IgnoredAny> = index.document(&reader, None, DocumentId(25)).unwrap(); assert!(document.is_none()); let document: Option<IgnoredAny> = index .document(&reader, None, DocumentId(7_900_334_843_754_999_545)) .unwrap(); assert!(document.is_some()); let document: Option<IgnoredAny> = index .document(&reader, None, DocumentId(8_367_468_610_878_465_872)) .unwrap(); assert!(document.is_some()); } #[test] fn partial_document_update() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() }; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name", "description"], "displayedAttributes": ["name", "description", "id"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.into_update().unwrap() }; let mut writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut writer, settings).unwrap(); writer.commit().unwrap(); let mut additions = index.documents_addition(); // DocumentId(7900334843754999545) let doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", }); // DocumentId(8367468610878465872) let doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "My name is Kevin", }); additions.update_document(doc1); additions.update_document(doc2); let mut writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut writer).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.iter().find(|id| *id == update_id); let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); update_reader.abort(); let reader = db.main_read_txn().unwrap(); let document: Option<IgnoredAny> = index.document(&reader, None, DocumentId(25)).unwrap(); assert!(document.is_none()); let document: Option<IgnoredAny> = index .document(&reader, None, DocumentId(7_900_334_843_754_999_545)) .unwrap(); assert!(document.is_some()); let document: Option<IgnoredAny> = index .document(&reader, None, DocumentId(8_367_468_610_878_465_872)) .unwrap(); assert!(document.is_some()); reader.abort(); let mut partial_additions = index.documents_partial_addition(); // DocumentId(7900334843754999545) let partial_doc1 = serde_json::json!({ "id": 123, "description": "I am the new Marvin", }); // DocumentId(8367468610878465872) let partial_doc2 = serde_json::json!({ "id": 234, "description": "I am the new Kevin", }); partial_additions.update_document(partial_doc1); partial_additions.update_document(partial_doc2); let mut writer = db.update_write_txn().unwrap(); let update_id = partial_additions.finalize(&mut writer).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = 
receiver.iter().find(|id| *id == update_id); let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); update_reader.abort(); let reader = db.main_read_txn().unwrap(); let document: Option<serde_json::Value> = index .document(&reader, None, DocumentId(7_900_334_843_754_999_545)) .unwrap(); let new_doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "I am the new Marvin", }); assert_eq!(document, Some(new_doc1)); let document: Option<serde_json::Value> = index .document(&reader, None, DocumentId(8_367_468_610_878_465_872)) .unwrap(); let new_doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "I am the new Kevin", }); assert_eq!(document, Some(new_doc2)); } #[test] fn delete_index() { let dir = tempfile::tempdir().unwrap(); let database = Arc::new(Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap()); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let db_cloned = database.clone(); let update_fn = move |name: &str, update: ProcessedUpdateResult| { // try to open index to trigger a lock let _ = db_cloned.open_index(name); sender.send(update.update_id).unwrap() }; // create the index let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name", "description"], "displayedAttributes": ["name", "description"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.into_update().unwrap() }; let mut writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut writer, settings).unwrap(); writer.commit().unwrap(); // add documents to the index let mut additions = index.documents_addition(); let doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", }); let doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "My name is Kevin", }); additions.update_document(doc1); additions.update_document(doc2); let mut writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut writer).unwrap(); writer.commit().unwrap(); // delete the index let deleted = database.delete_index("test").unwrap(); assert!(deleted); // block until the transaction is processed let _ = receiver.into_iter().find(|id| *id == update_id); let result = database.open_index("test"); assert!(result.is_none()); } #[test] fn check_number_ordering() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() }; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); let settings = { let data = r#" { "rankingRules": [ "typo", "words", "proximity", "attribute", "wordsPosition", "exactness", "desc(release_date)" ], "searchableAttributes": ["name", "release_date"], "displayedAttributes": ["name", 
"release_date"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.into_update().unwrap() }; let mut writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut writer, settings).unwrap(); writer.commit().unwrap(); let mut additions = index.documents_addition(); // DocumentId(7900334843754999545) let doc1 = serde_json::json!({ "id": 123, "name": "Kevin the first", "release_date": -10000, }); // DocumentId(8367468610878465872) let doc2 = serde_json::json!({ "id": 234, "name": "Kevin the second", "release_date": 10000, }); additions.update_document(doc1); additions.update_document(doc2); let mut writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut writer).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.into_iter().find(|id| *id == update_id); let reader = db.main_read_txn().unwrap(); let schema = index.main.schema(&reader).unwrap().unwrap(); let ranked_map = index.main.ranked_map(&reader).unwrap().unwrap(); let criteria = CriteriaBuilder::new() .add( criterion::SortByAttr::lower_is_better(&ranked_map, &schema, "release_date") .unwrap(), ) .add(criterion::DocumentId) .build(); let builder = index.query_builder_with_criteria(criteria); let SortResult {documents, .. } = builder.query(&reader, "Kevin", 0..20).unwrap(); let mut iter = documents.into_iter(); assert_matches!( iter.next(), Some(Document { id: DocumentId(7_900_334_843_754_999_545), .. }) ); assert_matches!( iter.next(), Some(Document { id: DocumentId(8_367_468_610_878_465_872), .. }) ); assert_matches!(iter.next(), None); } }
35.315406
391
0.558349
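Every test in the content field above repeats the same wait-for-update pattern after committing a write transaction. A minimal sketch of that pattern as a helper, assuming the std::sync::mpsc channel the tests wire into the update callback and that the reported update id is a u64; the name wait_for_update is illustrative and not part of meilisearch-core:

use std::sync::mpsc::Receiver;

/// Block until the update callback has reported `update_id`.
/// Hypothetical test helper mirroring the inline `receiver.iter().find(..)`
/// calls in the tests above.
fn wait_for_update(receiver: &Receiver<u64>, update_id: u64) {
    let _ = receiver.iter().find(|id| *id == update_id);
}

Each test could then call wait_for_update(&receiver, update_id) before opening its read transaction instead of repeating the iterator chain.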
de8116be6fc593786aafcd80dfb6c0061fb60310
1,496
// This file is part of ucx. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/ucx/master/COPYRIGHT. No part of ucx, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2016 The developers of ucx. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/ucx/master/COPYRIGHT.

#[repr(i8)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum ucs_status_t {
    UCS_OK = 0,
    UCS_INPROGRESS = 1,
    UCS_ERR_NO_MESSAGE = -1,
    UCS_ERR_NO_RESOURCE = -2,
    UCS_ERR_IO_ERROR = -3,
    UCS_ERR_NO_MEMORY = -4,
    UCS_ERR_INVALID_PARAM = -5,
    UCS_ERR_UNREACHABLE = -6,
    UCS_ERR_INVALID_ADDR = -7,
    UCS_ERR_NOT_IMPLEMENTED = -8,
    UCS_ERR_MESSAGE_TRUNCATED = -9,
    UCS_ERR_NO_PROGRESS = -10,
    UCS_ERR_BUFFER_TOO_SMALL = -11,
    UCS_ERR_NO_ELEM = -12,
    UCS_ERR_SOME_CONNECTS_FAILED = -13,
    UCS_ERR_NO_DEVICE = -14,
    UCS_ERR_BUSY = -15,
    UCS_ERR_CANCELED = -16,
    UCS_ERR_SHMEM_SEGMENT = -17,
    UCS_ERR_ALREADY_EXISTS = -18,
    UCS_ERR_OUT_OF_RANGE = -19,
    UCS_ERR_TIMED_OUT = -20,
    UCS_ERR_EXCEEDS_LIMIT = -21,
    UCS_ERR_UNSUPPORTED = -22,
    UCS_ERR_FIRST_LINK_FAILURE = -40,
    UCS_ERR_LAST_LINK_FAILURE = -59,
    UCS_ERR_FIRST_ENDPOINT_FAILURE = -60,
    UCS_ERR_LAST_ENDPOINT_FAILURE = -79,
    UCS_ERR_ENDPOINT_TIMEOUT = -80,
    UCS_ERR_LAST = -100,
}
37.4
364
0.766711
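The status codes above map naturally onto Rust's Result. A small sketch, assuming the ucs_status_t enum from this row is in scope; the to_result helper is illustrative and not part of the ucx bindings:

impl ucs_status_t {
    /// Treat UCS_OK and UCS_INPROGRESS as success and every other code as an error.
    /// Hypothetical convenience method, not generated by the bindings.
    pub fn to_result(self) -> Result<ucs_status_t, ucs_status_t> {
        match self {
            ucs_status_t::UCS_OK | ucs_status_t::UCS_INPROGRESS => Ok(self),
            error => Err(error),
        }
    }
}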
21477ddec3aaa493035b3461bc5029e29654eec4
445
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR MIT

// This test checks that Kani injects a reachability check for the assert_eq
// macro. The test has an unreachable assert_eq statement which should be
// reported as UNREACHABLE

#[kani::proof]
fn main() {
    let x: i32 = kani::any();
    let y = if x > 10 { 15 } else { 33 };
    if y > 50 {
        assert_eq!(y, 55);
    }
}
29.666667
76
0.65618
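For contrast with the UNREACHABLE case above, a sketch of a harness whose assert_eq is reachable, assuming the same kani attributes and kani::any() shown in that file; the harness name is illustrative:

#[kani::proof]
fn reachable_assert() {
    let x: i32 = kani::any();
    let y = if x > 10 { 55 } else { 33 };
    if y > 50 {
        // Reachable: any x > 10 yields y == 55, so Kani reports this check as covered.
        assert_eq!(y, 55);
    }
}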
0373175e25644f22badc4c1aa69aac23501f3873
9,560
#![doc = include_str!("../README.md")] #![cfg_attr(docsrs, feature(doc_cfg))] // Due to `schema_introspection` test. #![cfg_attr(test, recursion_limit = "256")] #![warn(missing_docs)] // Required for using `juniper_codegen` macros inside this crate to resolve // absolute `::juniper` path correctly, without errors. extern crate self as juniper; use std::fmt; // These are required by the code generated via the `juniper_codegen` macros. #[doc(hidden)] pub use {async_trait::async_trait, futures, serde, static_assertions as sa}; #[doc(inline)] pub use futures::future::{BoxFuture, LocalBoxFuture}; // Depend on juniper_codegen and re-export everything in it. // This allows users to just depend on juniper and get the derive // functionality automatically. pub use juniper_codegen::{ graphql_interface, graphql_object, graphql_scalar, graphql_subscription, graphql_union, GraphQLEnum, GraphQLInputObject, GraphQLInterface, GraphQLObject, GraphQLScalar, GraphQLUnion, }; #[doc(hidden)] #[macro_use] pub mod macros; mod ast; pub mod executor; mod introspection; pub mod parser; pub(crate) mod schema; mod types; mod util; pub mod validation; mod value; // This needs to be public until docs have support for private modules: // https://github.com/rust-lang/cargo/issues/1520 pub mod http; pub mod integrations; #[cfg(all(test, not(feature = "expose-test-schema")))] mod tests; #[cfg(feature = "expose-test-schema")] pub mod tests; #[cfg(test)] mod executor_tests; // Needs to be public because macros use it. pub use crate::util::to_camel_case; use crate::{ executor::{execute_validated_query, get_operation}, introspection::{INTROSPECTION_QUERY, INTROSPECTION_QUERY_WITHOUT_DESCRIPTIONS}, parser::parse_document_source, validation::{validate_input_values, visit_all_rules, ValidatorContext}, }; pub use crate::{ ast::{ Definition, Document, FromInputValue, InputValue, Operation, OperationType, Selection, ToInputValue, Type, }, executor::{ Applies, Context, ExecutionError, ExecutionResult, Executor, FieldError, FieldResult, FromContext, IntoFieldError, IntoResolvable, LookAheadArgument, LookAheadMethods, LookAheadSelection, LookAheadValue, OwnedExecutor, Registry, ValuesStream, Variables, }, introspection::IntrospectionFormat, macros::helper::subscription::{ExtractTypeFromStream, IntoFieldResult}, parser::{ParseError, ScalarToken, Spanning}, schema::{ meta, model::{RootNode, SchemaType}, }, types::{ async_await::{GraphQLTypeAsync, GraphQLValueAsync}, base::{Arguments, GraphQLType, GraphQLValue, TypeKind}, marker::{self, GraphQLInterface, GraphQLObject, GraphQLUnion}, nullable::Nullable, scalars::{EmptyMutation, EmptySubscription, ID}, subscriptions::{ ExecutionOutput, GraphQLSubscriptionType, GraphQLSubscriptionValue, SubscriptionConnection, SubscriptionCoordinator, }, }, validation::RuleError, value::{DefaultScalarValue, Object, ParseScalarResult, ParseScalarValue, ScalarValue, Value}, }; /// An error that prevented query execution #[derive(Debug, PartialEq)] #[allow(missing_docs)] pub enum GraphQLError<'a> { ParseError(Spanning<ParseError<'a>>), ValidationError(Vec<RuleError>), NoOperationProvided, MultipleOperationsProvided, UnknownOperationName, IsSubscription, NotSubscription, } impl<'a> fmt::Display for GraphQLError<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { GraphQLError::ParseError(error) => write!(f, "{}", error), GraphQLError::ValidationError(errors) => { for error in errors { writeln!(f, "{}", error)?; } Ok(()) } GraphQLError::NoOperationProvided => write!(f, "No operation 
provided"), GraphQLError::MultipleOperationsProvided => write!(f, "Multiple operations provided"), GraphQLError::UnknownOperationName => write!(f, "Unknown operation name"), GraphQLError::IsSubscription => write!(f, "Operation is a subscription"), GraphQLError::NotSubscription => write!(f, "Operation is not a subscription"), } } } impl<'a> std::error::Error for GraphQLError<'a> {} /// Execute a query synchronously in a provided schema pub fn execute_sync<'a, S, QueryT, MutationT, SubscriptionT>( document_source: &'a str, operation_name: Option<&str>, root_node: &'a RootNode<QueryT, MutationT, SubscriptionT, S>, variables: &Variables<S>, context: &QueryT::Context, ) -> Result<(Value<S>, Vec<ExecutionError<S>>), GraphQLError<'a>> where S: ScalarValue, QueryT: GraphQLType<S>, MutationT: GraphQLType<S, Context = QueryT::Context>, SubscriptionT: GraphQLType<S, Context = QueryT::Context>, { let document = parse_document_source(document_source, &root_node.schema)?; { let mut ctx = ValidatorContext::new(&root_node.schema, &document); visit_all_rules(&mut ctx, &document); let errors = ctx.into_errors(); if !errors.is_empty() { return Err(GraphQLError::ValidationError(errors)); } } let operation = get_operation(&document, operation_name)?; { let errors = validate_input_values(variables, operation, &root_node.schema); if !errors.is_empty() { return Err(GraphQLError::ValidationError(errors)); } } execute_validated_query(&document, operation, root_node, variables, context) } /// Execute a query in a provided schema pub async fn execute<'a, S, QueryT, MutationT, SubscriptionT>( document_source: &'a str, operation_name: Option<&str>, root_node: &'a RootNode<'a, QueryT, MutationT, SubscriptionT, S>, variables: &Variables<S>, context: &QueryT::Context, ) -> Result<(Value<S>, Vec<ExecutionError<S>>), GraphQLError<'a>> where QueryT: GraphQLTypeAsync<S>, QueryT::TypeInfo: Sync, QueryT::Context: Sync, MutationT: GraphQLTypeAsync<S, Context = QueryT::Context>, MutationT::TypeInfo: Sync, SubscriptionT: GraphQLType<S, Context = QueryT::Context> + Sync, SubscriptionT::TypeInfo: Sync, S: ScalarValue + Send + Sync, { let document = parse_document_source(document_source, &root_node.schema)?; { let mut ctx = ValidatorContext::new(&root_node.schema, &document); visit_all_rules(&mut ctx, &document); let errors = ctx.into_errors(); if !errors.is_empty() { return Err(GraphQLError::ValidationError(errors)); } } let operation = get_operation(&document, operation_name)?; { let errors = validate_input_values(variables, operation, &root_node.schema); if !errors.is_empty() { return Err(GraphQLError::ValidationError(errors)); } } executor::execute_validated_query_async(&document, operation, root_node, variables, context) .await } /// Resolve subscription into `ValuesStream` pub async fn resolve_into_stream<'a, S, QueryT, MutationT, SubscriptionT>( document_source: &'a str, operation_name: Option<&str>, root_node: &'a RootNode<'a, QueryT, MutationT, SubscriptionT, S>, variables: &Variables<S>, context: &'a QueryT::Context, ) -> Result<(Value<ValuesStream<'a, S>>, Vec<ExecutionError<S>>), GraphQLError<'a>> where QueryT: GraphQLTypeAsync<S>, QueryT::TypeInfo: Sync, QueryT::Context: Sync, MutationT: GraphQLTypeAsync<S, Context = QueryT::Context>, MutationT::TypeInfo: Sync, SubscriptionT: GraphQLSubscriptionType<S, Context = QueryT::Context>, SubscriptionT::TypeInfo: Sync, S: ScalarValue + Send + Sync, { let document: crate::ast::OwnedDocument<'a, S> = parse_document_source(document_source, &root_node.schema)?; { let mut ctx = 
ValidatorContext::new(&root_node.schema, &document); visit_all_rules(&mut ctx, &document); let errors = ctx.into_errors(); if !errors.is_empty() { return Err(GraphQLError::ValidationError(errors)); } } let operation = get_operation(&document, operation_name)?; { let errors = validate_input_values(variables, operation, &root_node.schema); if !errors.is_empty() { return Err(GraphQLError::ValidationError(errors)); } } executor::resolve_validated_subscription(&document, operation, root_node, variables, context) .await } /// Execute the reference introspection query in the provided schema pub fn introspect<'a, S, QueryT, MutationT, SubscriptionT>( root_node: &'a RootNode<QueryT, MutationT, SubscriptionT, S>, context: &QueryT::Context, format: IntrospectionFormat, ) -> Result<(Value<S>, Vec<ExecutionError<S>>), GraphQLError<'a>> where S: ScalarValue, QueryT: GraphQLType<S>, MutationT: GraphQLType<S, Context = QueryT::Context>, SubscriptionT: GraphQLType<S, Context = QueryT::Context>, { execute_sync( match format { IntrospectionFormat::All => INTROSPECTION_QUERY, IntrospectionFormat::WithoutDescriptions => INTROSPECTION_QUERY_WITHOUT_DESCRIPTIONS, }, None, root_node, &Variables::new(), context, ) } impl<'a> From<Spanning<ParseError<'a>>> for GraphQLError<'a> { fn from(f: Spanning<ParseError<'a>>) -> GraphQLError<'a> { GraphQLError::ParseError(f) } }
33.426573
98
0.677092
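The execute_sync function re-exported above takes a query string, an optional operation name, a RootNode, variables, and a context. A minimal end-to-end sketch, assuming juniper's graphql_object macro with the default () context; the Query type and ping field are invented for illustration:

use juniper::{graphql_object, EmptyMutation, EmptySubscription, RootNode, Variables};

struct Query;

#[graphql_object]
impl Query {
    // A trivial field so the schema has something to resolve.
    fn ping() -> String {
        "pong".to_owned()
    }
}

fn main() {
    let schema = RootNode::new(
        Query,
        EmptyMutation::<()>::new(),
        EmptySubscription::<()>::new(),
    );
    let (value, errors) =
        juniper::execute_sync("{ ping }", None, &schema, &Variables::new(), &())
            .expect("query should parse and validate");
    assert!(errors.is_empty());
    println!("{}", value);
}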
48f6e029ee95f8d6cb2fe4571301028499fd397a
365
fn main() {
    println!(r"cargo:rustc-link-search=./sysroot/opt/vc/lib");
    println!(r"cargo:rustc-link-search=./sysroot/opt/vc/include");
    println!(r"cargo:rustc-link-search=./sysroot/usr/lib");
    println!(r"cargo:rustc-link-search=./sysroot/usr/include");
    println!(r"cargo:rustc-link-lib=brcmEGL");
    println!(r"cargo:rustc-link-lib=brcmGLESv2");
}
40.555556
66
0.682192
5d2924993ae1e7a8dbdeeac15ddb08c639ffe5e3
1,286
extern crate rand;
extern crate ropey;

use std::io::Cursor;

use ropey::Rope;

const TEXT: &str = include_str!("test_text.txt");

#[test]
fn from_reader_01() {
    // Make a reader from our in-memory text
    let text_reader = Cursor::new(TEXT);

    let rope = Rope::from_reader(text_reader).unwrap();
    assert_eq!(rope, TEXT);

    // Make sure the tree is sound
    rope.assert_integrity();
    rope.assert_invariants();
}

#[test]
fn from_reader_02() {
    // Make a reader from blank text
    let text_reader = Cursor::new("");

    let rope = Rope::from_reader(text_reader).unwrap();
    assert_eq!(rope, "");

    // Make sure the tree is sound
    rope.assert_integrity();
    rope.assert_invariants();
}

#[test]
fn from_reader_03() {
    // Make text with a utf8-invalid byte sequence in it.
    let mut text = Vec::new();
    text.extend(TEXT.as_bytes());
    text[6132] = 0b1100_0000;
    text[6133] = 0b0100_0000;

    // Make a reader from the invalid data
    let text_reader = Cursor::new(text);

    // Try to read the data, and verify that we get the right error.
    if let Err(e) = Rope::from_reader(text_reader) {
        assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
    } else {
        panic!("Should have returned an invalid data error.")
    }
}
22.964286
68
0.643857
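Beyond the in-memory Cursor readers used in the tests above, Rope::from_reader accepts any std::io::Read. A small sketch, assuming a UTF-8 text file at an illustrative path:

use std::fs::File;
use std::io::BufReader;

use ropey::Rope;

fn main() -> std::io::Result<()> {
    // "test_text.txt" is a placeholder path for illustration.
    let rope = Rope::from_reader(BufReader::new(File::open("test_text.txt")?))?;
    println!("{} chars across {} lines", rope.len_chars(), rope.len_lines());
    Ok(())
}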
de3108878bf24ffdd017686450409fee4eef86ed
35,943
//! Instruction types #![allow(clippy::too_many_arguments)] use { crate::{ find_deposit_authority_program_address, find_stake_program_address, find_transient_stake_program_address, find_withdraw_authority_program_address, stake_program, state::{Fee, StakePool, ValidatorList}, MAX_VALIDATORS_TO_UPDATE, }, borsh::{BorshDeserialize, BorshSchema, BorshSerialize}, solana_program::{ instruction::{AccountMeta, Instruction}, pubkey::Pubkey, system_program, sysvar, }, }; /// Defines which validator vote account is set during the /// `SetPreferredValidator` instruction #[repr(C)] #[derive(Clone, Debug, PartialEq, BorshSerialize, BorshDeserialize, BorshSchema)] pub enum PreferredValidatorType { /// Set preferred validator for deposits Deposit, /// Set preferred validator for withdraws Withdraw, } /// Instructions supported by the StakePool program. #[repr(C)] #[derive(Clone, Debug, PartialEq, BorshSerialize, BorshDeserialize, BorshSchema)] pub enum StakePoolInstruction { /// Initializes a new StakePool. /// /// 0. `[w]` New StakePool to create. /// 1. `[s]` Manager /// 2. `[]` Staker /// 3. `[w]` Uninitialized validator stake list storage account /// 4. `[]` Reserve stake account must be initialized, have zero balance, /// and staker / withdrawer authority set to pool withdraw authority. /// 5. `[]` Pool token mint. Must have zero supply, owned by withdraw authority. /// 6. `[]` Pool account to deposit the generated fee for manager. /// 7. `[]` Clock sysvar /// 8. `[]` Rent sysvar /// 9. `[]` Token program id /// 10. `[]` (Optional) Deposit authority that must sign all deposits. /// Defaults to the program address generated using /// `find_deposit_authority_program_address`, making deposits permissionless. Initialize { /// Fee assessed as percentage of perceived rewards #[allow(dead_code)] // but it's not fee: Fee, /// Maximum expected number of validators #[allow(dead_code)] // but it's not max_validators: u32, }, /// (Staker only) Creates new program account for accumulating stakes for /// a particular validator /// /// 0. `[]` Stake pool account this stake will belong to /// 1. `[s]` Staker /// 2. `[ws]` Funding account (must be a system account) /// 3. `[w]` Stake account to be created /// 4. `[]` Validator this stake account will vote for /// 5. `[]` Rent sysvar /// 6. `[]` Stake History sysvar /// 7. `[]` Stake Config sysvar /// 8. `[]` System program /// 9. `[]` Stake program CreateValidatorStakeAccount, /// (Staker only) Adds stake account delegated to validator to the pool's /// list of managed validators. /// /// The stake account must have the rent-exempt amount plus at least 1 SOL, /// and at most 1.001 SOL. /// /// Once we delegate even 1 SOL, it will accrue rewards one epoch later, /// so we'll have more than 1 active SOL at this point. /// At 10% annualized rewards, 1 epoch of 2 days will accrue /// 0.000547945 SOL, so we check that it is at least 1 SOL, and at most /// 1.001 SOL. /// /// 0. `[w]` Stake pool /// 1. `[s]` Staker /// 2. `[]` Stake pool withdraw authority /// 3. `[w]` Validator stake list storage account /// 4. `[w]` Stake account to add to the pool, its withdraw authority must /// be set to the staker /// 5. `[]` Clock sysvar /// 6. '[]' Sysvar stake history account /// 7. `[]` Stake program AddValidatorToPool, /// (Staker only) Removes validator from the pool /// /// Only succeeds if the validator stake account has the minimum of 1 SOL /// plus the rent-exempt amount. /// /// 0. `[w]` Stake pool /// 1. `[s]` Staker /// 2. `[]` Stake pool withdraw authority /// 3. 
`[]` New withdraw/staker authority to set in the stake account /// 4. `[w]` Validator stake list storage account /// 5. `[w]` Stake account to remove from the pool /// 6. `[]` Transient stake account, to check that that we're not trying to activate /// 7. '[]' Sysvar clock /// 8. `[]` Stake program id, RemoveValidatorFromPool, /// (Staker only) Decrease active stake on a validator, eventually moving it to the reserve /// /// Internally, this instruction splits a validator stake account into its /// corresponding transient stake account and deactivates it. /// /// In order to rebalance the pool without taking custody, the staker needs /// a way of reducing the stake on a stake account. This instruction splits /// some amount of stake, up to the total activated stake, from the canonical /// validator stake account, into its "transient" stake account. /// /// The instruction only succeeds if the transient stake account does not /// exist. The amount of lamports to move must be at least rent-exemption /// plus 1 lamport. /// /// 0. `[]` Stake pool /// 1. `[s]` Stake pool staker /// 2. `[]` Stake pool withdraw authority /// 3. `[w]` Validator list /// 4. `[w]` Canonical stake account to split from /// 5. `[w]` Transient stake account to receive split /// 6. `[]` Clock sysvar /// 7. `[]` Rent sysvar /// 8. `[]` System program /// 9. `[]` Stake program /// userdata: amount of lamports to split into the transient stake account DecreaseValidatorStake(u64), /// (Staker only) Increase stake on a validator from the reserve account /// /// Internally, this instruction splits reserve stake into a transient stake /// account and delegate to the appropriate validator. `UpdateValidatorListBalance` /// will do the work of merging once it's ready. /// /// This instruction only succeeds if the transient stake account does not exist. /// The minimum amount to move is rent-exemption plus 1 SOL in order to avoid /// issues on credits observed when merging active stakes later. /// /// 0. `[]` Stake pool /// 1. `[s]` Stake pool staker /// 2. `[]` Stake pool withdraw authority /// 3. `[w]` Validator list /// 4. `[w]` Stake pool reserve stake /// 5. `[w]` Transient stake account /// 6. `[]` Validator vote account to delegate to /// 7. '[]' Clock sysvar /// 8. '[]' Rent sysvar /// 9. `[]` Stake History sysvar /// 10. `[]` Stake Config sysvar /// 11. `[]` System program /// 12. `[]` Stake program /// userdata: amount of lamports to split into the transient stake account IncreaseValidatorStake(u64), /// (Staker only) Set the preferred deposit or withdraw stake account for the /// stake pool /// /// In order to avoid users abusing the stake pool as a free conversion /// between SOL staked on different validators, the staker can force all /// deposits and/or withdraws to go to one chosen account, or unset that account. /// /// 0. `[]` Stake pool /// 1. `[s]` Stake pool staker /// 2. `[w]` Validator list /// /// Fails if the validator is not part of the stake pool. SetPreferredValidator { /// Affected operation (deposit or withdraw) #[allow(dead_code)] // but it's not validator_type: PreferredValidatorType, /// Validator vote account that deposits or withdraws must go through, /// unset with None #[allow(dead_code)] // but it's not validator_vote_address: Option<Pubkey>, }, /// Updates balances of validator and transient stake accounts in the pool /// /// While going through the pairs of validator and transient stake accounts, /// if the transient stake is inactive, it is merged into the reserve stake /// account. 
If the transient stake is active and has matching credits /// observed, it is merged into the canonical validator stake account. In /// all other states, nothing is done, and the balance is simply added to /// the canonical stake account balance. /// /// 0. `[]` Stake pool /// 1. `[]` Stake pool withdraw authority /// 2. `[w]` Validator stake list storage account /// 3. `[w]` Reserve stake account /// 4. `[]` Sysvar clock /// 5. `[]` Sysvar stake history /// 6. `[]` Stake program /// 7. ..7+N ` [] N pairs of validator and transient stake accounts UpdateValidatorListBalance { /// Index to start updating on the validator list #[allow(dead_code)] // but it's not start_index: u32, /// If true, don't try merging transient stake accounts into the reserve or /// validator stake account. Useful for testing or if a particular stake /// account is in a bad state, but we still want to update #[allow(dead_code)] // but it's not no_merge: bool, }, /// Updates total pool balance based on balances in the reserve and validator list /// /// 0. `[w]` Stake pool /// 1. `[]` Stake pool withdraw authority /// 2. `[w]` Validator stake list storage account /// 3. `[]` Reserve stake account /// 4. `[w]` Account to receive pool fee tokens /// 5. `[w]` Pool mint account /// 6. `[]` Sysvar clock account /// 7. `[]` Pool token program UpdateStakePoolBalance, /// Deposit some stake into the pool. The output is a "pool" token representing ownership /// into the pool. Inputs are converted to the current ratio. /// /// 0. `[w]` Stake pool /// 1. `[w]` Validator stake list storage account /// 2. `[]` Stake pool deposit authority /// 3. `[]` Stake pool withdraw authority /// 4. `[w]` Stake account to join the pool (withdraw authority for the stake account should be first set to the stake pool deposit authority) /// 5. `[w]` Validator stake account for the stake account to be merged with /// 6. `[w]` Reserve stake account, to withdraw rent exempt reserve /// 7. `[w]` User account to receive pool tokens /// 8. `[w]` Pool token mint account /// 9. '[]' Sysvar clock account /// 10. '[]' Sysvar stake history account /// 11. `[]` Pool token program id, /// 12. `[]` Stake program id, Deposit, /// Withdraw the token from the pool at the current ratio. /// /// Succeeds if the stake account has enough SOL to cover the desired amount /// of pool tokens, and if the withdrawal keeps the total staked amount /// above the minimum of rent-exempt amount + 1 SOL. /// /// A validator stake account can be withdrawn from freely, and the reserve /// can only be drawn from if there is no active stake left, where all /// validator accounts are left with 1 lamport. /// /// 0. `[w]` Stake pool /// 1. `[w]` Validator stake list storage account /// 2. `[]` Stake pool withdraw authority /// 3. `[w]` Validator or reserve stake account to split /// 4. `[w]` Unitialized stake account to receive withdrawal /// 5. `[]` User account to set as a new withdraw authority /// 6. `[s]` User transfer authority, for pool token account /// 7. `[w]` User account with pool tokens to burn from /// 8. `[w]` Pool token mint account /// 9. `[]` Sysvar clock account (required) /// 10. `[]` Pool token program id /// 11. `[]` Stake program id, /// userdata: amount of pool tokens to withdraw Withdraw(u64), /// (Manager only) Update manager /// /// 0. `[w]` StakePool /// 1. `[s]` Manager /// 2. '[]` New manager pubkey /// 3. '[]` New manager fee account SetManager, /// (Manager only) Update fee /// /// 0. `[w]` StakePool /// 1. `[s]` Manager /// 2. 
`[]` Sysvar clock SetFee { /// Fee assessed as percentage of perceived rewards #[allow(dead_code)] // but it's not fee: Fee, }, /// (Manager or staker only) Update staker /// /// 0. `[w]` StakePool /// 1. `[s]` Manager or current staker /// 2. '[]` New staker pubkey SetStaker, } /// Creates an 'initialize' instruction. pub fn initialize( program_id: &Pubkey, stake_pool: &Pubkey, manager: &Pubkey, staker: &Pubkey, validator_list: &Pubkey, reserve_stake: &Pubkey, pool_mint: &Pubkey, manager_pool_account: &Pubkey, token_program_id: &Pubkey, deposit_authority: Option<Pubkey>, fee: Fee, max_validators: u32, ) -> Instruction { let init_data = StakePoolInstruction::Initialize { fee, max_validators, }; let data = init_data.try_to_vec().unwrap(); let mut accounts = vec![ AccountMeta::new(*stake_pool, true), AccountMeta::new_readonly(*manager, true), AccountMeta::new_readonly(*staker, false), AccountMeta::new(*validator_list, false), AccountMeta::new_readonly(*reserve_stake, false), AccountMeta::new_readonly(*pool_mint, false), AccountMeta::new_readonly(*manager_pool_account, false), AccountMeta::new_readonly(sysvar::clock::id(), false), AccountMeta::new_readonly(sysvar::rent::id(), false), AccountMeta::new_readonly(*token_program_id, false), ]; if let Some(deposit_authority) = deposit_authority { accounts.push(AccountMeta::new_readonly(deposit_authority, true)); } Instruction { program_id: *program_id, accounts, data, } } /// Creates `CreateValidatorStakeAccount` instruction (create new stake account for the validator) pub fn create_validator_stake_account( program_id: &Pubkey, stake_pool: &Pubkey, staker: &Pubkey, funder: &Pubkey, stake_account: &Pubkey, validator: &Pubkey, ) -> Instruction { let accounts = vec![ AccountMeta::new_readonly(*stake_pool, false), AccountMeta::new_readonly(*staker, true), AccountMeta::new(*funder, true), AccountMeta::new(*stake_account, false), AccountMeta::new_readonly(*validator, false), AccountMeta::new_readonly(sysvar::rent::id(), false), AccountMeta::new_readonly(sysvar::clock::id(), false), AccountMeta::new_readonly(sysvar::stake_history::id(), false), AccountMeta::new_readonly(stake_program::config_id(), false), AccountMeta::new_readonly(system_program::id(), false), AccountMeta::new_readonly(stake_program::id(), false), ]; Instruction { program_id: *program_id, accounts, data: StakePoolInstruction::CreateValidatorStakeAccount .try_to_vec() .unwrap(), } } /// Creates `AddValidatorToPool` instruction (add new validator stake account to the pool) pub fn add_validator_to_pool( program_id: &Pubkey, stake_pool: &Pubkey, staker: &Pubkey, stake_pool_withdraw: &Pubkey, validator_list: &Pubkey, stake_account: &Pubkey, ) -> Instruction { let accounts = vec![ AccountMeta::new(*stake_pool, false), AccountMeta::new_readonly(*staker, true), AccountMeta::new_readonly(*stake_pool_withdraw, false), AccountMeta::new(*validator_list, false), AccountMeta::new(*stake_account, false), AccountMeta::new_readonly(sysvar::clock::id(), false), AccountMeta::new_readonly(sysvar::stake_history::id(), false), AccountMeta::new_readonly(stake_program::id(), false), ]; Instruction { program_id: *program_id, accounts, data: StakePoolInstruction::AddValidatorToPool .try_to_vec() .unwrap(), } } /// Creates `RemoveValidatorFromPool` instruction (remove validator stake account from the pool) pub fn remove_validator_from_pool( program_id: &Pubkey, stake_pool: &Pubkey, staker: &Pubkey, stake_pool_withdraw: &Pubkey, new_stake_authority: &Pubkey, validator_list: &Pubkey, stake_account: &Pubkey, 
transient_stake_account: &Pubkey, ) -> Instruction { let accounts = vec![ AccountMeta::new(*stake_pool, false), AccountMeta::new_readonly(*staker, true), AccountMeta::new_readonly(*stake_pool_withdraw, false), AccountMeta::new_readonly(*new_stake_authority, false), AccountMeta::new(*validator_list, false), AccountMeta::new(*stake_account, false), AccountMeta::new_readonly(*transient_stake_account, false), AccountMeta::new_readonly(sysvar::clock::id(), false), AccountMeta::new_readonly(stake_program::id(), false), ]; Instruction { program_id: *program_id, accounts, data: StakePoolInstruction::RemoveValidatorFromPool .try_to_vec() .unwrap(), } } /// Creates `DecreaseValidatorStake` instruction (rebalance from validator account to /// transient account) pub fn decrease_validator_stake( program_id: &Pubkey, stake_pool: &Pubkey, staker: &Pubkey, stake_pool_withdraw_authority: &Pubkey, validator_list: &Pubkey, validator_stake: &Pubkey, transient_stake: &Pubkey, lamports: u64, ) -> Instruction { let accounts = vec![ AccountMeta::new_readonly(*stake_pool, false), AccountMeta::new_readonly(*staker, true), AccountMeta::new_readonly(*stake_pool_withdraw_authority, false), AccountMeta::new(*validator_list, false), AccountMeta::new(*validator_stake, false), AccountMeta::new(*transient_stake, false), AccountMeta::new_readonly(sysvar::clock::id(), false), AccountMeta::new_readonly(sysvar::rent::id(), false), AccountMeta::new_readonly(system_program::id(), false), AccountMeta::new_readonly(stake_program::id(), false), ]; Instruction { program_id: *program_id, accounts, data: StakePoolInstruction::DecreaseValidatorStake(lamports) .try_to_vec() .unwrap(), } } /// Creates `IncreaseValidatorStake` instruction (rebalance from reserve account to /// transient account) pub fn increase_validator_stake( program_id: &Pubkey, stake_pool: &Pubkey, staker: &Pubkey, stake_pool_withdraw_authority: &Pubkey, validator_list: &Pubkey, reserve_stake: &Pubkey, transient_stake: &Pubkey, validator: &Pubkey, lamports: u64, ) -> Instruction { let accounts = vec![ AccountMeta::new_readonly(*stake_pool, false), AccountMeta::new_readonly(*staker, true), AccountMeta::new_readonly(*stake_pool_withdraw_authority, false), AccountMeta::new(*validator_list, false), AccountMeta::new(*reserve_stake, false), AccountMeta::new(*transient_stake, false), AccountMeta::new_readonly(*validator, false), AccountMeta::new_readonly(sysvar::clock::id(), false), AccountMeta::new_readonly(sysvar::rent::id(), false), AccountMeta::new_readonly(sysvar::stake_history::id(), false), AccountMeta::new_readonly(stake_program::config_id(), false), AccountMeta::new_readonly(system_program::id(), false), AccountMeta::new_readonly(stake_program::id(), false), ]; Instruction { program_id: *program_id, accounts, data: StakePoolInstruction::IncreaseValidatorStake(lamports) .try_to_vec() .unwrap(), } } /// Creates `SetPreferredDepositValidator` instruction pub fn set_preferred_validator( program_id: &Pubkey, stake_pool_address: &Pubkey, staker: &Pubkey, validator_list_address: &Pubkey, validator_type: PreferredValidatorType, validator_vote_address: Option<Pubkey>, ) -> Instruction { Instruction { program_id: *program_id, accounts: vec![ AccountMeta::new_readonly(*stake_pool_address, false), AccountMeta::new_readonly(*staker, true), AccountMeta::new(*validator_list_address, false), ], data: StakePoolInstruction::SetPreferredValidator { validator_type, validator_vote_address, } .try_to_vec() .unwrap(), } } /// Creates `CreateValidatorStakeAccount` instruction with a vote 
account pub fn create_validator_stake_account_with_vote( program_id: &Pubkey, stake_pool_address: &Pubkey, staker: &Pubkey, funder: &Pubkey, vote_account_address: &Pubkey, ) -> Instruction { let (stake_account, _) = find_stake_program_address(program_id, vote_account_address, stake_pool_address); create_validator_stake_account( program_id, stake_pool_address, staker, funder, &stake_account, vote_account_address, ) } /// Create an `AddValidatorToPool` instruction given an existing stake pool and /// vote account pub fn add_validator_to_pool_with_vote( program_id: &Pubkey, stake_pool: &StakePool, stake_pool_address: &Pubkey, vote_account_address: &Pubkey, ) -> Instruction { let pool_withdraw_authority = find_withdraw_authority_program_address(program_id, stake_pool_address).0; let (stake_account_address, _) = find_stake_program_address(program_id, vote_account_address, stake_pool_address); add_validator_to_pool( program_id, stake_pool_address, &stake_pool.staker, &pool_withdraw_authority, &stake_pool.validator_list, &stake_account_address, ) } /// Create an `RemoveValidatorFromPool` instruction given an existing stake pool and /// vote account pub fn remove_validator_from_pool_with_vote( program_id: &Pubkey, stake_pool: &StakePool, stake_pool_address: &Pubkey, vote_account_address: &Pubkey, new_stake_account_authority: &Pubkey, ) -> Instruction { let pool_withdraw_authority = find_withdraw_authority_program_address(program_id, stake_pool_address).0; let (stake_account_address, _) = find_stake_program_address(program_id, vote_account_address, stake_pool_address); let (transient_stake_account, _) = find_transient_stake_program_address(program_id, vote_account_address, stake_pool_address); remove_validator_from_pool( program_id, stake_pool_address, &stake_pool.staker, &pool_withdraw_authority, new_stake_account_authority, &stake_pool.validator_list, &stake_account_address, &transient_stake_account, ) } /// Create an `IncreaseValidatorStake` instruction given an existing stake pool and /// vote account pub fn increase_validator_stake_with_vote( program_id: &Pubkey, stake_pool: &StakePool, stake_pool_address: &Pubkey, vote_account_address: &Pubkey, lamports: u64, ) -> Instruction { let pool_withdraw_authority = find_withdraw_authority_program_address(program_id, stake_pool_address).0; let (transient_stake_address, _) = find_transient_stake_program_address(program_id, vote_account_address, stake_pool_address); increase_validator_stake( program_id, stake_pool_address, &stake_pool.staker, &pool_withdraw_authority, &stake_pool.validator_list, &stake_pool.reserve_stake, &transient_stake_address, vote_account_address, lamports, ) } /// Create a `DecreaseValidatorStake` instruction given an existing stake pool and /// vote account pub fn decrease_validator_stake_with_vote( program_id: &Pubkey, stake_pool: &StakePool, stake_pool_address: &Pubkey, vote_account_address: &Pubkey, lamports: u64, ) -> Instruction { let pool_withdraw_authority = find_withdraw_authority_program_address(program_id, stake_pool_address).0; let (validator_stake_address, _) = find_stake_program_address(program_id, vote_account_address, stake_pool_address); let (transient_stake_address, _) = find_transient_stake_program_address(program_id, vote_account_address, stake_pool_address); decrease_validator_stake( program_id, stake_pool_address, &stake_pool.staker, &pool_withdraw_authority, &stake_pool.validator_list, &validator_stake_address, &transient_stake_address, lamports, ) } /// Creates `UpdateValidatorListBalance` instruction 
(update validator stake account balances) pub fn update_validator_list_balance( program_id: &Pubkey, stake_pool: &Pubkey, stake_pool_withdraw_authority: &Pubkey, validator_list: &Pubkey, reserve_stake: &Pubkey, validator_vote_accounts: &[Pubkey], start_index: u32, no_merge: bool, ) -> Instruction { let mut accounts = vec![ AccountMeta::new_readonly(*stake_pool, false), AccountMeta::new_readonly(*stake_pool_withdraw_authority, false), AccountMeta::new(*validator_list, false), AccountMeta::new(*reserve_stake, false), AccountMeta::new_readonly(sysvar::clock::id(), false), AccountMeta::new_readonly(sysvar::stake_history::id(), false), AccountMeta::new_readonly(stake_program::id(), false), ]; accounts.append( &mut validator_vote_accounts .iter() .flat_map(|vote_account_address| { let (validator_stake_account, _) = find_stake_program_address(program_id, vote_account_address, stake_pool); let (transient_stake_account, _) = find_transient_stake_program_address( program_id, vote_account_address, stake_pool, ); vec![ AccountMeta::new(validator_stake_account, false), AccountMeta::new(transient_stake_account, false), ] }) .collect::<Vec<AccountMeta>>(), ); Instruction { program_id: *program_id, accounts, data: StakePoolInstruction::UpdateValidatorListBalance { start_index, no_merge, } .try_to_vec() .unwrap(), } } /// Creates `UpdateStakePoolBalance` instruction (pool balance from the stake account list balances) pub fn update_stake_pool_balance( program_id: &Pubkey, stake_pool: &Pubkey, withdraw_authority: &Pubkey, validator_list_storage: &Pubkey, reserve_stake: &Pubkey, manager_fee_account: &Pubkey, stake_pool_mint: &Pubkey, ) -> Instruction { let accounts = vec![ AccountMeta::new(*stake_pool, false), AccountMeta::new_readonly(*withdraw_authority, false), AccountMeta::new(*validator_list_storage, false), AccountMeta::new_readonly(*reserve_stake, false), AccountMeta::new(*manager_fee_account, false), AccountMeta::new(*stake_pool_mint, false), AccountMeta::new_readonly(sysvar::clock::id(), false), AccountMeta::new_readonly(spl_token::id(), false), ]; Instruction { program_id: *program_id, accounts, data: StakePoolInstruction::UpdateStakePoolBalance .try_to_vec() .unwrap(), } } /// Creates all `UpdateValidatorListBalance` and `UpdateStakePoolBalance` /// instructions for fully updating a stake pool each epoch pub fn update_stake_pool( program_id: &Pubkey, stake_pool: &StakePool, validator_list: &ValidatorList, stake_pool_address: &Pubkey, no_merge: bool, ) -> Vec<Instruction> { let vote_accounts: Vec<Pubkey> = validator_list .validators .iter() .map(|item| item.vote_account_address) .collect(); let (withdraw_authority, _) = find_withdraw_authority_program_address(program_id, stake_pool_address); let mut instructions: Vec<Instruction> = vec![]; let mut start_index = 0; for accounts_chunk in vote_accounts.chunks(MAX_VALIDATORS_TO_UPDATE) { instructions.push(update_validator_list_balance( program_id, stake_pool_address, &withdraw_authority, &stake_pool.validator_list, &stake_pool.reserve_stake, accounts_chunk, start_index, no_merge, )); start_index += MAX_VALIDATORS_TO_UPDATE as u32; } instructions.push(update_stake_pool_balance( program_id, stake_pool_address, &withdraw_authority, &stake_pool.validator_list, &stake_pool.reserve_stake, &stake_pool.manager_fee_account, &stake_pool.pool_mint, )); instructions } /// Creates instructions required to deposit into a stake pool, given a stake /// account owned by the user. 
pub fn deposit( program_id: &Pubkey, stake_pool: &Pubkey, validator_list_storage: &Pubkey, stake_pool_withdraw_authority: &Pubkey, deposit_stake_address: &Pubkey, deposit_stake_withdraw_authority: &Pubkey, validator_stake_account: &Pubkey, reserve_stake_account: &Pubkey, pool_tokens_to: &Pubkey, pool_mint: &Pubkey, token_program_id: &Pubkey, ) -> Vec<Instruction> { let stake_pool_deposit_authority = find_deposit_authority_program_address(program_id, stake_pool).0; let accounts = vec![ AccountMeta::new(*stake_pool, false), AccountMeta::new(*validator_list_storage, false), AccountMeta::new_readonly(stake_pool_deposit_authority, false), AccountMeta::new_readonly(*stake_pool_withdraw_authority, false), AccountMeta::new(*deposit_stake_address, false), AccountMeta::new(*validator_stake_account, false), AccountMeta::new(*reserve_stake_account, false), AccountMeta::new(*pool_tokens_to, false), AccountMeta::new(*pool_mint, false), AccountMeta::new_readonly(sysvar::clock::id(), false), AccountMeta::new_readonly(sysvar::stake_history::id(), false), AccountMeta::new_readonly(*token_program_id, false), AccountMeta::new_readonly(stake_program::id(), false), ]; vec![ stake_program::authorize( deposit_stake_address, deposit_stake_withdraw_authority, &stake_pool_deposit_authority, stake_program::StakeAuthorize::Staker, ), stake_program::authorize( deposit_stake_address, deposit_stake_withdraw_authority, &stake_pool_deposit_authority, stake_program::StakeAuthorize::Withdrawer, ), Instruction { program_id: *program_id, accounts, data: StakePoolInstruction::Deposit.try_to_vec().unwrap(), }, ] } /// Creates instructions required to deposit into a stake pool, given a stake /// account owned by the user. The difference with `deposit()` is that a deposit /// authority must sign this instruction, which is required for private pools. pub fn deposit_with_authority( program_id: &Pubkey, stake_pool: &Pubkey, validator_list_storage: &Pubkey, stake_pool_deposit_authority: &Pubkey, stake_pool_withdraw_authority: &Pubkey, deposit_stake_address: &Pubkey, deposit_stake_withdraw_authority: &Pubkey, validator_stake_account: &Pubkey, reserve_stake_account: &Pubkey, pool_tokens_to: &Pubkey, pool_mint: &Pubkey, token_program_id: &Pubkey, ) -> Vec<Instruction> { let accounts = vec![ AccountMeta::new(*stake_pool, false), AccountMeta::new(*validator_list_storage, false), AccountMeta::new_readonly(*stake_pool_deposit_authority, true), AccountMeta::new_readonly(*stake_pool_withdraw_authority, false), AccountMeta::new(*deposit_stake_address, false), AccountMeta::new(*validator_stake_account, false), AccountMeta::new(*reserve_stake_account, false), AccountMeta::new(*pool_tokens_to, false), AccountMeta::new(*pool_mint, false), AccountMeta::new_readonly(sysvar::clock::id(), false), AccountMeta::new_readonly(sysvar::stake_history::id(), false), AccountMeta::new_readonly(*token_program_id, false), AccountMeta::new_readonly(stake_program::id(), false), ]; vec![ stake_program::authorize( deposit_stake_address, deposit_stake_withdraw_authority, stake_pool_deposit_authority, stake_program::StakeAuthorize::Staker, ), stake_program::authorize( deposit_stake_address, deposit_stake_withdraw_authority, stake_pool_deposit_authority, stake_program::StakeAuthorize::Withdrawer, ), Instruction { program_id: *program_id, accounts, data: StakePoolInstruction::Deposit.try_to_vec().unwrap(), }, ] } /// Creates a 'withdraw' instruction. 
pub fn withdraw( program_id: &Pubkey, stake_pool: &Pubkey, validator_list_storage: &Pubkey, stake_pool_withdraw: &Pubkey, stake_to_split: &Pubkey, stake_to_receive: &Pubkey, user_stake_authority: &Pubkey, user_transfer_authority: &Pubkey, user_pool_token_account: &Pubkey, pool_mint: &Pubkey, token_program_id: &Pubkey, amount: u64, ) -> Instruction { let accounts = vec![ AccountMeta::new(*stake_pool, false), AccountMeta::new(*validator_list_storage, false), AccountMeta::new_readonly(*stake_pool_withdraw, false), AccountMeta::new(*stake_to_split, false), AccountMeta::new(*stake_to_receive, false), AccountMeta::new_readonly(*user_stake_authority, false), AccountMeta::new_readonly(*user_transfer_authority, true), AccountMeta::new(*user_pool_token_account, false), AccountMeta::new(*pool_mint, false), AccountMeta::new_readonly(sysvar::clock::id(), false), AccountMeta::new_readonly(*token_program_id, false), AccountMeta::new_readonly(stake_program::id(), false), ]; Instruction { program_id: *program_id, accounts, data: StakePoolInstruction::Withdraw(amount).try_to_vec().unwrap(), } } /// Creates a 'set manager' instruction. pub fn set_manager( program_id: &Pubkey, stake_pool: &Pubkey, manager: &Pubkey, new_manager: &Pubkey, new_fee_receiver: &Pubkey, ) -> Instruction { let accounts = vec![ AccountMeta::new(*stake_pool, false), AccountMeta::new_readonly(*manager, true), AccountMeta::new_readonly(*new_manager, false), AccountMeta::new_readonly(*new_fee_receiver, false), ]; Instruction { program_id: *program_id, accounts, data: StakePoolInstruction::SetManager.try_to_vec().unwrap(), } } /// Creates a 'set fee' instruction. pub fn set_fee( program_id: &Pubkey, stake_pool: &Pubkey, manager: &Pubkey, fee: Fee, ) -> Instruction { let accounts = vec![ AccountMeta::new(*stake_pool, false), AccountMeta::new_readonly(*manager, true), AccountMeta::new_readonly(sysvar::clock::id(), false), ]; Instruction { program_id: *program_id, accounts, data: StakePoolInstruction::SetFee { fee }.try_to_vec().unwrap(), } } /// Creates a 'set staker' instruction. pub fn set_staker( program_id: &Pubkey, stake_pool: &Pubkey, set_staker_authority: &Pubkey, new_staker: &Pubkey, ) -> Instruction { let accounts = vec![ AccountMeta::new(*stake_pool, false), AccountMeta::new_readonly(*set_staker_authority, true), AccountMeta::new_readonly(*new_staker, false), ]; Instruction { program_id: *program_id, accounts, data: StakePoolInstruction::SetStaker.try_to_vec().unwrap(), } }
36.864615
148
0.649751
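As a usage sketch for the instruction builders above: constructing the manager-only SetFee instruction needs only the program id, the pool address, the manager key, and a Fee. The keys below are generated placeholders, and the spl_stake_pool crate and module paths are assumptions:

use solana_program::pubkey::Pubkey;
use spl_stake_pool::{instruction::set_fee, state::Fee};

fn main() {
    // Placeholder keys; real callers use the deployed program and pool accounts.
    let program_id = Pubkey::new_unique();
    let stake_pool = Pubkey::new_unique();
    let manager = Pubkey::new_unique();

    // Charge 5/100 of perceived rewards, as described by the SetFee docs above.
    let ix = set_fee(
        &program_id,
        &stake_pool,
        &manager,
        Fee { numerator: 5, denominator: 100 },
    );
    assert_eq!(ix.accounts.len(), 3);
}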
1a78f5065813ad750cceb6fbfbfdf571d18d74c0
93
pub mod view;
pub mod design;
pub mod system;
pub mod document;
pub mod find;
pub mod index;
13.285714
17
0.741935
fc21412821445f70b708fe012cd982ac633ccea7
632
use proptest::arbitrary::any;
use proptest::strategy::{BoxedStrategy, Strategy};

use liblumen_alloc::erts::term::prelude::Atom;

use crate::test::strategy::term::NON_EXISTENT_ATOM_PREFIX;

pub fn module() -> BoxedStrategy<Atom> {
    atom()
}

pub fn function() -> BoxedStrategy<Atom> {
    atom()
}

pub fn arity() -> BoxedStrategy<u8> {
    (0_u8..=255_u8).boxed()
}

fn atom() -> BoxedStrategy<Atom> {
    any::<String>()
        .prop_filter("Reserved for existing/safe atom tests", |s| {
            !s.starts_with(NON_EXISTENT_ATOM_PREFIX)
        })
        .prop_map(|s| Atom::try_from_str(&s).unwrap())
        .boxed()
}
22.571429
67
0.636076
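A sketch of how the strategies above compose inside a property test, assuming it lives in the same module so module(), function(), and arity() are in scope:

use proptest::{prop_assert, proptest};

proptest! {
    #[test]
    fn generated_mfa_values_are_well_formed(m in module(), f in function(), a in arity()) {
        // Generation itself is the property: every case must produce two
        // atoms outside the reserved prefix plus an in-range u8 arity.
        prop_assert!(a <= u8::MAX);
        let _ = (m, f);
    }
}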
ac63a9aeabc8eacab7179d615fc98906692d6780
1,512
#![feature(map_first_last)]

mod bigint;
mod bitmap;
mod continuation_table;
mod crc32;
mod gc;
mod leb128;
mod mark_stack;
mod memory;
mod principal_id;
mod stream;
mod text;
mod utf8;

use motoko_rts::types::Bytes;

fn main() {
    if std::mem::size_of::<usize>() != 4 {
        println!("Motoko RTS only works on 32-bit architectures");
        std::process::exit(1);
    }

    unsafe {
        bigint::test();
        bitmap::test();
        continuation_table::test();
        crc32::test();
        gc::test();
        leb128::test();
        mark_stack::test();
        principal_id::test();
        stream::test();
        text::test();
        utf8::test();
    }
}

// Called by the RTS to panic
#[no_mangle]
extern "C" fn rts_trap(ptr: *const u8, len: Bytes<u32>) -> ! {
    let msg = unsafe { std::slice::from_raw_parts(ptr, len.as_usize()) };
    match core::str::from_utf8(msg) {
        Err(err) => panic!(
            "rts_trap_with called with non-UTF8 string (error={:?}, string={:?})",
            err, msg
        ),
        Ok(str) => panic!("rts_trap_with: {:?}", str),
    }
}

// Called by RTS BigInt functions to panic. Normally generated by the compiler
#[no_mangle]
extern "C" fn bigint_trap() -> ! {
    panic!("bigint_trap called");
}

// Called by the RTS for debug prints
#[no_mangle]
unsafe extern "C" fn print_ptr(ptr: usize, len: u32) {
    let str: &[u8] = core::slice::from_raw_parts(ptr as *const u8, len as usize);
    println!("[RTS] {}", String::from_utf8_lossy(str));
}
23.625
82
0.58664
abdcd56444213e121fbb7f023a6df1674716d554
2,409
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use target_strs; use syntax::abi; pub fn get_target_strs(target_triple: String, target_os: abi::Os) -> target_strs::t { return target_strs::t { module_asm: "".to_string(), data_layout: match target_os { abi::OsMacos => { "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\ f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\ s0:64:64-f80:128:128-n8:16:32:64".to_string() } abi::OsiOS => { "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\ f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\ s0:64:64-f80:128:128-n8:16:32:64".to_string() } abi::OsWindows => { // FIXME: Test this. Copied from Linux (#2398) "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\ f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\ s0:64:64-f80:128:128-n8:16:32:64-S128".to_string() } abi::OsLinux => { "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\ f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\ s0:64:64-f80:128:128-n8:16:32:64-S128".to_string() } abi::OsAndroid => { "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\ f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\ s0:64:64-f80:128:128-n8:16:32:64-S128".to_string() } abi::OsBitrig | abi::OsDragonfly | abi::OsFreebsd | abi::OsNetbsd | abi::OsOpenbsd => { "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\ f32:32:32-f64:64:64-v64:64:64-v128:128:128-a:0:64-\ s0:64:64-f80:128:128-n8:16:32:64-S128".to_string() } }, target_triple: target_triple, cc_args: vec!("-m64".to_string()), }; }
38.238095
97
0.555002
ace4144e362a00c113f4c43b83baa1914ea038d7
35,215
use std::collections::VecDeque; use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; use std::{fmt, io, net}; use actix_codec::{AsyncRead, AsyncWrite, Decoder, Encoder, Framed, FramedParts}; use actix_rt::time::{delay_until, Delay, Instant}; use actix_service::Service; use bitflags::bitflags; use bytes::{Buf, BytesMut}; use log::{error, trace}; use pin_project::pin_project; use crate::cloneable::CloneableService; use crate::config::ServiceConfig; use crate::error::{DispatchError, Error}; use crate::error::{ParseError, PayloadError}; use crate::helpers::DataFactory; use crate::httpmessage::HttpMessage; use crate::request::Request; use crate::response::Response; use crate::{ body::{Body, BodySize, MessageBody, ResponseBody}, Extensions, }; use super::codec::Codec; use super::payload::{Payload, PayloadSender, PayloadStatus}; use super::{Message, MessageType}; const LW_BUFFER_SIZE: usize = 4096; const HW_BUFFER_SIZE: usize = 32_768; const MAX_PIPELINED_MESSAGES: usize = 16; bitflags! { pub struct Flags: u8 { const STARTED = 0b0000_0001; const KEEPALIVE = 0b0000_0010; const POLLED = 0b0000_0100; const SHUTDOWN = 0b0000_1000; const READ_DISCONNECT = 0b0001_0000; const WRITE_DISCONNECT = 0b0010_0000; const UPGRADE = 0b0100_0000; } } #[pin_project::pin_project] /// Dispatcher for HTTP/1.1 protocol pub struct Dispatcher<T, S, B, X, U> where S: Service<Request = Request>, S::Error: Into<Error>, B: MessageBody, X: Service<Request = Request, Response = Request>, X::Error: Into<Error>, U: Service<Request = (Request, Framed<T, Codec>), Response = ()>, U::Error: fmt::Display, { #[pin] inner: DispatcherState<T, S, B, X, U>, } #[pin_project(project = DispatcherStateProj)] enum DispatcherState<T, S, B, X, U> where S: Service<Request = Request>, S::Error: Into<Error>, B: MessageBody, X: Service<Request = Request, Response = Request>, X::Error: Into<Error>, U: Service<Request = (Request, Framed<T, Codec>), Response = ()>, U::Error: fmt::Display, { Normal(#[pin] InnerDispatcher<T, S, B, X, U>), Upgrade(Pin<Box<U::Future>>), } #[pin_project(project = InnerDispatcherProj)] struct InnerDispatcher<T, S, B, X, U> where S: Service<Request = Request>, S::Error: Into<Error>, B: MessageBody, X: Service<Request = Request, Response = Request>, X::Error: Into<Error>, U: Service<Request = (Request, Framed<T, Codec>), Response = ()>, U::Error: fmt::Display, { service: CloneableService<S>, expect: CloneableService<X>, upgrade: Option<CloneableService<U>>, on_connect: Option<Box<dyn DataFactory>>, on_connect_data: Extensions, flags: Flags, peer_addr: Option<net::SocketAddr>, error: Option<DispatchError>, #[pin] state: State<S, B, X>, payload: Option<PayloadSender>, messages: VecDeque<DispatcherMessage>, ka_expire: Instant, ka_timer: Option<Delay>, io: Option<T>, read_buf: BytesMut, write_buf: BytesMut, codec: Codec, } enum DispatcherMessage { Item(Request), Upgrade(Request), Error(Response<()>), } #[pin_project(project = StateProj)] enum State<S, B, X> where S: Service<Request = Request>, X: Service<Request = Request, Response = Request>, B: MessageBody, { None, ExpectCall(Pin<Box<X::Future>>), ServiceCall(Pin<Box<S::Future>>), SendPayload(#[pin] ResponseBody<B>), } impl<S, B, X> State<S, B, X> where S: Service<Request = Request>, X: Service<Request = Request, Response = Request>, B: MessageBody, { fn is_empty(&self) -> bool { matches!(self, State::None) } fn is_call(&self) -> bool { matches!(self, State::ServiceCall(_)) } } enum PollResponse { Upgrade(Request), DoNothing, DrainWriteBuf, } impl 
PartialEq for PollResponse { fn eq(&self, other: &PollResponse) -> bool { match self { PollResponse::DrainWriteBuf => matches!(other, PollResponse::DrainWriteBuf), PollResponse::DoNothing => matches!(other, PollResponse::DoNothing), _ => false, } } } impl<T, S, B, X, U> Dispatcher<T, S, B, X, U> where T: AsyncRead + AsyncWrite + Unpin, S: Service<Request = Request>, S::Error: Into<Error>, S::Response: Into<Response<B>>, B: MessageBody, X: Service<Request = Request, Response = Request>, X::Error: Into<Error>, U: Service<Request = (Request, Framed<T, Codec>), Response = ()>, U::Error: fmt::Display, { /// Create HTTP/1 dispatcher. pub(crate) fn new( stream: T, config: ServiceConfig, service: CloneableService<S>, expect: CloneableService<X>, upgrade: Option<CloneableService<U>>, on_connect: Option<Box<dyn DataFactory>>, on_connect_data: Extensions, peer_addr: Option<net::SocketAddr>, ) -> Self { Dispatcher::with_timeout( stream, Codec::new(config.clone()), config, BytesMut::with_capacity(HW_BUFFER_SIZE), None, service, expect, upgrade, on_connect, on_connect_data, peer_addr, ) } /// Create http/1 dispatcher with slow request timeout. pub(crate) fn with_timeout( io: T, codec: Codec, config: ServiceConfig, read_buf: BytesMut, timeout: Option<Delay>, service: CloneableService<S>, expect: CloneableService<X>, upgrade: Option<CloneableService<U>>, on_connect: Option<Box<dyn DataFactory>>, on_connect_data: Extensions, peer_addr: Option<net::SocketAddr>, ) -> Self { let keepalive = config.keep_alive_enabled(); let flags = if keepalive { Flags::KEEPALIVE } else { Flags::empty() }; // keep-alive timer let (ka_expire, ka_timer) = if let Some(delay) = timeout { (delay.deadline(), Some(delay)) } else if let Some(delay) = config.keep_alive_timer() { (delay.deadline(), Some(delay)) } else { (config.now(), None) }; Dispatcher { inner: DispatcherState::Normal(InnerDispatcher { write_buf: BytesMut::with_capacity(HW_BUFFER_SIZE), payload: None, state: State::None, error: None, messages: VecDeque::new(), io: Some(io), codec, read_buf, service, expect, upgrade, on_connect, on_connect_data, flags, peer_addr, ka_expire, ka_timer, }), } } } impl<T, S, B, X, U> InnerDispatcher<T, S, B, X, U> where T: AsyncRead + AsyncWrite + Unpin, S: Service<Request = Request>, S::Error: Into<Error>, S::Response: Into<Response<B>>, B: MessageBody, X: Service<Request = Request, Response = Request>, X::Error: Into<Error>, U: Service<Request = (Request, Framed<T, Codec>), Response = ()>, U::Error: fmt::Display, { fn can_read(&self, cx: &mut Context<'_>) -> bool { if self .flags .intersects(Flags::READ_DISCONNECT | Flags::UPGRADE) { false } else if let Some(ref info) = self.payload { info.need_read(cx) == PayloadStatus::Read } else { true } } // if checked is set to true, delay disconnect until all tasks have finished. fn client_disconnected(self: Pin<&mut Self>) { let this = self.project(); this.flags .insert(Flags::READ_DISCONNECT | Flags::WRITE_DISCONNECT); if let Some(mut payload) = this.payload.take() { payload.set_error(PayloadError::Incomplete(None)); } } /// Flush stream /// /// true - got WouldBlock /// false - didn't get WouldBlock fn poll_flush( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Result<bool, DispatchError> { if self.write_buf.is_empty() { return Ok(false); } let len = self.write_buf.len(); let mut written = 0; let InnerDispatcherProj { io, write_buf, .. 
} = self.project(); let mut io = Pin::new(io.as_mut().unwrap()); while written < len { match io.as_mut().poll_write(cx, &write_buf[written..]) { Poll::Ready(Ok(0)) => { return Err(DispatchError::Io(io::Error::new( io::ErrorKind::WriteZero, "", ))); } Poll::Ready(Ok(n)) => { written += n; } Poll::Pending => { if written > 0 { write_buf.advance(written); } return Ok(true); } Poll::Ready(Err(err)) => return Err(DispatchError::Io(err)), } } if written == write_buf.len() { // SAFETY: setting length to 0 is safe // skips one length check vs truncate unsafe { write_buf.set_len(0) } } else { write_buf.advance(written); } Ok(false) } fn send_response( self: Pin<&mut Self>, message: Response<()>, body: ResponseBody<B>, ) -> Result<State<S, B, X>, DispatchError> { let mut this = self.project(); this.codec .encode(Message::Item((message, body.size())), &mut this.write_buf) .map_err(|err| { if let Some(mut payload) = this.payload.take() { payload.set_error(PayloadError::Incomplete(None)); } DispatchError::Io(err) })?; this.flags.set(Flags::KEEPALIVE, this.codec.keepalive()); match body.size() { BodySize::None | BodySize::Empty => Ok(State::None), _ => Ok(State::SendPayload(body)), } } fn send_continue(self: Pin<&mut Self>) { self.project() .write_buf .extend_from_slice(b"HTTP/1.1 100 Continue\r\n\r\n"); } fn poll_response( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Result<PollResponse, DispatchError> { loop { let mut this = self.as_mut().project(); let state = match this.state.project() { StateProj::None => match this.messages.pop_front() { Some(DispatcherMessage::Item(req)) => { Some(self.as_mut().handle_request(req, cx)?) } Some(DispatcherMessage::Error(res)) => Some( self.as_mut() .send_response(res, ResponseBody::Other(Body::Empty))?, ), Some(DispatcherMessage::Upgrade(req)) => { return Ok(PollResponse::Upgrade(req)); } None => None, }, StateProj::ExpectCall(fut) => match fut.as_mut().poll(cx) { Poll::Ready(Ok(req)) => { self.as_mut().send_continue(); this = self.as_mut().project(); this.state .set(State::ServiceCall(Box::pin(this.service.call(req)))); continue; } Poll::Ready(Err(e)) => { let res: Response = e.into().into(); let (res, body) = res.replace_body(()); Some(self.as_mut().send_response(res, body.into_body())?) } Poll::Pending => None, }, StateProj::ServiceCall(fut) => match fut.as_mut().poll(cx) { Poll::Ready(Ok(res)) => { let (res, body) = res.into().replace_body(()); let state = self.as_mut().send_response(res, body)?; this = self.as_mut().project(); this.state.set(state); continue; } Poll::Ready(Err(e)) => { let res: Response = e.into().into(); let (res, body) = res.replace_body(()); Some(self.as_mut().send_response(res, body.into_body())?) } Poll::Pending => None, }, StateProj::SendPayload(mut stream) => { loop { if this.write_buf.len() < HW_BUFFER_SIZE { match stream.as_mut().poll_next(cx) { Poll::Ready(Some(Ok(item))) => { this.codec.encode( Message::Chunk(Some(item)), &mut this.write_buf, )?; continue; } Poll::Ready(None) => { this.codec.encode( Message::Chunk(None), &mut this.write_buf, )?; this = self.as_mut().project(); this.state.set(State::None); } Poll::Ready(Some(Err(_))) => { return Err(DispatchError::Unknown) } Poll::Pending => return Ok(PollResponse::DoNothing), } } else { return Ok(PollResponse::DrainWriteBuf); } break; } continue; } }; this = self.as_mut().project(); // set new state if let Some(state) = state { this.state.set(state); if !self.state.is_empty() { continue; } } else { // if read-backpressure is enabled and we consumed some data. 
// we may read more data and retry if self.state.is_call() { if self.as_mut().poll_request(cx)? { continue; } } else if !self.messages.is_empty() { continue; } } break; } Ok(PollResponse::DoNothing) } fn handle_request( mut self: Pin<&mut Self>, req: Request, cx: &mut Context<'_>, ) -> Result<State<S, B, X>, DispatchError> { // Handle `EXPECT: 100-Continue` header let req = if req.head().expect() { let mut task = Box::pin(self.as_mut().project().expect.call(req)); match task.as_mut().poll(cx) { Poll::Ready(Ok(req)) => { self.as_mut().send_continue(); req } Poll::Pending => return Ok(State::ExpectCall(task)), Poll::Ready(Err(e)) => { let e = e.into(); let res: Response = e.into(); let (res, body) = res.replace_body(()); return self.send_response(res, body.into_body()); } } } else { req }; // Call service let mut task = Box::pin(self.as_mut().project().service.call(req)); match task.as_mut().poll(cx) { Poll::Ready(Ok(res)) => { let (res, body) = res.into().replace_body(()); self.send_response(res, body) } Poll::Pending => Ok(State::ServiceCall(task)), Poll::Ready(Err(e)) => { let res: Response = e.into().into(); let (res, body) = res.replace_body(()); self.send_response(res, body.into_body()) } } } /// Process one incoming requests pub(self) fn poll_request( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Result<bool, DispatchError> { // limit a mount of non processed requests if self.messages.len() >= MAX_PIPELINED_MESSAGES || !self.can_read(cx) { return Ok(false); } let mut updated = false; let mut this = self.as_mut().project(); loop { match this.codec.decode(&mut this.read_buf) { Ok(Some(msg)) => { updated = true; this.flags.insert(Flags::STARTED); match msg { Message::Item(mut req) => { let pl = this.codec.message_type(); req.head_mut().peer_addr = *this.peer_addr; // DEPRECATED // set on_connect data if let Some(ref on_connect) = this.on_connect { on_connect.set(&mut req.extensions_mut()); } // merge on_connect_ext data into request extensions req.extensions_mut().drain_from(this.on_connect_data); if pl == MessageType::Stream && this.upgrade.is_some() { this.messages.push_back(DispatcherMessage::Upgrade(req)); break; } if pl == MessageType::Payload || pl == MessageType::Stream { let (ps, pl) = Payload::create(false); let (req1, _) = req.replace_payload(crate::Payload::H1(pl)); req = req1; *this.payload = Some(ps); } // handle request early if this.state.is_empty() { let state = self.as_mut().handle_request(req, cx)?; this = self.as_mut().project(); this.state.set(state); } else { this.messages.push_back(DispatcherMessage::Item(req)); } } Message::Chunk(Some(chunk)) => { if let Some(ref mut payload) = this.payload { payload.feed_data(chunk); } else { error!( "Internal server error: unexpected payload chunk" ); this.flags.insert(Flags::READ_DISCONNECT); this.messages.push_back(DispatcherMessage::Error( Response::InternalServerError().finish().drop_body(), )); *this.error = Some(DispatchError::InternalError); break; } } Message::Chunk(None) => { if let Some(mut payload) = this.payload.take() { payload.feed_eof(); } else { error!("Internal server error: unexpected eof"); this.flags.insert(Flags::READ_DISCONNECT); this.messages.push_back(DispatcherMessage::Error( Response::InternalServerError().finish().drop_body(), )); *this.error = Some(DispatchError::InternalError); break; } } } } Ok(None) => break, Err(ParseError::Io(e)) => { self.as_mut().client_disconnected(); this = self.as_mut().project(); *this.error = Some(DispatchError::Io(e)); break; } Err(e) => { if let Some(mut payload) = 
this.payload.take() { payload.set_error(PayloadError::EncodingCorrupted); } // Malformed requests should be responded with 400 this.messages.push_back(DispatcherMessage::Error( Response::BadRequest().finish().drop_body(), )); this.flags.insert(Flags::READ_DISCONNECT); *this.error = Some(e.into()); break; } } } if updated && this.ka_timer.is_some() { if let Some(expire) = this.codec.config().keep_alive_expire() { *this.ka_expire = expire; } } Ok(updated) } /// keep-alive timer fn poll_keepalive( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Result<(), DispatchError> { let mut this = self.as_mut().project(); if this.ka_timer.is_none() { // shutdown timeout if this.flags.contains(Flags::SHUTDOWN) { if let Some(interval) = this.codec.config().client_disconnect_timer() { *this.ka_timer = Some(delay_until(interval)); } else { this.flags.insert(Flags::READ_DISCONNECT); if let Some(mut payload) = this.payload.take() { payload.set_error(PayloadError::Incomplete(None)); } return Ok(()); } } else { return Ok(()); } } match Pin::new(&mut this.ka_timer.as_mut().unwrap()).poll(cx) { Poll::Ready(()) => { // if we get timeout during shutdown, drop connection if this.flags.contains(Flags::SHUTDOWN) { return Err(DispatchError::DisconnectTimeout); } else if this.ka_timer.as_mut().unwrap().deadline() >= *this.ka_expire { // check for any outstanding tasks if this.state.is_empty() && this.write_buf.is_empty() { if this.flags.contains(Flags::STARTED) { trace!("Keep-alive timeout, close connection"); this.flags.insert(Flags::SHUTDOWN); // start shutdown timer if let Some(deadline) = this.codec.config().client_disconnect_timer() { if let Some(mut timer) = this.ka_timer.as_mut() { timer.reset(deadline); let _ = Pin::new(&mut timer).poll(cx); } } else { // no shutdown timeout, drop socket this.flags.insert(Flags::WRITE_DISCONNECT); return Ok(()); } } else { // timeout on first request (slow request) return 408 if !this.flags.contains(Flags::STARTED) { trace!("Slow request timeout"); let _ = self.as_mut().send_response( Response::RequestTimeout().finish().drop_body(), ResponseBody::Other(Body::Empty), ); this = self.as_mut().project(); } else { trace!("Keep-alive connection timeout"); } this.flags.insert(Flags::STARTED | Flags::SHUTDOWN); this.state.set(State::None); } } else if let Some(deadline) = this.codec.config().keep_alive_expire() { if let Some(mut timer) = this.ka_timer.as_mut() { timer.reset(deadline); let _ = Pin::new(&mut timer).poll(cx); } } } else if let Some(mut timer) = this.ka_timer.as_mut() { timer.reset(*this.ka_expire); let _ = Pin::new(&mut timer).poll(cx); } } Poll::Pending => (), } Ok(()) } } impl<T, S, B, X, U> Future for Dispatcher<T, S, B, X, U> where T: AsyncRead + AsyncWrite + Unpin, S: Service<Request = Request>, S::Error: Into<Error>, S::Response: Into<Response<B>>, B: MessageBody, X: Service<Request = Request, Response = Request>, X::Error: Into<Error>, U: Service<Request = (Request, Framed<T, Codec>), Response = ()>, U::Error: fmt::Display, { type Output = Result<(), DispatchError>; #[inline] fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let this = self.as_mut().project(); match this.inner.project() { DispatcherStateProj::Normal(mut inner) => { inner.as_mut().poll_keepalive(cx)?; if inner.flags.contains(Flags::SHUTDOWN) { if inner.flags.contains(Flags::WRITE_DISCONNECT) { Poll::Ready(Ok(())) } else { // flush buffer inner.as_mut().poll_flush(cx)?; if !inner.write_buf.is_empty() || inner.io.is_none() { Poll::Pending } else { match 
Pin::new(inner.project().io) .as_pin_mut() .unwrap() .poll_shutdown(cx) { Poll::Ready(res) => { Poll::Ready(res.map_err(DispatchError::from)) } Poll::Pending => Poll::Pending, } } } } else { // read socket into a buf let should_disconnect = if !inner.flags.contains(Flags::READ_DISCONNECT) { let mut inner_p = inner.as_mut().project(); read_available( cx, inner_p.io.as_mut().unwrap(), &mut inner_p.read_buf, )? } else { None }; inner.as_mut().poll_request(cx)?; if let Some(true) = should_disconnect { let inner_p = inner.as_mut().project(); inner_p.flags.insert(Flags::READ_DISCONNECT); if let Some(mut payload) = inner_p.payload.take() { payload.feed_eof(); } }; loop { let inner_p = inner.as_mut().project(); let remaining = inner_p.write_buf.capacity() - inner_p.write_buf.len(); if remaining < LW_BUFFER_SIZE { inner_p.write_buf.reserve(HW_BUFFER_SIZE - remaining); } let result = inner.as_mut().poll_response(cx)?; let drain = result == PollResponse::DrainWriteBuf; // switch to upgrade handler if let PollResponse::Upgrade(req) = result { let inner_p = inner.as_mut().project(); let mut parts = FramedParts::with_read_buf( inner_p.io.take().unwrap(), std::mem::take(inner_p.codec), std::mem::take(inner_p.read_buf), ); parts.write_buf = std::mem::take(inner_p.write_buf); let framed = Framed::from_parts(parts); let upgrade = inner_p.upgrade.take().unwrap().call((req, framed)); self.as_mut() .project() .inner .set(DispatcherState::Upgrade(Box::pin(upgrade))); return self.poll(cx); } // we didn't get WouldBlock from write operation, // so data get written to kernel completely (OSX) // and we have to write again otherwise response can get stuck if inner.as_mut().poll_flush(cx)? || !drain { break; } } // client is gone if inner.flags.contains(Flags::WRITE_DISCONNECT) { return Poll::Ready(Ok(())); } let is_empty = inner.state.is_empty(); let inner_p = inner.as_mut().project(); // read half is closed and we do not processing any responses if inner_p.flags.contains(Flags::READ_DISCONNECT) && is_empty { inner_p.flags.insert(Flags::SHUTDOWN); } // keep-alive and stream errors if is_empty && inner_p.write_buf.is_empty() { if let Some(err) = inner_p.error.take() { Poll::Ready(Err(err)) } // disconnect if keep-alive is not enabled else if inner_p.flags.contains(Flags::STARTED) && !inner_p.flags.intersects(Flags::KEEPALIVE) { inner_p.flags.insert(Flags::SHUTDOWN); self.poll(cx) } // disconnect if shutdown else if inner_p.flags.contains(Flags::SHUTDOWN) { self.poll(cx) } else { Poll::Pending } } else { Poll::Pending } } } DispatcherStateProj::Upgrade(fut) => fut.as_mut().poll(cx).map_err(|e| { error!("Upgrade handler error: {}", e); DispatchError::Upgrade }), } } } fn read_available<T>( cx: &mut Context<'_>, io: &mut T, buf: &mut BytesMut, ) -> Result<Option<bool>, io::Error> where T: AsyncRead + Unpin, { let mut read_some = false; loop { // If buf is full return but do not disconnect since // there is more reading to be done if buf.len() >= HW_BUFFER_SIZE { return Ok(Some(false)); } let remaining = buf.capacity() - buf.len(); if remaining < LW_BUFFER_SIZE { buf.reserve(HW_BUFFER_SIZE - remaining); } match read(cx, io, buf) { Poll::Pending => { return if read_some { Ok(Some(false)) } else { Ok(None) }; } Poll::Ready(Ok(n)) => { if n == 0 { return Ok(Some(true)); } else { read_some = true; } } Poll::Ready(Err(e)) => { return if e.kind() == io::ErrorKind::WouldBlock { if read_some { Ok(Some(false)) } else { Ok(None) } } else if e.kind() == io::ErrorKind::ConnectionReset && read_some { Ok(Some(true)) } else { Err(e) } 
} } } } fn read<T>( cx: &mut Context<'_>, io: &mut T, buf: &mut BytesMut, ) -> Poll<Result<usize, io::Error>> where T: AsyncRead + Unpin, { Pin::new(io).poll_read_buf(cx, buf) } #[cfg(test)] mod tests { use actix_service::IntoService; use futures_util::future::{lazy, ok}; use super::*; use crate::error::Error; use crate::h1::{ExpectHandler, UpgradeHandler}; use crate::test::TestBuffer; #[actix_rt::test] async fn test_req_parse_err() { lazy(|cx| { let buf = TestBuffer::new("GET /test HTTP/1\r\n\r\n"); let mut h1 = Dispatcher::<_, _, _, _, UpgradeHandler<TestBuffer>>::new( buf, ServiceConfig::default(), CloneableService::new( (|_| ok::<_, Error>(Response::Ok().finish())).into_service(), ), CloneableService::new(ExpectHandler), None, None, Extensions::new(), None, ); match Pin::new(&mut h1).poll(cx) { Poll::Pending => panic!(), Poll::Ready(res) => assert!(res.is_err()), } if let DispatcherState::Normal(ref mut inner) = h1.inner { assert!(inner.flags.contains(Flags::READ_DISCONNECT)); assert_eq!( &inner.io.take().unwrap().write_buf[..26], b"HTTP/1.1 400 Bad Request\r\n" ); } }) .await; } }
36.606029
89
0.443873
9cc6ef348e6eeeb1bab52f9af40a25f2c74efaca
397
pub mod body;
pub use self::body::Body;

pub mod area;
pub use self::area::Area;

mod delay;
pub use self::delay::Delay;

pub mod header;

pub mod log;

mod message;
pub use self::message::Message;

pub mod name;
pub use self::name::Name;

mod named_values;
pub use self::named_values::NamedValueVisitor;

mod options;
pub use self::options::Options;

mod version;
pub use self::version::Version;
14.178571
46
0.72796
698481e3737e4563bcf017671b106926d3391db7
28,044
use std::collections::HashMap; use std::io::{Read, Write}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use brotli2::write::BrotliEncoder; use bytes::Bytes; use flate2::read::GzDecoder; use flate2::write::GzEncoder; use flate2::Compression; use futures::Future; use rand::Rng; use actix_codec::{AsyncRead, AsyncWrite}; use actix_http::HttpService; use actix_http_test::TestServer; use actix_service::{service_fn, NewService}; use actix_web::http::{Cookie, Version}; use actix_web::middleware::{BodyEncoding, Compress}; use actix_web::{http::header, web, App, Error, HttpMessage, HttpRequest, HttpResponse}; use awc::error::SendRequestError; const STR: &str = "Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World"; #[cfg(feature = "ssl")] fn ssl_acceptor<T: AsyncRead + AsyncWrite>( ) -> std::io::Result<actix_server::ssl::OpensslAcceptor<T, ()>> { use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod}; // load ssl keys let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap(); builder .set_private_key_file("../tests/key.pem", SslFiletype::PEM) .unwrap(); builder .set_certificate_chain_file("../tests/cert.pem") .unwrap(); builder.set_alpn_select_callback(|_, protos| { const H2: &[u8] = b"\x02h2"; if protos.windows(3).any(|window| window == H2) { Ok(b"h2") } else { Err(openssl::ssl::AlpnError::NOACK) } }); builder.set_alpn_protos(b"\x02h2")?; Ok(actix_server::ssl::OpensslAcceptor::new(builder.build())) } #[test] fn test_simple() { let mut srv = TestServer::new(|| { HttpService::new(App::new().service( web::resource("/").route(web::to(|| HttpResponse::Ok().body(STR))), )) }); let request = srv.get("/").header("x-test", "111").send(); let mut response = srv.block_on(request).unwrap(); assert!(response.status().is_success()); // read response let bytes = srv.block_on(response.body()).unwrap(); assert_eq!(bytes, Bytes::from_static(STR.as_ref())); let mut response = srv.block_on(srv.post("/").send()).unwrap(); assert!(response.status().is_success()); // read response let bytes = srv.block_on(response.body()).unwrap(); assert_eq!(bytes, Bytes::from_static(STR.as_ref())); // camel case let response = srv.block_on(srv.post("/").camel_case().send()).unwrap(); assert!(response.status().is_success()); } #[test] fn 
test_json() { let mut srv = TestServer::new(|| { HttpService::new(App::new().service( web::resource("/").route(web::to(|_: web::Json<String>| HttpResponse::Ok())), )) }); let request = srv .get("/") .header("x-test", "111") .send_json(&"TEST".to_string()); let response = srv.block_on(request).unwrap(); assert!(response.status().is_success()); } #[test] fn test_form() { let mut srv = TestServer::new(|| { HttpService::new(App::new().service(web::resource("/").route(web::to( |_: web::Form<HashMap<String, String>>| HttpResponse::Ok(), )))) }); let mut data = HashMap::new(); let _ = data.insert("key".to_string(), "TEST".to_string()); let request = srv.get("/").header("x-test", "111").send_form(&data); let response = srv.block_on(request).unwrap(); assert!(response.status().is_success()); } #[test] fn test_timeout() { let mut srv = TestServer::new(|| { HttpService::new(App::new().service(web::resource("/").route(web::to_async( || { tokio_timer::sleep(Duration::from_millis(200)) .then(|_| Ok::<_, Error>(HttpResponse::Ok().body(STR))) }, )))) }); let client = srv.execute(|| { awc::Client::build() .timeout(Duration::from_millis(50)) .finish() }); let request = client.get(srv.url("/")).send(); match srv.block_on(request) { Err(SendRequestError::Timeout) => (), _ => panic!(), } } #[test] fn test_timeout_override() { let mut srv = TestServer::new(|| { HttpService::new(App::new().service(web::resource("/").route(web::to_async( || { tokio_timer::sleep(Duration::from_millis(200)) .then(|_| Ok::<_, Error>(HttpResponse::Ok().body(STR))) }, )))) }); let client = awc::Client::build() .timeout(Duration::from_millis(50000)) .finish(); let request = client .get(srv.url("/")) .timeout(Duration::from_millis(50)) .send(); match srv.block_on(request) { Err(SendRequestError::Timeout) => (), _ => panic!(), } } #[test] fn test_connection_reuse() { let num = Arc::new(AtomicUsize::new(0)); let num2 = num.clone(); let mut srv = TestServer::new(move || { let num2 = num2.clone(); service_fn(move |io| { num2.fetch_add(1, Ordering::Relaxed); Ok(io) }) .and_then(HttpService::new( App::new().service(web::resource("/").route(web::to(|| HttpResponse::Ok()))), )) }); let client = awc::Client::default(); // req 1 let request = client.get(srv.url("/")).send(); let response = srv.block_on(request).unwrap(); assert!(response.status().is_success()); // req 2 let req = client.post(srv.url("/")); let response = srv.block_on_fn(move || req.send()).unwrap(); assert!(response.status().is_success()); // one connection assert_eq!(num.load(Ordering::Relaxed), 1); } #[cfg(feature = "ssl")] #[test] fn test_connection_reuse_h2() { let openssl = ssl_acceptor().unwrap(); let num = Arc::new(AtomicUsize::new(0)); let num2 = num.clone(); let mut srv = TestServer::new(move || { let num2 = num2.clone(); service_fn(move |io| { num2.fetch_add(1, Ordering::Relaxed); Ok(io) }) .and_then( openssl .clone() .map_err(|e| println!("Openssl error: {}", e)), ) .and_then( HttpService::build() .h2(App::new() .service(web::resource("/").route(web::to(|| HttpResponse::Ok())))) .map_err(|_| ()), ) }); // disable ssl verification use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode}; let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); builder.set_verify(SslVerifyMode::NONE); let _ = builder .set_alpn_protos(b"\x02h2\x08http/1.1") .map_err(|e| log::error!("Can not set alpn protocol: {:?}", e)); let client = awc::Client::build() .connector(awc::Connector::new().ssl(builder.build()).finish()) .finish(); // req 1 let request = 
client.get(srv.surl("/")).send(); let response = srv.block_on(request).unwrap(); assert!(response.status().is_success()); // req 2 let req = client.post(srv.surl("/")); let response = srv.block_on_fn(move || req.send()).unwrap(); assert!(response.status().is_success()); assert_eq!(response.version(), Version::HTTP_2); // one connection assert_eq!(num.load(Ordering::Relaxed), 1); } #[test] fn test_connection_force_close() { let num = Arc::new(AtomicUsize::new(0)); let num2 = num.clone(); let mut srv = TestServer::new(move || { let num2 = num2.clone(); service_fn(move |io| { num2.fetch_add(1, Ordering::Relaxed); Ok(io) }) .and_then(HttpService::new( App::new().service(web::resource("/").route(web::to(|| HttpResponse::Ok()))), )) }); let client = awc::Client::default(); // req 1 let request = client.get(srv.url("/")).force_close().send(); let response = srv.block_on(request).unwrap(); assert!(response.status().is_success()); // req 2 let req = client.post(srv.url("/")).force_close(); let response = srv.block_on_fn(move || req.send()).unwrap(); assert!(response.status().is_success()); // two connection assert_eq!(num.load(Ordering::Relaxed), 2); } #[test] fn test_connection_server_close() { let num = Arc::new(AtomicUsize::new(0)); let num2 = num.clone(); let mut srv = TestServer::new(move || { let num2 = num2.clone(); service_fn(move |io| { num2.fetch_add(1, Ordering::Relaxed); Ok(io) }) .and_then(HttpService::new( App::new().service( web::resource("/") .route(web::to(|| HttpResponse::Ok().force_close().finish())), ), )) }); let client = awc::Client::default(); // req 1 let request = client.get(srv.url("/")).send(); let response = srv.block_on(request).unwrap(); assert!(response.status().is_success()); // req 2 let req = client.post(srv.url("/")); let response = srv.block_on_fn(move || req.send()).unwrap(); assert!(response.status().is_success()); // two connection assert_eq!(num.load(Ordering::Relaxed), 2); } #[test] fn test_connection_wait_queue() { let num = Arc::new(AtomicUsize::new(0)); let num2 = num.clone(); let mut srv = TestServer::new(move || { let num2 = num2.clone(); service_fn(move |io| { num2.fetch_add(1, Ordering::Relaxed); Ok(io) }) .and_then(HttpService::new(App::new().service( web::resource("/").route(web::to(|| HttpResponse::Ok().body(STR))), ))) }); let client = awc::Client::build() .connector(awc::Connector::new().limit(1).finish()) .finish(); // req 1 let request = client.get(srv.url("/")).send(); let mut response = srv.block_on(request).unwrap(); assert!(response.status().is_success()); // req 2 let req2 = client.post(srv.url("/")); let req2_fut = srv.execute(move || { let mut fut = req2.send(); assert!(fut.poll().unwrap().is_not_ready()); fut }); // read response 1 let bytes = srv.block_on(response.body()).unwrap(); assert_eq!(bytes, Bytes::from_static(STR.as_ref())); // req 2 let response = srv.block_on(req2_fut).unwrap(); assert!(response.status().is_success()); // two connection assert_eq!(num.load(Ordering::Relaxed), 1); } #[test] fn test_connection_wait_queue_force_close() { let num = Arc::new(AtomicUsize::new(0)); let num2 = num.clone(); let mut srv = TestServer::new(move || { let num2 = num2.clone(); service_fn(move |io| { num2.fetch_add(1, Ordering::Relaxed); Ok(io) }) .and_then(HttpService::new( App::new().service( web::resource("/") .route(web::to(|| HttpResponse::Ok().force_close().body(STR))), ), )) }); let client = awc::Client::build() .connector(awc::Connector::new().limit(1).finish()) .finish(); // req 1 let request = client.get(srv.url("/")).send(); let 
mut response = srv.block_on(request).unwrap(); assert!(response.status().is_success()); // req 2 let req2 = client.post(srv.url("/")); let req2_fut = srv.execute(move || { let mut fut = req2.send(); assert!(fut.poll().unwrap().is_not_ready()); fut }); // read response 1 let bytes = srv.block_on(response.body()).unwrap(); assert_eq!(bytes, Bytes::from_static(STR.as_ref())); // req 2 let response = srv.block_on(req2_fut).unwrap(); assert!(response.status().is_success()); // two connection assert_eq!(num.load(Ordering::Relaxed), 2); } #[test] fn test_with_query_parameter() { let mut srv = TestServer::new(|| { HttpService::new(App::new().service(web::resource("/").to( |req: HttpRequest| { if req.query_string().contains("qp") { HttpResponse::Ok() } else { HttpResponse::BadRequest() } }, ))) }); let res = srv .block_on(awc::Client::new().get(srv.url("/?qp=5")).send()) .unwrap(); assert!(res.status().is_success()); } #[test] fn test_no_decompress() { let mut srv = TestServer::new(|| { HttpService::new(App::new().wrap(Compress::default()).service( web::resource("/").route(web::to(|| { let mut res = HttpResponse::Ok().body(STR); res.encoding(header::ContentEncoding::Gzip); res })), )) }); let mut res = srv .block_on(awc::Client::new().get(srv.url("/")).no_decompress().send()) .unwrap(); assert!(res.status().is_success()); // read response let bytes = srv.block_on(res.body()).unwrap(); let mut e = GzDecoder::new(&bytes[..]); let mut dec = Vec::new(); e.read_to_end(&mut dec).unwrap(); assert_eq!(Bytes::from(dec), Bytes::from_static(STR.as_ref())); // POST let mut res = srv .block_on(awc::Client::new().post(srv.url("/")).no_decompress().send()) .unwrap(); assert!(res.status().is_success()); let bytes = srv.block_on(res.body()).unwrap(); let mut e = GzDecoder::new(&bytes[..]); let mut dec = Vec::new(); e.read_to_end(&mut dec).unwrap(); assert_eq!(Bytes::from(dec), Bytes::from_static(STR.as_ref())); } #[test] fn test_client_gzip_encoding() { let mut srv = TestServer::new(|| { HttpService::new(App::new().service(web::resource("/").route(web::to(|| { let mut e = GzEncoder::new(Vec::new(), Compression::default()); e.write_all(STR.as_ref()).unwrap(); let data = e.finish().unwrap(); HttpResponse::Ok() .header("content-encoding", "gzip") .body(data) })))) }); // client request let mut response = srv.block_on(srv.post("/").send()).unwrap(); assert!(response.status().is_success()); // read response let bytes = srv.block_on(response.body()).unwrap(); assert_eq!(bytes, Bytes::from_static(STR.as_ref())); } #[test] fn test_client_gzip_encoding_large() { let mut srv = TestServer::new(|| { HttpService::new(App::new().service(web::resource("/").route(web::to(|| { let mut e = GzEncoder::new(Vec::new(), Compression::default()); e.write_all(STR.repeat(10).as_ref()).unwrap(); let data = e.finish().unwrap(); HttpResponse::Ok() .header("content-encoding", "gzip") .body(data) })))) }); // client request let mut response = srv.block_on(srv.post("/").send()).unwrap(); assert!(response.status().is_success()); // read response let bytes = srv.block_on(response.body()).unwrap(); assert_eq!(bytes, Bytes::from(STR.repeat(10))); } #[test] fn test_client_gzip_encoding_large_random() { let data = rand::thread_rng() .sample_iter(&rand::distributions::Alphanumeric) .take(100_000) .collect::<String>(); let mut srv = TestServer::new(|| { HttpService::new(App::new().service(web::resource("/").route(web::to( |data: Bytes| { let mut e = GzEncoder::new(Vec::new(), Compression::default()); e.write_all(&data).unwrap(); let data = 
e.finish().unwrap(); HttpResponse::Ok() .header("content-encoding", "gzip") .body(data) }, )))) }); // client request let mut response = srv.block_on(srv.post("/").send_body(data.clone())).unwrap(); assert!(response.status().is_success()); // read response let bytes = srv.block_on(response.body()).unwrap(); assert_eq!(bytes, Bytes::from(data)); } #[test] fn test_client_brotli_encoding() { let mut srv = TestServer::new(|| { HttpService::new(App::new().service(web::resource("/").route(web::to( |data: Bytes| { let mut e = BrotliEncoder::new(Vec::new(), 5); e.write_all(&data).unwrap(); let data = e.finish().unwrap(); HttpResponse::Ok() .header("content-encoding", "br") .body(data) }, )))) }); // client request let mut response = srv.block_on(srv.post("/").send_body(STR)).unwrap(); assert!(response.status().is_success()); // read response let bytes = srv.block_on(response.body()).unwrap(); assert_eq!(bytes, Bytes::from_static(STR.as_ref())); } // #[test] // fn test_client_brotli_encoding_large_random() { // let data = rand::thread_rng() // .sample_iter(&rand::distributions::Alphanumeric) // .take(70_000) // .collect::<String>(); // let mut srv = test::TestServer::new(|app| { // app.handler(|req: &HttpRequest| { // req.body() // .and_then(move |bytes: Bytes| { // Ok(HttpResponse::Ok() // .content_encoding(http::ContentEncoding::Gzip) // .body(bytes)) // }) // .responder() // }) // }); // // client request // let request = srv // .client(http::Method::POST, "/") // .content_encoding(http::ContentEncoding::Br) // .body(data.clone()) // .unwrap(); // let response = srv.execute(request.send()).unwrap(); // assert!(response.status().is_success()); // // read response // let bytes = srv.execute(response.body()).unwrap(); // assert_eq!(bytes.len(), data.len()); // assert_eq!(bytes, Bytes::from(data)); // } // #[cfg(feature = "brotli")] // #[test] // fn test_client_deflate_encoding() { // let mut srv = test::TestServer::new(|app| { // app.handler(|req: &HttpRequest| { // req.body() // .and_then(|bytes: Bytes| { // Ok(HttpResponse::Ok() // .content_encoding(http::ContentEncoding::Br) // .body(bytes)) // }) // .responder() // }) // }); // // client request // let request = srv // .post() // .content_encoding(http::ContentEncoding::Deflate) // .body(STR) // .unwrap(); // let response = srv.execute(request.send()).unwrap(); // assert!(response.status().is_success()); // // read response // let bytes = srv.execute(response.body()).unwrap(); // assert_eq!(bytes, Bytes::from_static(STR.as_ref())); // } // #[test] // fn test_client_deflate_encoding_large_random() { // let data = rand::thread_rng() // .sample_iter(&rand::distributions::Alphanumeric) // .take(70_000) // .collect::<String>(); // let mut srv = test::TestServer::new(|app| { // app.handler(|req: &HttpRequest| { // req.body() // .and_then(|bytes: Bytes| { // Ok(HttpResponse::Ok() // .content_encoding(http::ContentEncoding::Br) // .body(bytes)) // }) // .responder() // }) // }); // // client request // let request = srv // .post() // .content_encoding(http::ContentEncoding::Deflate) // .body(data.clone()) // .unwrap(); // let response = srv.execute(request.send()).unwrap(); // assert!(response.status().is_success()); // // read response // let bytes = srv.execute(response.body()).unwrap(); // assert_eq!(bytes, Bytes::from(data)); // } // #[test] // fn test_client_streaming_explicit() { // let mut srv = test::TestServer::new(|app| { // app.handler(|req: &HttpRequest| { // req.body() // .map_err(Error::from) // .and_then(|body| { // Ok(HttpResponse::Ok() 
// .chunked() // .content_encoding(http::ContentEncoding::Identity) // .body(body)) // }) // .responder() // }) // }); // let body = once(Ok(Bytes::from_static(STR.as_ref()))); // let request = srv.get("/").body(Body::Streaming(Box::new(body))).unwrap(); // let response = srv.execute(request.send()).unwrap(); // assert!(response.status().is_success()); // // read response // let bytes = srv.execute(response.body()).unwrap(); // assert_eq!(bytes, Bytes::from_static(STR.as_ref())); // } // #[test] // fn test_body_streaming_implicit() { // let mut srv = test::TestServer::new(|app| { // app.handler(|_| { // let body = once(Ok(Bytes::from_static(STR.as_ref()))); // HttpResponse::Ok() // .content_encoding(http::ContentEncoding::Gzip) // .body(Body::Streaming(Box::new(body))) // }) // }); // let request = srv.get("/").finish().unwrap(); // let response = srv.execute(request.send()).unwrap(); // assert!(response.status().is_success()); // // read response // let bytes = srv.execute(response.body()).unwrap(); // assert_eq!(bytes, Bytes::from_static(STR.as_ref())); // } #[test] fn test_client_cookie_handling() { fn err() -> Error { use std::io::{Error as IoError, ErrorKind}; // stub some generic error Error::from(IoError::from(ErrorKind::NotFound)) } let cookie1 = Cookie::build("cookie1", "value1").finish(); let cookie2 = Cookie::build("cookie2", "value2") .domain("www.example.org") .path("/") .secure(true) .http_only(true) .finish(); // Q: are all these clones really necessary? A: Yes, possibly let cookie1b = cookie1.clone(); let cookie2b = cookie2.clone(); let mut srv = TestServer::new(move || { let cookie1 = cookie1b.clone(); let cookie2 = cookie2b.clone(); HttpService::new(App::new().route( "/", web::to(move |req: HttpRequest| { // Check cookies were sent correctly req.cookie("cookie1") .ok_or_else(err) .and_then(|c1| { if c1.value() == "value1" { Ok(()) } else { Err(err()) } }) .and_then(|()| req.cookie("cookie2").ok_or_else(err)) .and_then(|c2| { if c2.value() == "value2" { Ok(()) } else { Err(err()) } }) // Send some cookies back .map(|_| { HttpResponse::Ok() .cookie(cookie1.clone()) .cookie(cookie2.clone()) .finish() }) }), )) }); let request = srv.get("/").cookie(cookie1.clone()).cookie(cookie2.clone()); let response = srv.block_on(request.send()).unwrap(); assert!(response.status().is_success()); let c1 = response.cookie("cookie1").expect("Missing cookie1"); assert_eq!(c1, cookie1); let c2 = response.cookie("cookie2").expect("Missing cookie2"); assert_eq!(c2, cookie2); } // #[test] // fn client_read_until_eof() { // let addr = test::TestServer::unused_addr(); // thread::spawn(move || { // let lst = net::TcpListener::bind(addr).unwrap(); // for stream in lst.incoming() { // let mut stream = stream.unwrap(); // let mut b = [0; 1000]; // let _ = stream.read(&mut b).unwrap(); // let _ = stream // .write_all(b"HTTP/1.1 200 OK\r\nconnection: close\r\n\r\nwelcome!"); // } // }); // let mut sys = actix::System::new("test"); // // client request // let req = client::ClientRequest::get(format!("http://{}/", addr).as_str()) // .finish() // .unwrap(); // let response = sys.block_on(req.send()).unwrap(); // assert!(response.status().is_success()); // // read response // let bytes = sys.block_on(response.body()).unwrap(); // assert_eq!(bytes, Bytes::from_static(b"welcome!")); // } #[test] fn client_basic_auth() { let mut srv = TestServer::new(|| { HttpService::new(App::new().route( "/", web::to(|req: HttpRequest| { if req .headers() .get(header::AUTHORIZATION) .unwrap() .to_str() .unwrap() == "Basic 
dXNlcm5hbWU6cGFzc3dvcmQ=" { HttpResponse::Ok() } else { HttpResponse::BadRequest() } }), )) }); // set authorization header to Basic <base64 encoded username:password> let request = srv.get("/").basic_auth("username", Some("password")); let response = srv.block_on(request.send()).unwrap(); assert!(response.status().is_success()); } #[test] fn client_bearer_auth() { let mut srv = TestServer::new(|| { HttpService::new(App::new().route( "/", web::to(|req: HttpRequest| { if req .headers() .get(header::AUTHORIZATION) .unwrap() .to_str() .unwrap() == "Bearer someS3cr3tAutht0k3n" { HttpResponse::Ok() } else { HttpResponse::BadRequest() } }), )) }); // set authorization header to Bearer <token> let request = srv.get("/").bearer_auth("someS3cr3tAutht0k3n"); let response = srv.block_on(request.send()).unwrap(); assert!(response.status().is_success()); }
32.16055
89
0.532627
d612ad3f764f20b2ee77e1d05e3f5d16c12f1091
2,732
//! Helper module which defines [`FuncArgs`] to make function calling easier. #![allow(non_snake_case)] use crate::types::dynamic::Variant; use crate::Dynamic; #[cfg(feature = "no_std")] use std::prelude::v1::*; /// Trait that parses arguments to a function call. /// /// Any data type can implement this trait in order to pass arguments to [`Engine::call_fn`][crate::Engine::call_fn]. pub trait FuncArgs { /// Parse function call arguments into a container. /// /// # Example /// /// ``` /// use rhai::{Engine, Dynamic, FuncArgs, Scope}; /// /// // A struct containing function arguments /// struct Options { /// pub foo: bool, /// pub bar: String, /// pub baz: i64, /// } /// /// impl FuncArgs for Options { /// fn parse<ARGS: Extend<Dynamic>>(self, args: &mut ARGS) { /// args.extend(std::iter::once(self.foo.into())); /// args.extend(std::iter::once(self.bar.into())); /// args.extend(std::iter::once(self.baz.into())); /// } /// } /// /// # fn main() -> Result<(), Box<rhai::EvalAltResult>> { /// # #[cfg(not(feature = "no_function"))] /// # { /// let options = Options { foo: false, bar: "world".to_string(), baz: 42 }; /// /// let engine = Engine::new(); /// let mut scope = Scope::new(); /// /// let ast = engine.compile( /// " /// fn hello(x, y, z) { /// if x { `hello ${y}` } else { y + z } /// } /// ")?; /// /// let result: String = engine.call_fn(&mut scope, &ast, "hello", options)?; /// /// assert_eq!(result, "world42"); /// # } /// # Ok(()) /// # } /// ``` fn parse<ARGS: Extend<Dynamic>>(self, args: &mut ARGS); } impl<T: Variant + Clone> FuncArgs for Vec<T> { #[inline] fn parse<ARGS: Extend<Dynamic>>(self, args: &mut ARGS) { args.extend(self.into_iter().map(Dynamic::from)); } } /// Macro to implement [`FuncArgs`] for tuples of standard types (each can be /// converted into a [`Dynamic`]). macro_rules! impl_args { ($($p:ident),*) => { impl<$($p: Variant + Clone),*> FuncArgs for ($($p,)*) { #[inline] #[allow(unused_variables)] fn parse<ARGS: Extend<Dynamic>>(self, args: &mut ARGS) { let ($($p,)*) = self; $(args.extend(Some(Dynamic::from($p)));)* } } impl_args!(@pop $($p),*); }; (@pop) => { }; (@pop $head:ident) => { impl_args!(); }; (@pop $head:ident $(, $tail:ident)+) => { impl_args!($($tail),*); }; } impl_args!(A, B, C, D, E, F, G, H, J, K, L, M, N, P, Q, R, S, T, U, V);
28.757895
117
0.495242
2f3f0802f74421badf65097bcadf7d54c7e75710
2,616
use crate::{ buffer_try_push_str, buffer_write_fmt, node_was_already_visited, sql_writer::SqlWriterLogic, truncate_if_ends_with_char, AuxNodes, SqlValue, SqlWriter, Table, TableDefs, TableFields, TableSourceAssociation, }; use core::fmt::Display; impl<'entity, B, TD> SqlWriterLogic<'entity, B, TD> where B: cl_traits::String, TD: TableDefs<'entity>, TD::Associations: SqlWriter<B, Error = TD::Error>, TD::Error: From<crate::Error>, { #[inline] pub(crate) fn write_insert<'value, V>( aux: &mut AuxNodes, buffer: &mut B, table: &Table<'entity, TD>, tsa: &mut Option<TableSourceAssociation<'value, V>>, ) -> Result<(), TD::Error> where V: Display, { if node_was_already_visited(aux, table)? { return Ok(()); } let elem_opt = || { if let Some(ref el) = *tsa { (el.source_field() != table.id_field().name()).then(|| el) } else { None } }; if let Some(elem) = elem_opt() { Self::write_insert_manager( buffer, table, |local| buffer_write_fmt(local, format_args!(",{}", elem.source_field())), |local| buffer_write_fmt(local, format_args!("'{}',", elem.source_value())), )?; } else { Self::write_insert_manager(buffer, table, |_| Ok(()), |_| Ok(()))?; } let mut new_tsa = table.id_field().value().as_ref().map(TableSourceAssociation::new); table.associations().write_insert(aux, buffer, &mut new_tsa)?; Ok(()) } fn write_insert_manager( buffer: &mut B, table: &Table<'entity, TD>, foreign_key_name_cb: impl Fn(&mut B) -> crate::Result<()>, foreign_key_value_cb: impl Fn(&mut B) -> crate::Result<()>, ) -> Result<(), TD::Error> { let len_before_insert = buffer.as_ref().len(); buffer_write_fmt(buffer, format_args!("INSERT INTO \"{}\" (", TD::TABLE_NAME))?; buffer_try_push_str(buffer, table.id_field().name())?; for field in table.fields().field_names() { buffer_write_fmt(buffer, format_args!(",{}", field))?; } foreign_key_name_cb(&mut *buffer)?; buffer_try_push_str(buffer, ") VALUES (")?; let len_before_values = buffer.as_ref().len(); if let &Some(ref elem) = table.id_field().value() { elem.write(buffer)?; buffer_try_push_str(buffer, ",")?; } table.fields().write_insert_values(buffer)?; if buffer.as_ref().len() == len_before_values { buffer.truncate(len_before_insert); } else { foreign_key_value_cb(&mut *buffer)?; truncate_if_ends_with_char(buffer, ','); buffer_try_push_str(buffer, ");")?; } Ok(()) } }
30.068966
94
0.620413
69fae8c4b1d2846493b37e29d7d65a79b4cd2d54
884
use slog::{info, Logger};

mod observable_counting_semaphore;
mod unix;

pub use observable_counting_semaphore::*;
pub use unix::{ensure_single_named_systemd_socket, incoming_from_first_systemd_socket};

/// Returns a `Future` that completes when the service should gracefully
/// shutdown. Completion happens if either of `SIGINT` or `SIGTERM` are
/// received.
pub async fn shutdown_signal(log: Logger) {
    use tokio::signal::unix::{signal, SignalKind};

    let mut sig_int =
        signal(SignalKind::interrupt()).expect("failed to install SIGINT signal handler");
    let mut sig_term =
        signal(SignalKind::terminate()).expect("failed to install SIGTERM signal handler");

    tokio::select! {
        _ = sig_int.recv() => {
            info!(log, "Caught SIGINT");
        }
        _ = sig_term.recv() => {
            info!(log, "Caught SIGTERM");
        }
    }
}
31.571429
91
0.665158
7275921d09964b0cc4bf739180fdbb4a1a822b32
4,416
// Copyright 2022 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { crate::{ abs_moniker::{AbsoluteMoniker, AbsoluteMonikerBase}, error::MonikerError, }, core::cmp::Ord, std::fmt, }; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; /// One of: /// - An absolute moniker /// - A marker representing component manager's realm #[cfg_attr(feature = "serde", derive(Deserialize, Serialize), serde(rename_all = "snake_case"))] #[derive(Eq, Ord, PartialOrd, PartialEq, Debug, Clone, Hash)] pub enum ExtendedMoniker { ComponentInstance(AbsoluteMoniker), ComponentManager, } /// The string representation of ExtendedMoniker::ComponentManager const EXTENDED_MONIKER_COMPONENT_MANAGER_STR: &'static str = "<component_manager>"; impl ExtendedMoniker { pub fn parse_string_without_instances(rep: &str) -> Result<Self, MonikerError> { if rep == EXTENDED_MONIKER_COMPONENT_MANAGER_STR { Ok(ExtendedMoniker::ComponentManager) } else { Ok(ExtendedMoniker::ComponentInstance(AbsoluteMoniker::parse_string_without_instances( rep, )?)) } } pub fn unwrap_instance_moniker_or<E: std::error::Error>( &self, error: E, ) -> Result<&AbsoluteMoniker, E> { match self { Self::ComponentManager => Err(error), Self::ComponentInstance(moniker) => Ok(moniker), } } pub fn contains_in_realm(&self, other: &ExtendedMoniker) -> bool { match (self, other) { (Self::ComponentManager, _) => true, (Self::ComponentInstance(_), Self::ComponentManager) => false, (Self::ComponentInstance(a), Self::ComponentInstance(b)) => a.contains_in_realm(b), } } pub fn to_string_without_instances(&self) -> String { match self { Self::ComponentInstance(m) => m.to_string_without_instances(), Self::ComponentManager => EXTENDED_MONIKER_COMPONENT_MANAGER_STR.into(), } } } impl fmt::Display for ExtendedMoniker { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::ComponentInstance(m) => { write!(f, "{}", m)?; } Self::ComponentManager => { write!(f, "{}", EXTENDED_MONIKER_COMPONENT_MANAGER_STR)?; } } Ok(()) } } impl From<AbsoluteMoniker> for ExtendedMoniker { fn from(m: AbsoluteMoniker) -> Self { Self::ComponentInstance(m) } } #[cfg(test)] mod tests { use super::*; #[test] fn extended_monikers_parse() { assert_eq!( ExtendedMoniker::parse_string_without_instances(EXTENDED_MONIKER_COMPONENT_MANAGER_STR) .unwrap(), ExtendedMoniker::ComponentManager ); assert_eq!( ExtendedMoniker::parse_string_without_instances("/foo/bar").unwrap(), ExtendedMoniker::ComponentInstance( AbsoluteMoniker::parse_string_without_instances("/foo/bar").unwrap() ) ); assert!(ExtendedMoniker::parse_string_without_instances("").is_err(), "cannot be empty"); assert!( ExtendedMoniker::parse_string_without_instances("foo/bar").is_err(), "must start with /" ); } #[test] fn to_string_functions() { let cm_moniker = ExtendedMoniker::parse_string_without_instances(EXTENDED_MONIKER_COMPONENT_MANAGER_STR) .unwrap(); let foobar_moniker = ExtendedMoniker::parse_string_without_instances("/foo/bar").unwrap(); let empty_moniker = ExtendedMoniker::parse_string_without_instances("/").unwrap(); assert_eq!(format!("{}", cm_moniker), EXTENDED_MONIKER_COMPONENT_MANAGER_STR.to_string()); assert_eq!( cm_moniker.to_string_without_instances(), EXTENDED_MONIKER_COMPONENT_MANAGER_STR.to_string() ); assert_eq!(format!("{}", foobar_moniker), "/foo/bar".to_string()); assert_eq!(foobar_moniker.to_string_without_instances(), "/foo/bar".to_string()); assert_eq!(format!("{}", empty_moniker), 
"/".to_string()); assert_eq!(empty_moniker.to_string_without_instances(), "/".to_string()); } }
33.969231
99
0.625453
ed2786edf3a3f25d9b09531b35a3dfb989578570
4,366
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // ignore-emscripten FIXME(#45351) hits an LLVM assert #![feature(repr_simd, platform_intrinsics, concat_idents, test)] #![allow(non_camel_case_types)] extern crate test; #[repr(simd)] #[derive(PartialEq, Debug)] struct i32x4(i32, i32, i32, i32); #[repr(simd)] #[derive(PartialEq, Debug)] struct i8x4(i8, i8, i8, i8); #[repr(simd)] #[derive(PartialEq, Debug)] struct u32x4(u32, u32, u32, u32); #[repr(simd)] #[derive(PartialEq, Debug)] struct u8x4(u8, u8, u8, u8); #[repr(simd)] #[derive(PartialEq, Debug)] struct f32x4(f32, f32, f32, f32); #[repr(simd)] #[derive(PartialEq, Debug)] struct f64x4(f64, f64, f64, f64); extern "platform-intrinsic" { fn simd_cast<T, U>(x: T) -> U; } const A: i32 = -1234567; const B: i32 = 12345678; const C: i32 = -123456789; const D: i32 = 1234567890; trait Foo { fn is_float() -> bool { false } fn in_range(x: i32) -> bool; } impl Foo for i32 { fn in_range(_: i32) -> bool { true } } impl Foo for i8 { fn in_range(x: i32) -> bool { -128 <= x && x < 128 } } impl Foo for u32 { fn in_range(x: i32) -> bool { 0 <= x } } impl Foo for u8 { fn in_range(x: i32) -> bool { 0 <= x && x < 128 } } impl Foo for f32 { fn is_float() -> bool { true } fn in_range(_: i32) -> bool { true } } impl Foo for f64 { fn is_float() -> bool { true } fn in_range(_: i32) -> bool { true } } fn main() { macro_rules! test { ($from: ident, $to: ident) => {{ // force the casts to actually happen, or else LLVM/rustc // may fold them and get slightly different results. let (a, b, c, d) = test::black_box((A as $from, B as $from, C as $from, D as $from)); // the SIMD vectors are all FOOx4, so we can concat_idents // so we don't have to pass in the extra args to the macro let mut from = simd_cast(concat_idents!($from, x4)(a, b, c, d)); let mut to = concat_idents!($to, x4)(a as $to, b as $to, c as $to, d as $to); // assist type inference, it needs to know what `from` is // for the `if` statements. to == from; // there are platform differences for some out of range // casts, so we just normalize such things: it's OK for // "invalid" calculations to result in nonsense answers. // (E.g. negative float to unsigned integer goes through a // library routine on the default i686 platforms, and the // implementation of that routine differs on e.g. Linux // vs. macOS, resulting in different answers.) if $from::is_float() { if !$to::in_range(A) { from.0 = 0 as $to; to.0 = 0 as $to; } if !$to::in_range(B) { from.1 = 0 as $to; to.1 = 0 as $to; } if !$to::in_range(C) { from.2 = 0 as $to; to.2 = 0 as $to; } if !$to::in_range(D) { from.3 = 0 as $to; to.3 = 0 as $to; } } assert!(to == from, "{} -> {} ({:?} != {:?})", stringify!($from), stringify!($to), from, to); }} } macro_rules! 
tests { (: $($to: ident),*) => { () }; // repeating the list twice is easier than writing a cartesian // product macro ($from: ident $(, $from_: ident)*: $($to: ident),*) => { fn $from() { unsafe { $( test!($from, $to); )* } } tests!($($from_),*: $($to),*) }; ($($types: ident),*) => {{ tests!($($types),* : $($types),*); $($types();)* }} } // test various combinations, including truncation, // signed/unsigned extension, and floating point casts. tests!(i32, i8, u32, u8, f32); tests!(i32, u32, f32, f64) }
33.584615
97
0.540541
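An aside on the out-of-range handling in the test above (this note is mine, not part of the original file): since Rust 1.45, scalar float-to-integer `as` casts are defined to saturate, which can be checked standalone. The test predates that guarantee and also exercises the `simd_cast` platform intrinsic, so it zeroes out-of-range lanes instead of relying on any particular result.

```rust
fn main() {
    // Scalar float-to-int `as` casts saturate in Rust 1.45 and later.
    assert_eq!(-1.0f32 as u32, 0); // negative -> unsigned saturates to 0
    assert_eq!(1e10f32 as i32, i32::MAX); // too large -> saturates to i32::MAX
    assert_eq!(f32::NAN as i32, 0); // NaN -> 0
}
```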
4ae438e6e61d88c3272352914ec6b90c52e0ff4b
2,217
use std::io; fn main() -> io::Result<()> { let mut config = prost_build::Config::new(); config .type_attribute(".test.Scalars", "#[cfg_attr(test, derive(::proptest_derive::Arbitrary))]") .type_attribute(".test.ScalarArrays", "#[cfg_attr(test, derive(::proptest_derive::Arbitrary))]") .type_attribute(".test.ComplexType", "#[cfg_attr(test, derive(::proptest_derive::Arbitrary))]") .type_attribute(".test.WellKnownTypes", "#[cfg_attr(test, derive(::proptest_derive::Arbitrary))]") .field_attribute( ".test.WellKnownTypes.timestamp", "#[cfg_attr(test, proptest(strategy = \"::proptest::option::of(crate::arbitrary::timestamp())\"))]", ) .field_attribute( ".test.WellKnownTypes.duration", "#[cfg_attr(test, proptest(strategy = \"::proptest::option::of(crate::arbitrary::duration())\"))]", ) .field_attribute( ".test.WellKnownTypes.struct", "#[cfg_attr(test, proptest(strategy = \"::proptest::option::of(crate::arbitrary::struct_())\"))]", ) .field_attribute( ".test.WellKnownTypes.list", "#[cfg_attr(test, proptest(strategy = \"::proptest::option::of(crate::arbitrary::list())\"))]", ) .field_attribute( ".test.WellKnownTypes.mask", "#[cfg_attr(test, proptest(strategy = \"::proptest::option::of(crate::arbitrary::mask())\"))]", ) .field_attribute( ".test.WellKnownTypes.empty", "#[cfg_attr(test, proptest(strategy = \"::proptest::option::of(::proptest::strategy::Just(()))\"))]", ) .field_attribute(".test.WellKnownTypes.null", "#[cfg_attr(test, proptest(value= \"0\"))]"); prost_reflect_build::Builder::new() .file_descriptor_expr("crate::TEST_DESCRIPTOR_POOL") .compile_protos_with_config( config, &[ "src/test.proto", "src/test2.proto", "src/desc.proto", "src/desc2.proto", "src/desc_no_package.proto", "src/imports.proto", ], &["src/"], )?; Ok(()) }
42.634615
113
0.553
3846922cdd6ad28f9b3f04195684b4018b74e74f
598
#![allow(dead_code)] #![allow(unused_imports)] use itertools::Itertools; use proconio::marker::{Bytes, Chars, Usize1}; use proconio::*; use std::cmp::*; use std::collections::{BTreeMap, BTreeSet, BinaryHeap, HashMap, HashSet, VecDeque}; use std::io; use std::mem::*; #[fastout] fn main() { input! { s: Chars, } let mut answer = Vec::with_capacity(s.len()); for c in s.into_iter().rev() { let x = match c { '6' => '9', '9' => '6', _ => c, }; answer.push(x); } println!("{}", answer.iter().join("")); }
19.290323
83
0.521739
09de6ade5e019f0b21070fabf296a9521ac35e69
2,848
use crate::commands::WholeStreamCommand; use crate::data::TaggedListBuilder; use crate::prelude::*; use crate::utils::data_processing::{columns_sorted, t_sort}; use chrono::{DateTime, NaiveDate, Utc}; use nu_errors::ShellError; use nu_protocol::{ Primitive, ReturnSuccess, Signature, SyntaxShape, TaggedDictBuilder, UntaggedValue, Value, }; use nu_source::Tagged; use nu_value_ext::get_data_by_key; pub struct TSortBy; #[derive(Deserialize)] pub struct TSortByArgs { #[serde(rename(deserialize = "show-columns"))] show_columns: bool, group_by: Option<Tagged<String>>, #[allow(unused)] split_by: Option<String>, } impl WholeStreamCommand for TSortBy { fn name(&self) -> &str { "t-sort-by" } fn signature(&self) -> Signature { Signature::build("t-sort-by") .switch( "show-columns", "Displays the column names sorted", Some('c'), ) .named( "group_by", SyntaxShape::String, "the name of the column to group by", Some('g'), ) .named( "split_by", SyntaxShape::String, "the name of the column within the grouped by table to split by", Some('s'), ) } fn usage(&self) -> &str { "Sort by the given columns." } fn run( &self, args: CommandArgs, registry: &CommandRegistry, ) -> Result<OutputStream, ShellError> { t_sort_by(args, registry) } } fn t_sort_by(args: CommandArgs, registry: &CommandRegistry) -> Result<OutputStream, ShellError> { let registry = registry.clone(); let stream = async_stream! { let name = args.call_info.name_tag.clone(); let (TSortByArgs { show_columns, group_by, ..}, mut input) = args.process(&registry).await?; let values: Vec<Value> = input.collect().await; let column_grouped_by_name = if let Some(grouped_by) = group_by { Some(grouped_by.item().clone()) } else { None }; if show_columns { for label in columns_sorted(column_grouped_by_name, &values[0], &name).into_iter() { yield ReturnSuccess::value(UntaggedValue::string(label.item).into_value(label.tag)); } } else { match t_sort(column_grouped_by_name, None, &values[0], name) { Ok(sorted) => yield ReturnSuccess::value(sorted), Err(err) => yield Err(err) } } }; Ok(stream.to_output_stream()) } #[cfg(test)] mod tests { use super::TSortBy; #[test] fn examples_work_as_expected() { use crate::examples::test as test_examples; test_examples(TSortBy {}) } }
27.921569
101
0.573385
619e51774b20b5a6663f92d9b5465edb9239cd02
14
Hotel2.Hotel2
7
13
0.857143
0ef787c973396b69ae562a7a23b0d3e2db50577d
26,373
// This file is part of the SORA network and Polkaswap app. // Copyright (c) 2020, 2021, Polka Biome Ltd. All rights reserved. // SPDX-License-Identifier: BSD-4-Clause // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // Redistributions of source code must retain the above copyright notice, this list // of conditions and the following disclaimer. // Redistributions in binary form must reproduce the above copyright notice, this // list of conditions and the following disclaimer in the documentation and/or other // materials provided with the distribution. // // All advertising materials mentioning features or use of this software must display // the following acknowledgement: This product includes software developed by Polka Biome // Ltd., SORA, and Polkaswap. // // Neither the name of the Polka Biome Ltd. nor the names of its contributors may be used // to endorse or promote products derived from this software without specific prior written permission. // THIS SOFTWARE IS PROVIDED BY Polka Biome Ltd. AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Polka Biome Ltd. BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. //! # Assets Pallet //! //! ## Overview //! //! The assets module serves as an extension of `currencies` pallet. //! It allows to explicitly register new assets and store their owners' account IDs. //! This allows to restrict some actions on assets for non-owners. //! //! ### Dispatchable Functions //! //! - `register` - registers new asset by a given ID. 
// TODO: add info about weight #![cfg_attr(not(feature = "std"), no_std)] #[allow(unused_imports)] #[macro_use] extern crate alloc; pub mod weights; mod benchmarking; #[cfg(test)] mod mock; #[cfg(test)] mod tests; use codec::{Decode, Encode}; use common::prelude::Balance; use common::{hash, Amount, AssetName, AssetSymbol, BalancePrecision, DEFAULT_BALANCE_PRECISION}; use frame_support::dispatch::{DispatchError, DispatchResult}; use frame_support::sp_runtime::traits::{MaybeSerializeDeserialize, Member}; use frame_support::traits::Get; use frame_support::weights::Weight; use frame_support::{ensure, Parameter}; use frame_system::ensure_signed; use permissions::{Scope, BURN, MINT, TRANSFER}; use sp_core::hash::H512; use sp_core::H256; use sp_runtime::traits::Zero; use sp_std::vec::Vec; use tiny_keccak::{Hasher, Keccak}; use traits::{ MultiCurrency, MultiCurrencyExtended, MultiLockableCurrency, MultiReservableCurrency, }; pub trait WeightInfo { fn register() -> Weight; fn transfer() -> Weight; fn mint() -> Weight; fn burn() -> Weight; fn set_non_mintable() -> Weight; } pub type AssetIdOf<T> = <T as Config>::AssetId; pub type Permissions<T> = permissions::Pallet<T>; type AccountIdOf<T> = <T as frame_system::Config>::AccountId; type CurrencyIdOf<T> = <<T as Config>::Currency as MultiCurrency<<T as frame_system::Config>::AccountId>>::CurrencyId; const ASSET_SYMBOL_MAX_LENGTH: usize = 7; const ASSET_NAME_MAX_LENGTH: usize = 33; const MAX_ALLOWED_PRECISION: u8 = 18; #[derive(Clone, Copy, Eq, PartialEq, Encode, Decode)] pub enum AssetRecordArg<T: Config> { GenericI32(i32), GenericU64(u64), GenericU128(u128), GenericU8x32([u8; 32]), GenericH256(H256), GenericH512(H512), LeafAssetId(AssetIdOf<T>), AssetRecordAssetId(AssetIdOf<T>), Extra(T::ExtraAssetRecordArg), } #[derive(Clone, Copy, Eq, PartialEq, Encode, Decode)] pub enum AssetRecord<T: Config> { Arity0, Arity1(AssetRecordArg<T>), Arity2(AssetRecordArg<T>, AssetRecordArg<T>), Arity3(AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>), Arity4( AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, ), Arity5( AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, ), Arity6( AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, ), Arity7( AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, ), Arity8( AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, ), Arity9( AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, AssetRecordArg<T>, ), } pub use pallet::*; #[frame_support::pallet] pub mod pallet { use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; #[pallet::config] pub trait Config: frame_system::Config + permissions::Config + tokens::Config + common::Config { type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>; type ExtraAccountId: Clone + Copy + Encode + Decode + Eq + PartialEq + From<Self::AccountId> + Into<Self::AccountId>; type ExtraAssetRecordArg: Clone + Copy + Encode + Decode + Eq + PartialEq + From<common::AssetIdExtraAssetRecordArg<Self::DEXId, Self::LstId, Self::ExtraAccountId>> + Into<common::AssetIdExtraAssetRecordArg<Self::DEXId, Self::LstId, Self::ExtraAccountId>>; /// DEX 
assets (currency) identifier. type AssetId: Parameter + Member + Copy + MaybeSerializeDeserialize + Ord + Default + Into<CurrencyIdOf<Self>> + From<common::AssetId32<common::PredefinedAssetId>> + From<H256> + Into<H256> + Into<<Self as tokens::Config>::CurrencyId>; /// The base asset as the core asset in all trading pairs type GetBaseAssetId: Get<Self::AssetId>; /// Currency to transfer, reserve/unreserve, lock/unlock assets type Currency: MultiLockableCurrency< Self::AccountId, Moment = Self::BlockNumber, CurrencyId = Self::AssetId, Balance = Balance, > + MultiReservableCurrency<Self::AccountId, CurrencyId = Self::AssetId, Balance = Balance> + MultiCurrencyExtended<Self::AccountId, Amount = Amount>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] pub struct Pallet<T>(PhantomData<T>); #[pallet::hooks] impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {} #[pallet::call] impl<T: Config> Pallet<T> { /// Performs an asset registration. /// /// Registers new `AssetId` for the given `origin`. /// AssetSymbol should represent string with only uppercase latin chars with max length of 7. /// AssetName should represent string with only uppercase or lowercase latin chars or numbers or spaces, with max length of 33. #[pallet::weight(<T as Config>::WeightInfo::register())] pub fn register( origin: OriginFor<T>, symbol: AssetSymbol, name: AssetName, initial_supply: Balance, is_mintable: bool, ) -> DispatchResultWithPostInfo { let author = ensure_signed(origin)?; let _asset_id = Self::register_from( &author, symbol, name, DEFAULT_BALANCE_PRECISION, initial_supply, is_mintable, )?; Ok(().into()) } /// Performs a checked Asset transfer. /// /// - `origin`: caller Account, from which Asset amount is withdrawn, /// - `asset_id`: Id of transferred Asset, /// - `to`: Id of Account, to which Asset amount is deposited, /// - `amount`: transferred Asset amount. #[pallet::weight(<T as Config>::WeightInfo::transfer())] pub fn transfer( origin: OriginFor<T>, asset_id: T::AssetId, to: T::AccountId, amount: Balance, ) -> DispatchResultWithPostInfo { let from = ensure_signed(origin.clone())?; Self::transfer_from(&asset_id, &from, &to, amount)?; Self::deposit_event(Event::Transfer(from, to, asset_id, amount)); Ok(().into()) } /// Performs a checked Asset mint, can only be done /// by corresponding asset owner account. /// /// - `origin`: caller Account, which issues Asset minting, /// - `asset_id`: Id of minted Asset, /// - `to`: Id of Account, to which Asset amount is minted, /// - `amount`: minted Asset amount. #[pallet::weight(<T as Config>::WeightInfo::mint())] pub fn mint( origin: OriginFor<T>, asset_id: T::AssetId, to: T::AccountId, amount: Balance, ) -> DispatchResultWithPostInfo { let issuer = ensure_signed(origin.clone())?; Self::mint_to(&asset_id, &issuer, &to, amount)?; Self::deposit_event(Event::Mint(issuer, to, asset_id.clone(), amount)); Ok(().into()) } /// Performs a checked Asset burn, can only be done /// by corresponding asset owner with own account. /// /// - `origin`: caller Account, from which Asset amount is burned, /// - `asset_id`: Id of burned Asset, /// - `amount`: burned Asset amount. 
        #[pallet::weight(<T as Config>::WeightInfo::burn())]
        pub fn burn(
            origin: OriginFor<T>,
            asset_id: T::AssetId,
            amount: Balance,
        ) -> DispatchResultWithPostInfo {
            let issuer = ensure_signed(origin.clone())?;
            Self::burn_from(&asset_id, &issuer, &issuer, amount)?;
            Self::deposit_event(Event::Burn(issuer, asset_id.clone(), amount));
            Ok(().into())
        }

        /// Set given asset to be non-mintable, i.e. it can no longer be minted, only burned.
        /// Operation cannot be undone.
        ///
        /// - `origin`: caller Account, should correspond to Asset owner
        /// - `asset_id`: Id of burned Asset,
        #[pallet::weight(<T as Config>::WeightInfo::set_non_mintable())]
        pub fn set_non_mintable(
            origin: OriginFor<T>,
            asset_id: T::AssetId,
        ) -> DispatchResultWithPostInfo {
            let who = ensure_signed(origin.clone())?;
            Self::set_non_mintable_from(&asset_id, &who)?;
            Self::deposit_event(Event::AssetSetNonMintable(asset_id.clone()));
            Ok(().into())
        }
    }

    #[pallet::event]
    #[pallet::metadata(AccountIdOf<T> = "AccountId", AssetIdOf<T> = "AssetId")]
    #[pallet::generate_deposit(pub(super) fn deposit_event)]
    pub enum Event<T: Config> {
        /// New asset has been registered. [Asset Id, Asset Owner Account]
        AssetRegistered(AssetIdOf<T>, AccountIdOf<T>),
        /// Asset amount has been transferred. [From Account, To Account, Transferred Asset Id, Amount Transferred]
        Transfer(AccountIdOf<T>, AccountIdOf<T>, AssetIdOf<T>, Balance),
        /// Asset amount has been minted. [Issuer Account, Target Account, Minted Asset Id, Amount Minted]
        Mint(AccountIdOf<T>, AccountIdOf<T>, AssetIdOf<T>, Balance),
        /// Asset amount has been burned. [Issuer Account, Burned Asset Id, Amount Burned]
        Burn(AccountIdOf<T>, AssetIdOf<T>, Balance),
        /// Asset is set as non-mintable. [Target Asset Id]
        AssetSetNonMintable(AssetIdOf<T>),
    }

    #[pallet::error]
    pub enum Error<T> {
        /// An asset with a given ID already exists.
        AssetIdAlreadyExists,
        /// An asset with a given ID does not exist.
        AssetIdNotExists,
        /// A number is out of range of the balance type.
        InsufficientBalance,
        /// Symbol is not valid. It must contain only uppercase latin characters, length <= 7.
        InvalidAssetSymbol,
        /// Name is not valid. It must contain only uppercase or lowercase latin characters or numbers or spaces, length <= 33.
        InvalidAssetName,
        /// Precision value is not valid, it should represent a number of decimal places for the number, max is 18 (`MAX_ALLOWED_PRECISION`).
        InvalidPrecision,
        /// Minting for particular asset id is disabled.
        AssetSupplyIsNotMintable,
        /// Caller does not own requested asset.
        InvalidAssetOwner,
        /// Increment account reference error.
IncRefError, } /// Asset Id -> Owner Account Id #[pallet::storage] #[pallet::getter(fn asset_owners)] pub(super) type AssetOwners<T: Config> = StorageMap<_, Twox64Concat, T::AssetId, T::AccountId, ValueQuery>; /// Asset Id -> (Symbol, Precision, Is Mintable) #[pallet::storage] #[pallet::getter(fn asset_infos)] pub type AssetInfos<T: Config> = StorageMap< _, Twox64Concat, T::AssetId, (AssetSymbol, AssetName, BalancePrecision, bool), ValueQuery, >; /// Asset Id -> AssetRecord<T> #[pallet::storage] #[pallet::getter(fn tuple_from_asset_id)] pub type AssetRecordAssetId<T: Config> = StorageMap<_, Twox64Concat, T::AssetId, AssetRecord<T>>; #[pallet::genesis_config] pub struct GenesisConfig<T: Config> { pub endowed_assets: Vec<( T::AssetId, T::AccountId, AssetSymbol, AssetName, BalancePrecision, Balance, bool, )>, } #[cfg(feature = "std")] impl<T: Config> Default for GenesisConfig<T> { fn default() -> Self { Self { endowed_assets: Default::default(), } } } #[pallet::genesis_build] impl<T: Config> GenesisBuild<T> for GenesisConfig<T> { fn build(&self) { self.endowed_assets.iter().cloned().for_each( |(asset_id, account_id, symbol, name, precision, initial_supply, is_mintable)| { Pallet::<T>::register_asset_id( account_id, asset_id, symbol, name, precision, initial_supply, is_mintable, ) .expect("Failed to register asset."); }, ) } } } impl<T: Config> Pallet<T> { /// Generates an `AssetId` for the given `AssetRecord<T>`, and insert record to storage map. pub fn register_asset_id_from_tuple(tuple: &AssetRecord<T>) -> T::AssetId { let mut keccak = Keccak::v256(); keccak.update(b"From AssetRecord"); keccak.update(&tuple.encode()); let mut output = [0u8; 32]; keccak.finalize(&mut output); // More safe to escape. output[0] = 0u8; let asset_id = T::AssetId::from(H256(output)); AssetRecordAssetId::<T>::insert(asset_id, tuple); asset_id } /// Generates an `AssetId` for the given `AccountId`. pub fn gen_asset_id(account_id: &T::AccountId) -> T::AssetId { let mut keccak = Keccak::v256(); keccak.update(b"Sora Asset Id"); keccak.update(&account_id.encode()); keccak.update(&frame_system::Pallet::<T>::account_nonce(&account_id).encode()); let mut output = [0u8; 32]; keccak.finalize(&mut output); // More safe to escape. output[0] = 0u8; T::AssetId::from(H256(output)) } /// Register the given `AssetId`. 
pub fn register_asset_id( account_id: T::AccountId, asset_id: T::AssetId, symbol: AssetSymbol, name: AssetName, precision: BalancePrecision, initial_supply: Balance, is_mintable: bool, ) -> DispatchResult { ensure!( Self::asset_owner(&asset_id).is_none(), Error::<T>::AssetIdAlreadyExists ); frame_system::Pallet::<T>::inc_consumers(&account_id) .map_err(|_| Error::<T>::IncRefError)?; AssetOwners::<T>::insert(asset_id, account_id.clone()); ensure!( crate::is_symbol_valid(&symbol), Error::<T>::InvalidAssetSymbol ); ensure!(crate::is_name_valid(&name), Error::<T>::InvalidAssetName); AssetInfos::<T>::insert(asset_id, (symbol, name, precision, is_mintable)); ensure!( precision <= MAX_ALLOWED_PRECISION, Error::<T>::InvalidPrecision ); let scope = Scope::Limited(hash(&asset_id)); let permission_ids = [TRANSFER, MINT, BURN]; for permission_id in &permission_ids { Permissions::<T>::assign_permission( account_id.clone(), &account_id, *permission_id, scope, )?; } if !initial_supply.is_zero() { T::Currency::deposit(asset_id.clone(), &account_id, initial_supply)?; } frame_system::Pallet::<T>::inc_account_nonce(&account_id); Self::deposit_event(Event::AssetRegistered(asset_id, account_id)); Ok(()) } /// Generates new `AssetId` and registers it from the `account_id`. pub fn register_from( account_id: &T::AccountId, symbol: AssetSymbol, name: AssetName, precision: BalancePrecision, initial_supply: Balance, is_mintable: bool, ) -> Result<T::AssetId, DispatchError> { common::with_transaction(|| { let asset_id = Self::gen_asset_id(account_id); Self::register_asset_id( account_id.clone(), asset_id, symbol, name, precision, initial_supply, is_mintable, )?; Ok(asset_id) }) } pub fn asset_owner(asset_id: &T::AssetId) -> Option<T::AccountId> { let account_id = Self::asset_owners(&asset_id); if account_id == T::AccountId::default() { None } else { Some(account_id) } } #[inline] pub fn asset_exists(asset_id: &T::AssetId) -> bool { Self::asset_owner(asset_id).is_some() } pub fn ensure_asset_exists(asset_id: &T::AssetId) -> DispatchResult { if !Self::asset_exists(asset_id) { return Err(Error::<T>::AssetIdNotExists.into()); } Ok(()) } #[inline] pub fn is_asset_owner(asset_id: &T::AssetId, account_id: &T::AccountId) -> bool { Self::asset_owner(asset_id) .map(|x| &x == account_id) .unwrap_or(false) } fn check_permission_maybe_with_parameters( issuer: &T::AccountId, permission_id: u32, asset_id: &T::AssetId, ) -> Result<(), DispatchError> { Permissions::<T>::check_permission_with_scope( issuer.clone(), permission_id, &Scope::Limited(hash(asset_id)), ) .or_else(|_| { Permissions::<T>::check_permission_with_scope( issuer.clone(), permission_id, &Scope::Unlimited, ) })?; Ok(()) } pub fn total_issuance(asset_id: &T::AssetId) -> Result<Balance, DispatchError> { Self::ensure_asset_exists(asset_id)?; Ok(T::Currency::total_issuance(asset_id.clone())) } pub fn total_balance( asset_id: &T::AssetId, who: &T::AccountId, ) -> Result<Balance, DispatchError> { Self::ensure_asset_exists(asset_id)?; Ok(T::Currency::total_balance(asset_id.clone(), who)) } pub fn free_balance( asset_id: &T::AssetId, who: &T::AccountId, ) -> Result<Balance, DispatchError> { Self::ensure_asset_exists(asset_id)?; Ok(T::Currency::free_balance(asset_id.clone(), who)) } pub fn ensure_can_withdraw( asset_id: &T::AssetId, who: &T::AccountId, amount: Balance, ) -> DispatchResult { Self::ensure_asset_exists(asset_id)?; Self::check_permission_maybe_with_parameters(who, TRANSFER, asset_id)?; T::Currency::ensure_can_withdraw(asset_id.clone(), who, amount) } pub fn 
transfer_from( asset_id: &T::AssetId, from: &T::AccountId, to: &T::AccountId, amount: Balance, ) -> DispatchResult { Self::ensure_asset_exists(asset_id)?; Self::check_permission_maybe_with_parameters(from, TRANSFER, asset_id)?; T::Currency::transfer(asset_id.clone(), from, to, amount) } pub fn force_transfer( asset_id: &T::AssetId, from: &T::AccountId, to: &T::AccountId, amount: Balance, ) -> DispatchResult { T::Currency::transfer(asset_id.clone(), from, to, amount) } pub fn mint_to( asset_id: &T::AssetId, issuer: &T::AccountId, to: &T::AccountId, amount: Balance, ) -> DispatchResult { Self::ensure_asset_exists(asset_id)?; Self::check_permission_maybe_with_parameters(issuer, MINT, asset_id)?; let (_, _, _, is_mintable) = AssetInfos::<T>::get(asset_id); ensure!(is_mintable, Error::<T>::AssetSupplyIsNotMintable); T::Currency::deposit(asset_id.clone(), to, amount) } pub fn burn_from( asset_id: &T::AssetId, issuer: &T::AccountId, to: &T::AccountId, amount: Balance, ) -> DispatchResult { Self::ensure_asset_exists(asset_id)?; // Holder can burn its funds. if issuer != to { Self::check_permission_maybe_with_parameters(issuer, BURN, asset_id)?; } T::Currency::withdraw(*asset_id, to, amount) } pub fn update_balance( asset_id: &T::AssetId, who: &T::AccountId, by_amount: Amount, ) -> DispatchResult { Self::check_permission_maybe_with_parameters(who, MINT, asset_id)?; Self::check_permission_maybe_with_parameters(who, BURN, asset_id)?; if by_amount.is_positive() { let (_, _, _, is_mintable) = AssetInfos::<T>::get(asset_id); ensure!(is_mintable, Error::<T>::AssetSupplyIsNotMintable); } T::Currency::update_balance(asset_id.clone(), who, by_amount) } pub fn can_reserve(asset_id: T::AssetId, who: &T::AccountId, amount: Balance) -> bool { T::Currency::can_reserve(asset_id, who, amount) } pub fn reserve( asset_id: T::AssetId, who: &T::AccountId, amount: Balance, ) -> Result<(), DispatchError> { Self::ensure_asset_exists(&asset_id)?; T::Currency::reserve(asset_id, who, amount) } pub fn unreserve( asset_id: T::AssetId, who: &T::AccountId, amount: Balance, ) -> Result<Balance, DispatchError> { Self::ensure_asset_exists(&asset_id)?; let amount = T::Currency::unreserve(asset_id, who, amount); Ok(amount) } pub fn set_non_mintable_from(asset_id: &T::AssetId, who: &T::AccountId) -> DispatchResult { ensure!( Self::is_asset_owner(asset_id, who), Error::<T>::InvalidAssetOwner ); AssetInfos::<T>::mutate(asset_id, |(_, _, _, ref mut is_mintable)| { ensure!(*is_mintable, Error::<T>::AssetSupplyIsNotMintable); *is_mintable = false; Ok(()) }) } pub fn list_registered_asset_ids() -> Vec<T::AssetId> { AssetInfos::<T>::iter().map(|(key, _)| key).collect() } pub fn list_registered_asset_infos( ) -> Vec<(T::AssetId, AssetSymbol, AssetName, BalancePrecision, bool)> { AssetInfos::<T>::iter() .map(|(key, (symbol, name, precision, is_mintable))| { (key, symbol, name, precision, is_mintable) }) .collect() } pub fn get_asset_info( asset_id: &T::AssetId, ) -> (AssetSymbol, AssetName, BalancePrecision, bool) { AssetInfos::<T>::get(asset_id) } } /// According to UTF-8 encoding, graphemes that start with byte 0b0XXXXXXX belong /// to ASCII range and are of single byte, therefore passing check in range 'A' to 'Z' /// guarantees that all graphemes are of length 1, therefore length check is valid. 
pub fn is_symbol_valid(symbol: &AssetSymbol) -> bool { symbol.0.len() <= ASSET_SYMBOL_MAX_LENGTH && symbol.0.iter().all(|byte| (b'A'..=b'Z').contains(&byte)) } /// According to UTF-8 encoding, graphemes that start with byte 0b0XXXXXXX belong /// to ASCII range and are of single byte, therefore passing check in range 'A' to 'z' /// guarantees that all graphemes are of length 1, therefore length check is valid. pub fn is_name_valid(name: &AssetName) -> bool { let mut allowed_graphemes = (b'A'..=b'Z').collect::<Vec<_>>(); allowed_graphemes.extend(b'a'..=b'z'); allowed_graphemes.extend(b'0'..=b'9'); allowed_graphemes.push(b' '); name.0.len() <= ASSET_NAME_MAX_LENGTH && name.0.iter().all(|byte| allowed_graphemes.contains(&byte)) }
34.977454
135
0.598529
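The `is_symbol_valid` / `is_name_valid` helpers at the end of the assets pallet above reduce to ASCII byte-range checks, which is why the byte-level length comparison is sound. The following self-contained sketch mirrors that logic over `&str` for illustration only; `symbol_ok`, `name_ok`, and the `main` driver are made-up names, not part of the pallet:

```rust
const ASSET_SYMBOL_MAX_LENGTH: usize = 7;
const ASSET_NAME_MAX_LENGTH: usize = 33;

// Same rule as `is_symbol_valid`: uppercase ASCII letters only, at most 7 bytes.
fn symbol_ok(symbol: &str) -> bool {
    symbol.len() <= ASSET_SYMBOL_MAX_LENGTH
        && symbol.bytes().all(|b| b.is_ascii_uppercase())
}

// Same rule as `is_name_valid`: ASCII letters, digits, or spaces, at most 33 bytes.
fn name_ok(name: &str) -> bool {
    name.len() <= ASSET_NAME_MAX_LENGTH
        && name.bytes().all(|b| b.is_ascii_alphanumeric() || b == b' ')
}

fn main() {
    assert!(symbol_ok("XOR"));
    assert!(!symbol_ok("xor"));      // lowercase is rejected for symbols
    assert!(name_ok("Sora Token 01"));
    assert!(!name_ok("Sora_Token")); // underscore is outside the allowed set
}
```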
eb84204de9362259a7a485a6702a01d0ab828e3a
4,354
use std::collections::HashSet;
use std::collections::VecDeque;
use std::iter::FromIterator;

use ring::digest;
use PeerId;

///
/// Inspector for incoming transitions.
///
/// This trait has to be implemented by anyone using this library. It
/// checks an unknown transition and provides functions to store such
/// transitions and retrieve them later from a database.
pub trait Inspector {
    fn approve(&self, trans: &Transition) -> bool;
    fn store(&self, trans: Transition);
    fn restore(&self, keys: Vec<TransitionKey>) -> Option<Vec<Transition>>;
    fn tips(&self) -> Vec<TransitionKey>;
    fn has(&self, key: &TransitionKey) -> bool;
    fn get_file(&self, key: &[u8]) -> Option<Vec<u8>>;

    fn subgraph(&self, mut tips: Vec<Transition>) -> Vec<Transition> {
        // create a sample of the subgraph, starting from the given tips
        let mut in_transitions: HashSet<Transition> = HashSet::from_iter(tips.iter().cloned());
        while in_transitions.len() < 64 && !tips.is_empty() {
            tips = tips.into_iter()
                .map(|x| {
                    let refs = x.refs.into_iter().filter(|x| self.has(&x)).collect();
                    self.restore(refs).unwrap()
                }).flatten().collect();

            for tip in &tips {
                in_transitions.insert(tip.clone());
            }
        }

        trace!("Got {} transitions for checking", in_transitions.len());

        // start at our tips and run till we reach the sampled transitions
        let tips = self.restore(self.tips()).unwrap();
        let mut queue = VecDeque::from_iter(tips.iter().cloned());
        let mut transitions = Vec::new();
        let mut inserted = HashSet::new();
        while !queue.is_empty() {
            let a = match queue.pop_front() {
                Some(x) => {
                    if inserted.contains(&x.key) {
                        continue;
                    }

                    if !in_transitions.contains(&x) {
                        trace!("Transition {}", x.key.to_string());
                        transitions.push(x.clone());
                        inserted.insert(x.key.clone());
                    }

                    x
                },
                None => break
            };

            let refs = a.refs.into_iter().filter(|x| self.has(&x)).collect();
            for b in self.restore(refs).unwrap() {
                if !in_transitions.contains(&b) {
                    queue.push_back(b);
                }
            }
        }

        transitions.sort_unstable_by_key(|x| x.key.0);
        transitions.dedup();

        trace!("Returning {} transitions", transitions.len());

        transitions
    }
}

/// Transition key is the 256-bit hash of the body
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq, Hash)]
pub struct TransitionKey(pub [u8; 32]);

impl TransitionKey {
    pub fn from_vec(buf: &[u8]) -> TransitionKey {
        let mut key = TransitionKey([0; 32]);

        key.0.copy_from_slice(&buf);

        key
    }

    pub fn to_string(&self) -> String {
        let mut tmp = String::new();
        for i in 0..32 {
            tmp.push_str(&format!("{:02X}", (self.0)[i]));
        }

        tmp
    }
}

/// A signed transition in a DAG
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq, Hash)]
pub struct Transition {
    pub key: TransitionKey,
    pub pk: PeerId,
    pub refs: Vec<TransitionKey>,
    pub body: Option<Vec<u8>>,
    pub sign: [u8; 32],
    pub state: u8
}

impl Transition {
    /// Ignore signature for now
    pub fn new(pk: PeerId, refs: Vec<TransitionKey>, data: Vec<u8>) -> Transition {
        let mut tmp = Transition {
            key: TransitionKey([0u8; 32]),
            pk,
            refs,
            body: Some(data),
            sign: [0; 32],
            state: 2
        };

        tmp.key = tmp.key();

        tmp
    }

    pub fn key(&self) -> TransitionKey {
        let mut key = TransitionKey([0u8; 32]);

        // build buffer from refs and body
        let mut buf = Vec::new();
        for a in &self.refs {
            buf.extend_from_slice(&a.0);
        }
        buf.extend_from_slice(&self.body.clone().unwrap());

        let hash = digest::digest(&digest::SHA256, &buf);
        key.0.copy_from_slice(&hash.as_ref());

        key
    }
}
28.457516
95
0.53514
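`Transition::key` above derives a key by hashing the concatenated reference keys followed by the body with SHA-256 (`ring::digest`), and `TransitionKey::to_string` renders it as 64 uppercase hex digits. A minimal standalone sketch of that derivation; the `derive_key` helper is hypothetical and only assumes the `ring` crate is available:

```rust
use ring::digest;

// Hash the reference keys followed by the body, exactly in that order.
fn derive_key(refs: &[[u8; 32]], body: &[u8]) -> [u8; 32] {
    let mut buf = Vec::new();
    for r in refs {
        buf.extend_from_slice(r);
    }
    buf.extend_from_slice(body);

    let hash = digest::digest(&digest::SHA256, &buf);
    let mut key = [0u8; 32];
    key.copy_from_slice(hash.as_ref());
    key
}

fn main() {
    let key = derive_key(&[[0u8; 32]], b"hello");
    // Two uppercase hex digits per byte, 64 characters total, matching
    // the rendering used by `TransitionKey::to_string`.
    let hex: String = key.iter().map(|b| format!("{:02X}", b)).collect();
    assert_eq!(hex.len(), 64);
    println!("{}", hex);
}
```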
0e838854156f18986662c89c975b37d6a5a2f2d6
1,990
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. #![cfg_attr(feature = "mesalock_sgx", no_std)] #[cfg(feature = "mesalock_sgx")] extern crate sgx_tstd as std; #[cfg(feature = "mesalock_sgx")] use std::prelude::v1::*; mod echo; mod gbdt_predict; mod gbdt_train; mod logistic_regression_predict; mod logistic_regression_train; mod online_decrypt; mod private_join_and_compute; pub use echo::Echo; pub use gbdt_predict::GbdtPredict; pub use gbdt_train::GbdtTrain; pub use logistic_regression_predict::LogisticRegressionPredict; pub use logistic_regression_train::LogisticRegressionTrain; pub use online_decrypt::OnlineDecrypt; pub use private_join_and_compute::PrivateJoinAndCompute; #[cfg(feature = "enclave_unit_test")] pub mod tests { use super::*; use teaclave_test_utils::check_all_passed; pub fn run_tests() -> bool { check_all_passed!( echo::tests::run_tests(), gbdt_train::tests::run_tests(), gbdt_predict::tests::run_tests(), logistic_regression_train::tests::run_tests(), logistic_regression_predict::tests::run_tests(), online_decrypt::tests::run_tests(), private_join_and_compute::tests::run_tests(), ) } }
34.310345
63
0.731658
f797fab9a60053b2b7b63fd3ee05e5721c872578
314
//! Prints the runtime's execution log on the standard output. use async_std::task; fn main() { femme::with_level(log::LevelFilter::Trace); task::block_on(async { let handle = task::spawn(async { log::info!("Hello world!"); }); handle.await; }) }
19.625
63
0.547771
565b9c7b1ca546c1a080eae7425090ec2138dba0
543
use anyhow::{anyhow, Result}; pub(crate) fn get_with_default<T: std::str::FromStr>(key: &str, def: &str) -> Result<T, T::Err> { if let Ok(v) = dotenv::var(key) { if v == "" { def.to_owned().parse::<T>() } else { v.parse::<T>() } } else { def.to_owned().parse::<T>() } } pub(crate) fn get<T: std::str::FromStr>(key: &str) -> Result<T> { let t = dotenv::var(key)?; match t.parse::<T>() { Ok(v) => Ok(v), Err(_) => Err(anyhow!("parser error")), } }
25.857143
97
0.467772
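A hedged usage sketch for the two dotenv helpers above, as they might be called from elsewhere in the same crate. The variable names and the `load_config` function are assumptions; `?` on `get_with_default` works because its `ParseIntError` converts into `anyhow::Error`:

```rust
fn load_config() -> anyhow::Result<()> {
    // Falls back to 8080 when PORT is unset or set to an empty string.
    let port: u16 = get_with_default("PORT", "8080")?;
    // Fails if DATABASE_URL is missing from both .env and the process environment.
    let db_url: String = get("DATABASE_URL")?;
    println!("listening on {}, database at {}", port, db_url);
    Ok(())
}
```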
8994df73c1087f8a93ab90e78374eb523984d796
366
//! Tests auto-converted from "sass-spec/spec/non_conformant/basic/39_dash_match_attribute_selector.hrx" #[test] fn test() { assert_eq!( crate::rsass( "div[class|=\"blah\"] {\ \n color: blue;\ \n}" ) .unwrap(), "div[class|=blah] {\ \n color: blue;\ \n}\ \n" ); }
20.333333
104
0.464481
4a85bf4c772cfa7b5c7a4deec3dda1631dd73d2c
185
#[macro_use] extern crate xplane_plugin; use xplane_plugin::*; extern crate xplm; extern crate xpgl; extern crate mapcore; mod plugin; xplane_plugin!(plugin::TerrainDisplayPlugin);
14.230769
45
0.783784
6765932e732751b7f8a1b0380d93a6d0c65bd455
2,474
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::CSR5 { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = r" Value of the field"] pub struct CSR5R { bits: u32, } impl CSR5R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } } #[doc = r" Proxy"] pub struct _CSR5W<'a> { w: &'a mut W, } impl<'a> _CSR5W<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u32) -> &'a mut W { const MASK: u32 = 4294967295; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 0:31 - CSR5"] #[inline] pub fn csr5(&self) -> CSR5R { let bits = { const MASK: u32 = 4294967295; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u32 }; CSR5R { bits } } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 0:31 - CSR5"] #[inline] pub fn csr5(&mut self) -> _CSR5W { _CSR5W { w: self } } }
23.339623
59
0.490299
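The register block above follows the usual svd2rust accessor pattern: closures passed to `read`, `write`, and `modify`, with raw `bits` writes being `unsafe`. A hedged sketch of how such a register is typically driven; the `poke_csr5` function and the way a `CSR5` reference is obtained are assumptions, and this cannot run without the surrounding peripheral access crate:

```rust
fn poke_csr5(csr5: &CSR5) {
    // Whole-register read of the raw 32-bit value.
    let saved: u32 = csr5.read().csr5().bits();

    // Read-modify-write: set bit 0 while preserving the other bits.
    csr5.modify(|r, w| unsafe { w.csr5().bits(r.csr5().bits() | 1) });

    // Write back the saved value, then return the register to its reset value.
    csr5.write(|w| unsafe { w.csr5().bits(saved) });
    csr5.reset();
}
```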
018d67e78a38b65e37c2bc7df839d5cba926a605
730
#[cfg(test)] mod tests { use super::*; use crate::oxide::Oxide; #[test] fn create_oxide_app() { let app = Oxide::new(300, 400); assert_eq!(400 , app.settings.width); assert_eq!(300 , app.settings.height); assert_eq!(0 , app.settings.min_width); assert_eq!(0 , app.settings.max_width); assert_eq!(false, app.settings.always_on_top); assert_eq!(false, app.settings.full_screen); assert_eq!(true , app.settings.minimizable); assert_eq!(true , app.settings.maximizable); assert_eq!(true , app.settings.movable); assert_eq!("App", app.settings.title); assert_eq!("#FFFFFF", app.settings.background_color); } }
33.181818
61
0.609589
095059c5183676232c48b36093a244a9fe69ba3b
8,690
// This file is Copyright its original authors, visible in version control // history. // // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. // You may not use this file except in accordance with one or both of these // licenses. use bitcoin::blockdata::script::Builder; use bitcoin::blockdata::transaction::TxOut; use bitcoin::hash_types::BlockHash; use lightning::chain; use lightning::chain::transaction::OutPoint; use lightning::ln::channelmanager::{ChannelDetails, ChannelCounterparty}; use lightning::ln::features::InitFeatures; use lightning::ln::msgs; use lightning::routing::router::{find_route, PaymentParameters, RouteHint, RouteHintHop, RouteParameters}; use lightning::routing::scoring::FixedPenaltyScorer; use lightning::util::logger::Logger; use lightning::util::ser::Readable; use lightning::routing::network_graph::{NetworkGraph, RoutingFees}; use bitcoin::hashes::Hash; use bitcoin::secp256k1::key::PublicKey; use bitcoin::network::constants::Network; use bitcoin::blockdata::constants::genesis_block; use utils::test_logger; use std::collections::HashSet; use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; #[inline] pub fn slice_to_be16(v: &[u8]) -> u16 { ((v[0] as u16) << 8*1) | ((v[1] as u16) << 8*0) } #[inline] pub fn slice_to_be32(v: &[u8]) -> u32 { ((v[0] as u32) << 8*3) | ((v[1] as u32) << 8*2) | ((v[2] as u32) << 8*1) | ((v[3] as u32) << 8*0) } #[inline] pub fn slice_to_be64(v: &[u8]) -> u64 { ((v[0] as u64) << 8*7) | ((v[1] as u64) << 8*6) | ((v[2] as u64) << 8*5) | ((v[3] as u64) << 8*4) | ((v[4] as u64) << 8*3) | ((v[5] as u64) << 8*2) | ((v[6] as u64) << 8*1) | ((v[7] as u64) << 8*0) } struct InputData { data: Vec<u8>, read_pos: AtomicUsize, } impl InputData { fn get_slice(&self, len: usize) -> Option<&[u8]> { let old_pos = self.read_pos.fetch_add(len, Ordering::AcqRel); if self.data.len() < old_pos + len { return None; } Some(&self.data[old_pos..old_pos + len]) } fn get_slice_nonadvancing(&self, len: usize) -> Option<&[u8]> { let old_pos = self.read_pos.load(Ordering::Acquire); if self.data.len() < old_pos + len { return None; } Some(&self.data[old_pos..old_pos + len]) } } struct FuzzChainSource { input: Arc<InputData>, } impl chain::Access for FuzzChainSource { fn get_utxo(&self, _genesis_hash: &BlockHash, _short_channel_id: u64) -> Result<TxOut, chain::AccessError> { match self.input.get_slice(2) { Some(&[0, _]) => Err(chain::AccessError::UnknownChain), Some(&[1, _]) => Err(chain::AccessError::UnknownTx), Some(&[_, x]) => Ok(TxOut { value: 0, script_pubkey: Builder::new().push_int(x as i64).into_script().to_v0_p2wsh() }), None => Err(chain::AccessError::UnknownTx), _ => unreachable!(), } } } #[inline] pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) { let input = Arc::new(InputData { data: data.to_vec(), read_pos: AtomicUsize::new(0), }); macro_rules! get_slice_nonadvancing { ($len: expr) => { match input.get_slice_nonadvancing($len as usize) { Some(slice) => slice, None => return, } } } macro_rules! get_slice { ($len: expr) => { match input.get_slice($len as usize) { Some(slice) => slice, None => return, } } } macro_rules! 
decode_msg { ($MsgType: path, $len: expr) => {{ let mut reader = ::std::io::Cursor::new(get_slice!($len)); match <$MsgType>::read(&mut reader) { Ok(msg) => { assert_eq!(reader.position(), $len as u64); msg }, Err(e) => match e { msgs::DecodeError::UnknownVersion => return, msgs::DecodeError::UnknownRequiredFeature => return, msgs::DecodeError::InvalidValue => return, msgs::DecodeError::BadLengthDescriptor => return, msgs::DecodeError::ShortRead => panic!("We picked the length..."), msgs::DecodeError::Io(e) => panic!("{:?}", e), msgs::DecodeError::UnsupportedCompression => return, } } }} } macro_rules! decode_msg_with_len16 { ($MsgType: path, $excess: expr) => { { let extra_len = slice_to_be16(get_slice_nonadvancing!(2)); decode_msg!($MsgType, 2 + (extra_len as usize) + $excess) } } } macro_rules! get_pubkey { () => { match PublicKey::from_slice(get_slice!(33)) { Ok(key) => key, Err(_) => return, } } } let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new("".to_owned(), out)); let our_pubkey = get_pubkey!(); let net_graph = NetworkGraph::new(genesis_block(Network::Bitcoin).header.block_hash()); let mut node_pks = HashSet::new(); let mut scid = 42; loop { match get_slice!(1)[0] { 0 => { let start_len = slice_to_be16(&get_slice_nonadvancing!(2)[0..2]) as usize; let addr_len = slice_to_be16(&get_slice_nonadvancing!(start_len+2 + 74)[start_len+2 + 72..start_len+2 + 74]); if addr_len > (37+1)*4 { return; } let msg = decode_msg_with_len16!(msgs::UnsignedNodeAnnouncement, 288); node_pks.insert(msg.node_id); let _ = net_graph.update_node_from_unsigned_announcement(&msg); }, 1 => { let msg = decode_msg_with_len16!(msgs::UnsignedChannelAnnouncement, 32+8+33*4); node_pks.insert(msg.node_id_1); node_pks.insert(msg.node_id_2); let _ = net_graph.update_channel_from_unsigned_announcement::<&FuzzChainSource>(&msg, &None); }, 2 => { let msg = decode_msg_with_len16!(msgs::UnsignedChannelAnnouncement, 32+8+33*4); node_pks.insert(msg.node_id_1); node_pks.insert(msg.node_id_2); let _ = net_graph.update_channel_from_unsigned_announcement(&msg, &Some(&FuzzChainSource { input: Arc::clone(&input) })); }, 3 => { let _ = net_graph.update_channel_unsigned(&decode_msg!(msgs::UnsignedChannelUpdate, 72)); }, 4 => { let short_channel_id = slice_to_be64(get_slice!(8)); net_graph.close_channel_from_update(short_channel_id, false); }, _ if node_pks.is_empty() => {}, _ => { let mut first_hops_vec = Vec::new(); let first_hops = match get_slice!(1)[0] { 0 => None, count => { for _ in 0..count { scid += 1; let rnid = node_pks.iter().skip(slice_to_be16(get_slice!(2))as usize % node_pks.len()).next().unwrap(); first_hops_vec.push(ChannelDetails { channel_id: [0; 32], counterparty: ChannelCounterparty { node_id: *rnid, features: InitFeatures::known(), unspendable_punishment_reserve: 0, forwarding_info: None, }, funding_txo: Some(OutPoint { txid: bitcoin::Txid::from_slice(&[0; 32]).unwrap(), index: 0 }), short_channel_id: Some(scid), channel_value_satoshis: slice_to_be64(get_slice!(8)), user_channel_id: 0, inbound_capacity_msat: 0, unspendable_punishment_reserve: None, confirmations_required: None, force_close_spend_delay: None, is_outbound: true, is_funding_locked: true, is_usable: true, is_public: true, balance_msat: 0, outbound_capacity_msat: 0, }); } Some(&first_hops_vec[..]) }, }; let mut last_hops = Vec::new(); { let count = get_slice!(1)[0]; for _ in 0..count { scid += 1; let rnid = node_pks.iter().skip(slice_to_be16(get_slice!(2))as usize % node_pks.len()).next().unwrap(); 
last_hops.push(RouteHint(vec![RouteHintHop { src_node_id: *rnid, short_channel_id: scid, fees: RoutingFees { base_msat: slice_to_be32(get_slice!(4)), proportional_millionths: slice_to_be32(get_slice!(4)), }, cltv_expiry_delta: slice_to_be16(get_slice!(2)), htlc_minimum_msat: Some(slice_to_be64(get_slice!(8))), htlc_maximum_msat: None, }])); } } let scorer = FixedPenaltyScorer::with_penalty(0); for target in node_pks.iter() { let route_params = RouteParameters { payment_params: PaymentParameters::from_node_id(*target).with_route_hints(last_hops.clone()), final_value_msat: slice_to_be64(get_slice!(8)), final_cltv_expiry_delta: slice_to_be32(get_slice!(4)), }; let _ = find_route(&our_pubkey, &route_params, &net_graph, first_hops.map(|c| c.iter().collect::<Vec<_>>()).as_ref().map(|a| a.as_slice()), Arc::clone(&logger), &scorer); } }, } } } pub fn router_test<Out: test_logger::Output>(data: &[u8], out: Out) { do_test(data, out); } #[no_mangle] pub extern "C" fn router_run(data: *const u8, datalen: usize) { do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull {}); }
31.485507
125
0.646605
fe2cf2cb2f7dde5f36733411974c9bfecc5a2308
32,270
//! A basic table view implementation for [cursive](https://crates.io/crates/cursive). #![deny( missing_docs, missing_copy_implementations, trivial_casts, trivial_numeric_casts, unsafe_code, unused_import_braces, unused_qualifications )] // Crate Dependencies --------------------------------------------------------- extern crate cursive; // STD Dependencies ----------------------------------------------------------- use std::cmp::{self, Ordering}; use std::collections::HashMap; use std::hash::Hash; use std::rc::Rc; // External Dependencies ------------------------------------------------------ use cursive::align::HAlign; use cursive::direction::Direction; use cursive::event::{Callback, Event, EventResult, Key}; use cursive::theme; use cursive::vec::Vec2; use cursive::view::{ScrollBase, View}; use cursive::With; use cursive::{Cursive, Printer}; /// A trait for displaying and sorting items inside a /// [`TableView`](struct.TableView.html). pub trait TableViewItem<H>: Clone + Sized where H: Eq + Hash + Copy + Clone + 'static, { /// Method returning a string representation of the item for the /// specified column from type `H`. fn to_column(&self, column: H) -> String; /// Method comparing two items via their specified column from type `H`. fn cmp(&self, other: &Self, column: H) -> Ordering where Self: Sized; } /// Callback used when a column is sorted. /// /// It takes the column and the ordering as input. /// /// This is a private type to help readability. type OnSortCallback<H> = Rc<dyn Fn(&mut Cursive, H, Ordering)>; /// Callback taking as argument the row and the index of an element. /// /// This is a private type to help readability. type IndexCallback = Rc<dyn Fn(&mut Cursive, usize, usize)>; /// View to select an item among a list, supporting multiple columns for sorting. 
/// /// # Examples /// /// ```rust /// # extern crate cursive; /// # extern crate cursive_table_view; /// # use std::cmp::Ordering; /// # use cursive_table_view::{TableView, TableViewItem}; /// # use cursive::align::HAlign; /// # fn main() { /// // Provide a type for the table's columns /// #[derive(Copy, Clone, PartialEq, Eq, Hash)] /// enum BasicColumn { /// Name, /// Count, /// Rate /// } /// /// // Define the item type /// #[derive(Clone, Debug)] /// struct Foo { /// name: String, /// count: usize, /// rate: usize /// } /// /// impl TableViewItem<BasicColumn> for Foo { /// /// fn to_column(&self, column: BasicColumn) -> String { /// match column { /// BasicColumn::Name => self.name.to_string(), /// BasicColumn::Count => format!("{}", self.count), /// BasicColumn::Rate => format!("{}", self.rate) /// } /// } /// /// fn cmp(&self, other: &Self, column: BasicColumn) -> Ordering where Self: Sized { /// match column { /// BasicColumn::Name => self.name.cmp(&other.name), /// BasicColumn::Count => self.count.cmp(&other.count), /// BasicColumn::Rate => self.rate.cmp(&other.rate) /// } /// } /// /// } /// /// // Configure the actual table /// let table = TableView::<Foo, BasicColumn>::new() /// .column(BasicColumn::Name, "Name", |c| c.width(20)) /// .column(BasicColumn::Count, "Count", |c| c.align(HAlign::Center)) /// .column(BasicColumn::Rate, "Rate", |c| { /// c.ordering(Ordering::Greater).align(HAlign::Right).width(20) /// }) /// .default_column(BasicColumn::Name); /// # } /// ``` pub struct TableView<T: TableViewItem<H>, H: Eq + Hash + Copy + Clone + 'static> { enabled: bool, scrollbase: ScrollBase, last_size: Vec2, column_select: bool, columns: Vec<TableColumn<H>>, column_indicies: HashMap<H, usize>, focus: usize, items: Vec<T>, rows_to_items: Vec<usize>, on_sort: Option<OnSortCallback<H>>, // TODO Pass drawing offsets into the handlers so a popup menu // can be created easily? on_submit: Option<IndexCallback>, on_select: Option<IndexCallback>, } impl<T: TableViewItem<H>, H: Eq + Hash + Copy + Clone + 'static> Default for TableView<T, H> { /// Creates a new empty `TableView` without any columns. /// /// See [`TableView::new()`]. fn default() -> Self { Self::new() } } impl<T: TableViewItem<H>, H: Eq + Hash + Copy + Clone + 'static> TableView<T, H> { /// Creates a new empty `TableView` without any columns. /// /// A TableView should be accompanied by a enum of type `H` representing /// the table columns. pub fn new() -> Self { Self { enabled: true, scrollbase: ScrollBase::new(), last_size: Vec2::new(0, 0), column_select: false, columns: Vec::new(), column_indicies: HashMap::new(), focus: 0, items: Vec::new(), rows_to_items: Vec::new(), on_sort: None, on_submit: None, on_select: None, } } /// Adds a column for the specified table colum from type `H` along with /// a title for its visual display. /// /// The provided callback can be used to further configure the /// created [`TableColumn`](struct.TableColumn.html). pub fn column<S: Into<String>, C: FnOnce(TableColumn<H>) -> TableColumn<H>>( mut self, column: H, title: S, callback: C, ) -> Self { self.column_indicies.insert(column, self.columns.len()); self.columns .push(callback(TableColumn::new(column, title.into()))); // Make the first colum the default one if self.columns.len() == 1 { self.default_column(column) } else { self } } /// Sets the initially active column of the table. 
pub fn default_column(mut self, column: H) -> Self { if self.column_indicies.contains_key(&column) { for c in &mut self.columns { c.selected = c.column == column; if c.selected { c.order = c.default_order; } else { c.order = Ordering::Equal; } } } self } /// Sorts the table using the specified table `column` and the passed /// `order`. pub fn sort_by(&mut self, column: H, order: Ordering) { if self.column_indicies.contains_key(&column) { for c in &mut self.columns { c.selected = c.column == column; if c.selected { c.order = order; } else { c.order = Ordering::Equal; } } } self.sort_items(column, order); } /// Sorts the table using the currently active column and its /// ordering. pub fn sort(&mut self) { if let Some((column, order)) = self.order() { self.sort_items(column, order); } } /// Returns the currently active column that is used for sorting /// along with its ordering. /// /// Might return `None` if there are currently no items in the table /// and it has not been sorted yet. pub fn order(&self) -> Option<(H, Ordering)> { for c in &self.columns { if c.order != Ordering::Equal { return Some((c.column, c.order)); } } None } /// Disables this view. /// /// A disabled view cannot be selected. pub fn disable(&mut self) { self.enabled = false; } /// Re-enables this view. pub fn enable(&mut self) { self.enabled = true; } /// Enable or disable this view. pub fn set_enabled(&mut self, enabled: bool) { self.enabled = enabled; } /// Returns `true` if this view is enabled. pub fn is_enabled(&self) -> bool { self.enabled } /// Sets a callback to be used when a selected column is sorted by /// pressing `<Enter>`. /// /// # Example /// /// ```norun /// table.set_on_sort(|siv: &mut Cursive, column: BasicColumn, order: Ordering| { /// /// }); /// ``` pub fn set_on_sort<F>(&mut self, cb: F) where F: Fn(&mut Cursive, H, Ordering) + 'static, { self.on_sort = Some(Rc::new(move |s, h, o| cb(s, h, o))); } /// Sets a callback to be used when a selected column is sorted by /// pressing `<Enter>`. /// /// Chainable variant. /// /// # Example /// /// ```norun /// table.on_sort(|siv: &mut Cursive, column: BasicColumn, order: Ordering| { /// /// }); /// ``` pub fn on_sort<F>(self, cb: F) -> Self where F: Fn(&mut Cursive, H, Ordering) + 'static, { self.with(|t| t.set_on_sort(cb)) } /// Sets a callback to be used when `<Enter>` is pressed while an item /// is selected. /// /// Both the currently selected row and the index of the corresponding item /// within the underlying storage vector will be given to the callback. /// /// # Example /// /// ```norun /// table.set_on_submit(|siv: &mut Cursive, row: usize, index: usize| { /// /// }); /// ``` pub fn set_on_submit<F>(&mut self, cb: F) where F: Fn(&mut Cursive, usize, usize) + 'static, { self.on_submit = Some(Rc::new(move |s, row, index| cb(s, row, index))); } /// Sets a callback to be used when `<Enter>` is pressed while an item /// is selected. /// /// Both the currently selected row and the index of the corresponding item /// within the underlying storage vector will be given to the callback. /// /// Chainable variant. /// /// # Example /// /// ```norun /// table.on_submit(|siv: &mut Cursive, row: usize, index: usize| { /// /// }); /// ``` pub fn on_submit<F>(self, cb: F) -> Self where F: Fn(&mut Cursive, usize, usize) + 'static, { self.with(|t| t.set_on_submit(cb)) } /// Sets a callback to be used when an item is selected. 
/// /// Both the currently selected row and the index of the corresponding item /// within the underlying storage vector will be given to the callback. /// /// # Example /// /// ```norun /// table.set_on_select(|siv: &mut Cursive, row: usize, index: usize| { /// /// }); /// ``` pub fn set_on_select<F>(&mut self, cb: F) where F: Fn(&mut Cursive, usize, usize) + 'static, { self.on_select = Some(Rc::new(move |s, row, index| cb(s, row, index))); } /// Sets a callback to be used when an item is selected. /// /// Both the currently selected row and the index of the corresponding item /// within the underlying storage vector will be given to the callback. /// /// Chainable variant. /// /// # Example /// /// ```norun /// table.on_select(|siv: &mut Cursive, row: usize, index: usize| { /// /// }); /// ``` pub fn on_select<F>(self, cb: F) -> Self where F: Fn(&mut Cursive, usize, usize) + 'static, { self.with(|t| t.set_on_select(cb)) } /// Removes all items from this view. pub fn clear(&mut self) { self.items.clear(); self.rows_to_items.clear(); self.focus = 0; } /// Returns the number of items in this table. pub fn len(&self) -> usize { self.items.len() } /// Returns `true` if this table has no items. pub fn is_empty(&self) -> bool { self.items.is_empty() } /// Returns the index of the currently selected table row. pub fn row(&self) -> Option<usize> { if self.items.is_empty() { None } else { Some(self.focus) } } /// Selects the row at the specified index. pub fn set_selected_row(&mut self, row_index: usize) { self.focus = row_index; self.scrollbase.scroll_to(row_index); } /// Selects the row at the specified index. /// /// Chainable variant. pub fn selected_row(self, row_index: usize) -> Self { self.with(|t| t.set_selected_row(row_index)) } /// Sets the contained items of the table. /// /// The currently active sort order is preserved and will be applied to all /// items. pub fn set_items(&mut self, items: Vec<T>) { self.items = items; self.rows_to_items = Vec::with_capacity(self.items.len()); for i in 0..self.items.len() { self.rows_to_items.push(i); } if let Some((column, order)) = self.order() { self.sort_by(column, order); } self.scrollbase .set_heights(self.last_size.y.saturating_sub(2), self.rows_to_items.len()); self.set_selected_row(0); } /// Sets the contained items of the table. /// /// The order of the items will be preserved even when the table is sorted. /// /// Chainable variant. pub fn items(self, items: Vec<T>) -> Self { self.with(|t| t.set_items(items)) } /// Returns a immmutable reference to the item at the specified index /// within the underlying storage vector. pub fn borrow_item(&mut self, index: usize) -> Option<&T> { self.items.get(index) } /// Returns a mutable reference to the item at the specified index within /// the underlying storage vector. pub fn borrow_item_mut(&mut self, index: usize) -> Option<&mut T> { self.items.get_mut(index) } /// Returns a immmutable reference to the items contained within the table. pub fn borrow_items(&mut self) -> &Vec<T> { &self.items } /// Returns a mutable reference to the items contained within the table. /// /// Can be used to modify the items in place. pub fn borrow_items_mut(&mut self) -> &mut Vec<T> { &mut self.items } /// Returns the index of the currently selected item within the underlying /// storage vector. pub fn item(&self) -> Option<usize> { if self.items.is_empty() { None } else { Some(self.rows_to_items[self.focus]) } } /// Selects the item at the specified index within the underlying storage /// vector. 
pub fn set_selected_item(&mut self, item_index: usize) { // TODO optimize the performance for very large item lists if item_index < self.items.len() { for (row, item) in self.rows_to_items.iter().enumerate() { if *item == item_index { self.focus = row; self.scrollbase.scroll_to(row); break; } } } } /// Selects the item at the specified index within the underlying storage /// vector. /// /// Chainable variant. pub fn selected_item(self, item_index: usize) -> Self { self.with(|t| t.set_selected_item(item_index)) } /// Inserts a new item into the table. /// /// The currently active sort order is preserved and will be applied to the /// newly inserted item. pub fn insert_item(&mut self, item: T) { self.items.push(item); self.rows_to_items.push(self.items.len() - 1); self.scrollbase .set_heights(self.last_size.y.saturating_sub(2), self.rows_to_items.len()); if let Some((column, order)) = self.order() { self.sort_by(column, order); } } /// Removes the item at the specified index within the underlying storage /// vector and returns it. pub fn remove_item(&mut self, item_index: usize) -> Option<T> { if item_index < self.items.len() { // Move the selection if the currently selected item gets removed if let Some(selected_index) = self.item() { if selected_index == item_index { self.focus_up(1); } } // Remove the sorted reference to the item self.rows_to_items.retain(|i| *i != item_index); // Adjust remaining references for ref_index in &mut self.rows_to_items { if *ref_index > item_index { *ref_index -= 1; } } // Update scroll height to prevent out of index drawing self.scrollbase .set_heights(self.last_size.y.saturating_sub(2), self.rows_to_items.len()); // Remove actual item from the underlying storage Some(self.items.remove(item_index)) } else { None } } /// Removes all items from the underlying storage and returns them. 
pub fn take_items(&mut self) -> Vec<T> { self.scrollbase .set_heights(self.last_size.y.saturating_sub(2), 0); self.set_selected_row(0); self.rows_to_items.clear(); self.items.drain(0..).collect() } } impl<T: TableViewItem<H>, H: Eq + Hash + Copy + Clone + 'static> TableView<T, H> { fn draw_columns<C: Fn(&Printer, &TableColumn<H>)>( &self, printer: &Printer, sep: &str, callback: C, ) { let mut column_offset = 0; let column_count = self.columns.len(); for (index, column) in self.columns.iter().enumerate() { let printer = &printer.offset((column_offset, 0)).focused(true); callback(printer, column); if index < column_count - 1 { printer.print((column.width + 1, 0), sep); } column_offset += column.width + 3; } } fn sort_items(&mut self, column: H, order: Ordering) { if !self.is_empty() { let old_item = self.item(); let mut rows_to_items = self.rows_to_items.clone(); rows_to_items.sort_by(|a, b| { if order == Ordering::Less { self.items[*a].cmp(&self.items[*b], column) } else { self.items[*b].cmp(&self.items[*a], column) } }); self.rows_to_items = rows_to_items; if let Some(old_item) = old_item { self.set_selected_item(old_item); } } } fn draw_item(&self, printer: &Printer, i: usize) { self.draw_columns(printer, "┆ ", |printer, column| { let value = self.items[self.rows_to_items[i]].to_column(column.column); column.draw_row(printer, value.as_str()); }); } fn focus_up(&mut self, n: usize) { self.focus -= cmp::min(self.focus, n); } fn focus_down(&mut self, n: usize) { self.focus = cmp::min(self.focus + n, self.items.len() - 1); } fn active_column(&self) -> usize { self.columns.iter().position(|c| c.selected).unwrap_or(0) } fn column_cancel(&mut self) { self.column_select = false; for column in &mut self.columns { column.selected = column.order != Ordering::Equal; } } fn column_next(&mut self) -> bool { let column = self.active_column(); if column < self.columns.len() - 1 { self.columns[column].selected = false; self.columns[column + 1].selected = true; true } else { false } } fn column_prev(&mut self) -> bool { let column = self.active_column(); if column > 0 { self.columns[column].selected = false; self.columns[column - 1].selected = true; true } else { false } } fn column_select(&mut self) { let next = self.active_column(); let column = self.columns[next].column; let current = self .columns .iter() .position(|c| c.order != Ordering::Equal) .unwrap_or(0); let order = if current != next { self.columns[next].default_order } else if self.columns[current].order == Ordering::Less { Ordering::Greater } else { Ordering::Less }; self.sort_by(column, order); } } impl<T: TableViewItem<H> + 'static, H: Eq + Hash + Copy + Clone + 'static> View for TableView<T, H> { fn draw(&self, printer: &Printer) { self.draw_columns(printer, "╷ ", |printer, column| { let color = if column.order != Ordering::Equal || column.selected { if self.column_select && column.selected && self.enabled && printer.focused { theme::ColorStyle::highlight() } else { theme::ColorStyle::highlight_inactive() } } else { theme::ColorStyle::primary() }; printer.with_color(color, |printer| { column.draw_header(printer); }); }); self.draw_columns( &printer.offset((0, 1)).focused(true), "┴─", |printer, column| { printer.print_hline((0, 0), column.width + 1, "─"); }, ); let printer = &printer.offset((0, 2)).focused(true); self.scrollbase.draw(printer, |printer, i| { let color = if i == self.focus { if !self.column_select && self.enabled && printer.focused { theme::ColorStyle::highlight() } else { theme::ColorStyle::highlight_inactive() } } else { 
                theme::ColorStyle::primary()
            };

            if i < self.items.len() {
                printer.with_color(color, |printer| {
                    self.draw_item(printer, i);
                });
            }
        });
    }

    fn layout(&mut self, size: Vec2) {
        if size == self.last_size {
            return;
        }

        let item_count = self.items.len();
        let column_count = self.columns.len();

        // Split up all columns into sized / unsized groups
        let (mut sized, mut usized): (Vec<&mut TableColumn<H>>, Vec<&mut TableColumn<H>>) = self
            .columns
            .iter_mut()
            .partition(|c| c.requested_width.is_some());

        // Subtract one for the separators between our columns (that's column_count - 1)
        let mut available_width = size.x.saturating_sub(column_count.saturating_sub(1) * 3);

        // Reduce the width in case we are displaying a scrollbar
        if size.y.saturating_sub(1) < item_count {
            available_width = available_width.saturating_sub(2);
        }

        // Calculate widths for all requested columns
        let mut remaining_width = available_width;
        for column in &mut sized {
            column.width = match *column.requested_width.as_ref().unwrap() {
                TableColumnWidth::Percent(width) => cmp::min(
                    (size.x as f32 / 100.0 * width as f32).ceil() as usize,
                    remaining_width,
                ),
                TableColumnWidth::Absolute(width) => width,
            };
            remaining_width = remaining_width.saturating_sub(column.width);
        }

        // Spread the remaining width across the unsized columns
        let remaining_columns = usized.len();
        for column in &mut usized {
            column.width = (remaining_width as f32 / remaining_columns as f32).floor() as usize;
        }

        self.scrollbase
            .set_heights(size.y.saturating_sub(2), item_count);
        self.last_size = size;
    }

    fn take_focus(&mut self, _: Direction) -> bool {
        self.enabled
    }

    fn on_event(&mut self, event: Event) -> EventResult {
        if !self.enabled {
            return EventResult::Ignored;
        }

        let last_focus = self.focus;
        match event {
            Event::Key(Key::Right) => {
                if self.column_select {
                    if !self.column_next() {
                        return EventResult::Ignored;
                    }
                } else {
                    self.column_select = true;
                }
            }
            Event::Key(Key::Left) => {
                if self.column_select {
                    if !self.column_prev() {
                        return EventResult::Ignored;
                    }
                } else {
                    self.column_select = true;
                }
            }
            Event::Key(Key::Up) if self.focus > 0 || self.column_select => {
                if self.column_select {
                    self.column_cancel();
                } else {
                    self.focus_up(1);
                }
            }
            Event::Key(Key::Down) if self.focus + 1 < self.items.len() || self.column_select => {
                if self.column_select {
                    self.column_cancel();
                } else {
                    self.focus_down(1);
                }
            }
            Event::Key(Key::PageUp) => {
                self.column_cancel();
                self.focus_up(10);
            }
            Event::Key(Key::PageDown) => {
                self.column_cancel();
                self.focus_down(10);
            }
            Event::Key(Key::Home) => {
                self.column_cancel();
                self.focus = 0;
            }
            Event::Key(Key::End) => {
                self.column_cancel();
                self.focus = self.items.len() - 1;
            }
            Event::Key(Key::Enter) => {
                if self.column_select {
                    self.column_select();

                    if self.on_sort.is_some() {
                        let c = &self.columns[self.active_column()];
                        let column = c.column;
                        let order = c.order;

                        let cb = self.on_sort.clone().unwrap();
                        return EventResult::Consumed(Some(Callback::from_fn(move |s| {
                            cb(s, column, order)
                        })));
                    }
                } else if !self.is_empty() && self.on_submit.is_some() {
                    let cb = self.on_submit.clone().unwrap();
                    let row = self.row().unwrap();
                    let index = self.item().unwrap();
                    return EventResult::Consumed(Some(Callback::from_fn(move |s| {
                        cb(s, row, index)
                    })));
                }
            }
            _ => return EventResult::Ignored,
        }

        let focus = self.focus;
        self.scrollbase.scroll_to(focus);

        if self.column_select {
            EventResult::Consumed(None)
        } else if !self.is_empty() && last_focus != focus {
            let row = self.row().unwrap();
            let index = self.item().unwrap();

            EventResult::Consumed(
                self.on_select
                    .clone()
                    .map(|cb|
Callback::from_fn(move |s| cb(s, row, index))), ) } else { EventResult::Ignored } } } /// A type used for the construction of columns in a /// [`TableView`](struct.TableView.html). pub struct TableColumn<H: Copy + Clone + 'static> { column: H, title: String, selected: bool, alignment: HAlign, order: Ordering, width: usize, default_order: Ordering, requested_width: Option<TableColumnWidth>, } enum TableColumnWidth { Percent(usize), Absolute(usize), } impl<H: Copy + Clone + 'static> TableColumn<H> { /// Sets the default ordering of the column. pub fn ordering(mut self, order: Ordering) -> Self { self.default_order = order; self } /// Sets the horizontal text alignment of the column. pub fn align(mut self, alignment: HAlign) -> Self { self.alignment = alignment; self } /// Sets how many characters of width this column will try to occupy. pub fn width(mut self, width: usize) -> Self { self.requested_width = Some(TableColumnWidth::Absolute(width)); self } /// Sets what percentage of the width of the entire table this column will /// try to occupy. pub fn width_percent(mut self, width: usize) -> Self { self.requested_width = Some(TableColumnWidth::Percent(width)); self } fn new(column: H, title: String) -> Self { Self { column, title, selected: false, alignment: HAlign::Left, order: Ordering::Equal, width: 0, default_order: Ordering::Less, requested_width: None, } } fn draw_header(&self, printer: &Printer) { let order = match self.order { Ordering::Less => "^", Ordering::Greater => "v", Ordering::Equal => " ", }; let header = match self.alignment { HAlign::Left => format!( "{:<width$} [{}]", self.title, order, width = self.width.saturating_sub(4) ), HAlign::Right => format!( "{:>width$} [{}]", self.title, order, width = self.width.saturating_sub(4) ), HAlign::Center => format!( "{:^width$} [{}]", self.title, order, width = self.width.saturating_sub(4) ), }; printer.print((0, 0), header.as_str()); } fn draw_row(&self, printer: &Printer, value: &str) { let value = match self.alignment { HAlign::Left => format!("{:<width$} ", value, width = self.width), HAlign::Right => format!("{:>width$} ", value, width = self.width), HAlign::Center => format!("{:^width$} ", value, width = self.width), }; printer.print((0, 0), value.as_str()); } } #[cfg(test)] mod tests { use super::*; #[derive(Copy, Clone, PartialEq, Eq, Hash)] enum SimpleColumn { Name, } #[allow(dead_code)] impl SimpleColumn { fn as_str(&self) -> &str { match *self { SimpleColumn::Name => "Name", } } } #[derive(Clone, Debug)] struct SimpleItem { name: String, } impl TableViewItem<SimpleColumn> for SimpleItem { fn to_column(&self, column: SimpleColumn) -> String { match column { SimpleColumn::Name => self.name.to_string(), } } fn cmp(&self, other: &Self, column: SimpleColumn) -> Ordering where Self: Sized, { match column { SimpleColumn::Name => self.name.cmp(&other.name), } } } fn setup_test_table() -> TableView<SimpleItem, SimpleColumn> { TableView::<SimpleItem, SimpleColumn>::new() .column(SimpleColumn::Name, "Name", |c| c.width_percent(20)) } #[test] fn should_insert_into_existing_table() { let mut simple_table = setup_test_table(); let mut simple_items = Vec::new(); for i in 1..=10 { simple_items.push(SimpleItem { name: format!("{} - Name", i), }); } // Insert First Batch of Items simple_table.set_items(simple_items); // Test for Additional item insertion simple_table.insert_item(SimpleItem { name: format!("{} Name", 11), }); assert!(simple_table.len() == 11); } #[test] fn should_insert_into_empty_table() { let mut simple_table = 
setup_test_table(); // Test for First item insertion simple_table.insert_item(SimpleItem { name: format!("{} Name", 1), }); assert!(simple_table.len() == 1); } }
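// ---------------------------------------------------------------------------
// Editor's addition, not part of the original source file: a minimal usage
// sketch for the builder API above. It only relies on methods visible in this
// file (`new`, `column`, `width_percent`, `set_items`, `len`, `row`); the
// `CityColumn`/`City` names are hypothetical illustration types.
// ---------------------------------------------------------------------------
#[cfg(test)]
mod usage_sketch {
    use super::*;
    use std::cmp::Ordering;

    #[derive(Copy, Clone, PartialEq, Eq, Hash)]
    enum CityColumn {
        Name,
        Population,
    }

    #[derive(Clone, Debug)]
    struct City {
        name: String,
        population: u64,
    }

    impl TableViewItem<CityColumn> for City {
        fn to_column(&self, column: CityColumn) -> String {
            match column {
                CityColumn::Name => self.name.clone(),
                CityColumn::Population => self.population.to_string(),
            }
        }

        fn cmp(&self, other: &Self, column: CityColumn) -> Ordering
        where
            Self: Sized,
        {
            match column {
                CityColumn::Name => self.name.cmp(&other.name),
                CityColumn::Population => self.population.cmp(&other.population),
            }
        }
    }

    #[test]
    fn build_a_two_column_table() {
        // Two columns splitting the available width 60/40.
        let mut table = TableView::<City, CityColumn>::new()
            .column(CityColumn::Name, "Name", |c| c.width_percent(60))
            .column(CityColumn::Population, "Population", |c| c.width_percent(40));

        table.set_items(vec![
            City { name: "Oslo".to_string(), population: 700_000 },
            City { name: "Lima".to_string(), population: 9_700_000 },
        ]);

        // `set_items` resets the selection to the first row.
        assert_eq!(table.len(), 2);
        assert_eq!(table.row(), Some(0));
    }
}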
30.472144
97
0.526557
67755d0715e659e74d1f21a7472625a3183d7b20
483
use rusoto_core::Region; pub const ENDPOINT: &str = "http://localhost:4566"; #[allow(dead_code)] pub fn setup() { std::env::set_var("AWS_REGION", "us-east-2"); std::env::set_var("AWS_ACCESS_KEY_ID", "test"); std::env::set_var("AWS_SECRET_ACCESS_KEY", "test"); std::env::set_var("AWS_ENDPOINT_URL", ENDPOINT); } #[allow(dead_code)] pub fn region() -> Region { Region::Custom { name: "custom".to_string(), endpoint: ENDPOINT.to_string(), } }
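// Editor's addition, not part of the original file: a small illustrative test
// for the helpers above. It only exercises `setup()` and `region()`; wiring the
// returned `Region` into a concrete Rusoto service client is left out because
// no service crate is imported here.
#[cfg(test)]
mod sketch_tests {
    use super::*;
    use rusoto_core::Region;

    #[test]
    fn region_points_at_localstack() {
        setup();
        assert_eq!(std::env::var("AWS_ENDPOINT_URL").unwrap(), ENDPOINT);

        match region() {
            Region::Custom { name, endpoint } => {
                assert_eq!(name, "custom");
                assert_eq!(endpoint, ENDPOINT);
            }
            other => panic!("unexpected region: {:?}", other),
        }
    }
}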
24.15
55
0.63354
8f837c3edab246b040c2a1c4098a7d40c78810ec
3,755
extern crate regex; use std::collections::VecDeque; use std::io; use std::io::prelude::*; use regex::Regex; #[derive(Debug)] struct Range((usize, usize), (usize, usize)); #[derive(Debug)] enum OperationType { On, Off, Toggle, } #[derive(Debug)] struct Operation { op_type: OperationType, range: Range, } fn main() { let mut oper_list = VecDeque::new(); { let re = Regex::new("^(turn on|turn off|toggle) ([0-9]+),([0-9]+) through \ ([0-9]+),([0-9]+)$") .unwrap(); let stdin = io::stdin(); for uline in stdin.lock().lines() { let line = &uline.unwrap(); let capture = re.captures(line).unwrap(); let start_point = (capture.at(2).unwrap().parse::<usize>().unwrap(), capture.at(3).unwrap().parse::<usize>().unwrap()); let end_point = (capture.at(4).unwrap().parse::<usize>().unwrap(), capture.at(5).unwrap().parse::<usize>().unwrap()); let op; let op_str = capture.at(1).unwrap(); let op_range = Range(start_point, end_point); match op_str { "turn on" => { op = Operation { op_type: OperationType::On, range: op_range, } } "turn off" => { op = Operation { op_type: OperationType::Off, range: op_range, } } "toggle" => { op = Operation { op_type: OperationType::Toggle, range: op_range, } } _ => panic!("Unknown operation: {:?}", op_str), } oper_list.push_back(op); } } let mut light_matrix = [[false; 1000]; 1000]; for oper in oper_list.iter() { // + 1 because the ranges are inclusive for x in ((oper.range.0).0)..((oper.range.1).0 + 1) { for y in ((oper.range.0).1)..((oper.range.1).1 + 1) { match oper.op_type { OperationType::On => light_matrix[x][y] = true, OperationType::Off => light_matrix[x][y] = false, OperationType::Toggle => light_matrix[x][y] = !light_matrix[x][y], } } } } let sum = light_matrix.iter().fold(0, |total_acc, &row| { total_acc + row.iter().fold(0, |acc, &light_state| { if light_state { acc + 1 } else { acc } }) }); println!("Number of light lit after all operations: {}", sum); let mut brt_matrix = [[0 as u32; 1000]; 1000]; for oper in oper_list.iter() { // + 1 because the ranges are inclusive for x in ((oper.range.0).0)..((oper.range.1).0 + 1) { for y in ((oper.range.0).1)..((oper.range.1).1 + 1) { match oper.op_type { OperationType::On => brt_matrix[x][y] += 1, OperationType::Off => { if brt_matrix[x][y] != 0 { brt_matrix[x][y] -= 1 } } OperationType::Toggle => brt_matrix[x][y] += 2, } } } } let brt_sum = brt_matrix.iter().fold(0, |total_acc, &row| { total_acc + row.iter().fold(0, |acc, &light_state| acc + light_state) }); println!("Total brightness after all operations: {}", brt_sum); }
27.210145
86
0.435686
d5680aad51f046919fc4924294a8564a324536a9
6,355
use disco::ed25519; use disco::x25519::{PublicKey, StaticSecret}; use disco::SessionBuilder; #[test] fn test_nn_session() { let mut session1 = SessionBuilder::new("NN").build_initiator(); let mut session2 = SessionBuilder::new("NN").build_responder(); println!("-> e"); let ct = session1.write_message(&[]); session2.read_message(&ct).unwrap(); println!("<- e ee"); let ct = session2.write_message(&[]); session1.read_message(&ct).unwrap(); let session1 = session1.into_stateless_transport_mode(); let session2 = session2.into_stateless_transport_mode(); println!("->"); let mut msg1 = b"msg1".to_vec(); let mut msg2 = b"msg2".to_vec(); let tag1 = session1.write_message(1, &mut msg1); let tag2 = session1.write_message(2, &mut msg2); assert_ne!(msg1, b"msg1"); assert_ne!(msg2, b"msg2"); session2.read_message(2, &mut msg2, tag2).unwrap(); session2.read_message(1, &mut msg1, tag1).unwrap(); assert_eq!(msg1, b"msg1"); assert_eq!(msg2, b"msg2"); } #[test] fn test_kk_session() { let secret1 = StaticSecret::new(&mut rand::rngs::OsRng); let public1 = PublicKey::from(&secret1); let secret2 = StaticSecret::new(&mut rand::rngs::OsRng); let public2 = PublicKey::from(&secret2); let mut session1 = SessionBuilder::new("KK") .secret(secret1) .remote_public(public2) .build_initiator(); let mut session2 = SessionBuilder::new("KK") .secret(secret2) .remote_public(public1) .build_responder(); println!("->"); let ct = session1.write_message(b"e es ss"); let pt = session2.read_message(&ct).expect("pt"); assert_eq!(&pt, b"e es ss"); println!("<-"); let ct = session2.write_message(b"e ee se"); let pt = session1.read_message(&ct).expect("pt"); assert_eq!(&pt, b"e ee se"); let mut session1 = session1.into_transport_mode(); let mut session2 = session2.into_transport_mode(); println!("->"); let ct = session1.write_message(b"hello"); let pt = session2.read_message(&ct).expect("pt"); assert_eq!(&pt, b"hello"); } #[derive(Clone, Debug)] struct Verifier { root_public: ed25519::PublicKey, } impl Verifier { pub fn new(root_public: ed25519::PublicKey) -> Self { Self { root_public } } pub fn verify(&self, public: &PublicKey, proof: &[u8]) -> bool { if let Ok(sig) = ed25519::Signature::from_bytes(proof) { if let Ok(()) = self.root_public.verify(public.as_bytes(), &sig) { return true; } } false } } #[test] fn test_xx_session() { let root = ed25519::Keypair::generate(&mut rand::rngs::OsRng); let verifier = Verifier::new(root.public.clone()); let secret1 = StaticSecret::new(&mut rand::rngs::OsRng); let public1 = PublicKey::from(&secret1); let proof1 = root.sign(public1.as_bytes()); let secret2 = StaticSecret::new(&mut rand::rngs::OsRng); let public2 = PublicKey::from(&secret2); let proof2 = root.sign(public2.as_bytes()); let mut session1 = SessionBuilder::new("XX").secret(secret1).build_initiator(); let mut session2 = SessionBuilder::new("XX").secret(secret2).build_responder(); println!("-> e"); let ct = session1.write_message(&[]); session2.read_message(&ct).expect("pt"); println!("<- e ee s es"); let ct = session2.write_message(&proof2.to_bytes()[..]); let proof2 = session1.read_message(&ct).expect("pt"); let public2 = session1.get_remote_static().expect("s").x25519(); assert!(verifier.verify(public2, &proof2)); println!("-> s se"); let ct = session1.write_message(&proof1.to_bytes()[..]); let proof1 = session2.read_message(&ct).expect("pt"); let public1 = session2.get_remote_static().expect("s").x25519(); assert!(verifier.verify(public1, &proof1)); let mut session1 = session1.into_transport_mode(); let mut session2 = 
session2.into_transport_mode(); println!("<-"); let ct = session2.write_message(b"hello"); let pt = session1.read_message(&ct).expect("pt"); assert_eq!(&pt, b"hello"); } #[test] fn test_nnpsk2_session() { // Also test prologue and rekeying. let mut session1 = SessionBuilder::new("NNpsk2") .prologue(b"prologue".to_vec()) .add_psk([0u8; 32]) .build_initiator(); let mut session2 = SessionBuilder::new("NNpsk2") .prologue(b"prologue".to_vec()) .add_psk([0u8; 32]) .build_responder(); println!("->"); let ct = session1.write_message(b"e"); let pt = session2.read_message(&ct).expect("pt"); assert_eq!(&pt, b"e"); println!("<-"); let ct = session2.write_message(b"e ee psk"); let pt = session1.read_message(&ct).expect("pt"); assert_eq!(&pt, b"e ee psk"); let mut session1 = session1.into_transport_mode(); let mut session2 = session2.into_transport_mode(); session1.rekey_outgoing(); session2.rekey_incoming(); println!("->"); let ct = session1.write_message(b"hello"); let pt = session2.read_message(&ct).expect("pt"); assert_eq!(&pt, b"hello"); } #[test] fn test_k1k1sig_session() { let key1 = ed25519::Keypair::generate(&mut rand::rngs::OsRng); let pub1 = key1.public.clone(); let key2 = ed25519::Keypair::generate(&mut rand::rngs::OsRng); let pub2 = key2.public.clone(); let mut session1 = SessionBuilder::new("K1K1sig") .secret(&key1) .remote_public(pub2) .build_initiator(); let mut session2 = SessionBuilder::new("K1K1sig") .secret(&key2) .remote_public(pub1) .build_responder(); println!("->"); let ct = session1.write_message(&[]); let pt = session2.read_message(&ct).unwrap(); assert_eq!(&pt, &[]); println!("<-"); let ct = session2.write_message(&[]); let pt = session1.read_message(&ct).unwrap(); assert_eq!(&pt, &[]); println!("->"); let ct = session1.write_message(&[]); let pt = session2.read_message(&ct).unwrap(); assert_eq!(&pt, &[]); let mut session1 = session1.into_transport_mode(); let mut session2 = session2.into_transport_mode(); println!("<-"); let ct = session1.write_message(b"hello"); let pt = session2.read_message(&ct).expect("pt"); assert_eq!(&pt, b"hello"); }
30.849515
83
0.626121
fb85b041ca8bdebbe79f15b45ec25114f8f9726b
3,016
// Copyright ⓒ 2015-2016 Kevin B. Knapp and [`clap-rs` contributors](https://github.com/clap-rs/clap/graphs/contributors). // Licensed under the MIT license // (see LICENSE or <http://opensource.org/licenses/MIT>) All files in the project carrying such // notice may not be copied, modified, or distributed except according to those terms. #![cfg_attr(feature = "doc", doc = include_str!("../README.md"))] //! <https://github.com/clap-rs/clap> #![warn( missing_docs, missing_debug_implementations, missing_copy_implementations, trivial_casts, unused_allocation, trivial_numeric_casts )] #![forbid(unsafe_code)] // TODO: https://github.com/rust-lang/rust-clippy/issues/7290 #![allow(clippy::single_component_path_imports)] #![allow(clippy::branches_sharing_code)] #[cfg(not(feature = "std"))] compile_error!("`std` feature is currently required to build `clap`"); #[cfg(feature = "color")] pub use crate::util::color::ColorChoice; pub use crate::{ build::{ App, AppFlags, AppSettings, Arg, ArgFlags, ArgGroup, ArgSettings, PossibleValue, ValueHint, }, parse::errors::{Error, ErrorKind, Result}, parse::{ArgMatches, Indices, OsValues, Values}, }; pub use crate::derive::{ArgEnum, Args, FromArgMatches, IntoApp, Parser, Subcommand}; #[cfg(feature = "yaml")] #[doc(hidden)] #[deprecated( since = "3.0.0", note = "Deprecated in Issue #9, maybe clap::Parser would fit your use case?" )] pub use yaml_rust::YamlLoader; #[cfg(feature = "derive")] #[doc(hidden)] pub use clap_derive::{self, *}; /// Deprecated, replaced with [`Parser`] #[deprecated(since = "3.0.0", note = "Replaced with `Parser`")] pub use Parser as StructOpt; #[cfg(any(feature = "derive", feature = "cargo"))] #[doc(hidden)] pub use lazy_static; #[macro_use] #[allow(missing_docs)] mod macros; mod derive; #[cfg(feature = "regex")] pub use crate::build::arg::RegexRef; mod build; mod mkeymap; mod output; mod parse; mod util; const INTERNAL_ERROR_MSG: &str = "Fatal internal error. Please consider filing a bug \ report at https://github.com/clap-rs/clap/issues"; const INVALID_UTF8: &str = "unexpected invalid UTF-8 code point"; /// Deprecated, replaced with [`App`] #[deprecated(since = "3.0.0", note = "Replaced with `App`")] #[derive(Debug, Copy, Clone)] pub struct SubCommand {} #[allow(deprecated)] impl SubCommand { /// Deprecated, replaced with [`App::new`] #[deprecated(since = "3.0.0", note = "Replaced with `App::new`")] pub fn with_name<'help>(name: &str) -> App<'help> { App::new(name) } /// Deprecated in [Issue #9](https://github.com/epage/clapng/issues/9), maybe [`clap::Parser`][crate::Parser] would fit your use case? #[cfg(feature = "yaml")] #[deprecated( since = "3.0.0", note = "Deprecated in Issue #9, maybe clap::Parser would fit your use case?" )] pub fn from_yaml(yaml: &yaml_rust::Yaml) -> App { #![allow(deprecated)] App::from_yaml(yaml) } }
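// Editor's addition, not part of the original file: a minimal sketch of the
// builder API re-exported above, using only `App` and `Arg` and hypothetical
// argument names. It is illustration only, not part of clap's public surface.
#[cfg(test)]
mod builder_sketch {
    use crate::{App, Arg};

    #[test]
    fn parses_a_flag_and_an_option() {
        let matches = App::new("demo")
            .arg(Arg::new("verbose").short('v').long("verbose"))
            .arg(Arg::new("input").long("input").takes_value(true))
            .get_matches_from(vec!["demo", "-v", "--input", "file.txt"]);

        assert!(matches.is_present("verbose"));
        assert_eq!(matches.value_of("input"), Some("file.txt"));
    }
}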
30.464646
138
0.664788
ed00eff53cdf03abd0b9f0500283d030367f2226
1,101
//! //! Support for Slack Webhooks methods //! use rsb_derive::Builder; use serde::{Deserialize, Serialize}; use serde_with::skip_serializing_none; use crate::SlackClient; use crate::{ClientResult, SlackClientHttpConnector}; use slack_morphism_models::*; impl<SCHC> SlackClient<SCHC> where SCHC: SlackClientHttpConnector + Send, { /// /// Post a webhook message using webhook url /// pub async fn post_webhook_message( &self, hook_url: &str, req: &SlackApiPostWebhookMessageRequest, ) -> ClientResult<SlackApiPostWebhookMessageResponse> { self.http_api .connector .http_post_uri(hook_url.parse()?, req, None) .await } } #[skip_serializing_none] #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Builder)] pub struct SlackApiPostWebhookMessageRequest { #[serde(flatten)] pub content: SlackMessageContent, pub thread_ts: Option<SlackTs>, } #[skip_serializing_none] #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Builder)] pub struct SlackApiPostWebhookMessageResponse {}
25.604651
67
0.704814
f79b4ea0e6eac0e0e2e5372fddd710df01b1ee18
502
#![allow(unused_imports)] use wasm_bindgen::prelude::*; #[wasm_bindgen] #[doc = "The `PcImplIceConnectionState` enum."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `PcImplIceConnectionState`*"] #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum PcImplIceConnectionState { New = "new", Checking = "checking", Connected = "connected", Completed = "completed", Failed = "failed", Disconnected = "disconnected", Closed = "closed", }
29.529412
103
0.673307
b918ba3bd8e77702bab7b902030b8ab5bb2dbe48
2,774
use std::rc::Rc;

use glium::{Display, Surface, Frame};

use color::Color;
use render::{Renderable, Renderer};
use texture::Texture;
use mesh::{VertexBuffer, IndexBuffer, Polygon, Mesh};
use math::Mat;
use sprite::Sprite;

pub struct Batch {
    texture: Rc<Texture>,
    mesh: Mesh,
    color_multiply: Color,
}

impl Batch {
    pub fn from_sprites(display: &Display, sprites: &[&Sprite]) -> Batch {
        use glium::index::PrimitiveType;

        let len = sprites.len();
        if len == 0 {
            panic!()
        }
        let first = sprites[0];

        let mut vb = VertexBuffer::empty_dynamic(display, len * 4).unwrap();
        let mut ib = Vec::with_capacity(len * 6);
        for (i, chunk) in vb.map().chunks_mut(4).enumerate() {
            let sprite = sprites[i];
            assert!(first.batchable(sprite));
            let vertices = sprite.rectangle().as_array();
            for i in 0..4 {
                chunk[i] = vertices[i];
            }
            let num = i as u16;
            ib.push(num * 4 + 0);
            ib.push(num * 4 + 1);
            ib.push(num * 4 + 2);
            ib.push(num * 4 + 1);
            ib.push(num * 4 + 3);
            ib.push(num * 4 + 2);
        }

        Batch {
            texture: first.texture.clone(),
            mesh: Mesh {
                index_buffer: IndexBuffer::new(display, PrimitiveType::TrianglesList, &ib[..]).unwrap(),
                vertex_buffer: vb,
            },
            color_multiply: first.color_multiply,
        }
    }
}

impl Renderable for Batch {
    fn draw(&self, renderer: &Renderer, target: &mut Frame, parent: &Mat) {
        renderer.draw(target, &self.mesh,
            &uniform! {
                matrix: *parent,
                color_multiply: self.color_multiply,
                tex: &self.texture.data
            }
        );
    }
}

impl<'a> Renderable for Vec<&'a Sprite> {
    fn draw(&self, renderer: &Renderer, target: &mut Frame, parent: &Mat) {
        let len = self.len();
        if len == 0 {
            return
        }

        let mut i = 0;
        for j in 1..len+1 {
            // advance until the last element or the next unbatchable sprite.
            if j != len && self[i].batchable(self[j]) {
                continue
            }
            // else cut off and render.
            if j - i > 1 {
                // multiple sprites should be rendered as a batch.
                let sprites = &self[i..j];
                let batch = Batch::from_sprites(renderer.display, sprites);
                batch.draw(renderer, target, parent);
            } else {
                // only one sprite.
                self[i].draw(renderer, target, parent);
            }
            i = j;
        }
    }
}
26.932039
76
0.483417
69ddf958a5d464ca7cde08316b6a2ddcde316f04
3,840
use ::chrono::prelude::*;
use ::serde_derive::{Deserialize, Serialize};
use ::std::collections::HashMap;
use ::strum_macros::Display;

/// This enum represents the status of the internal task handling of Pueue.
/// They basically represent the internal task life-cycle.
#[derive(Clone, Debug, Display, PartialEq, Serialize, Deserialize)]
pub enum TaskStatus {
    /// The task is queued and waiting for a free slot
    Queued,
    /// The task has been manually stashed. It won't be executed until it's manually enqueued
    Stashed,
    /// The task is started and running
    Running,
    /// A previously running task has been paused
    Paused,
    /// Task finished successfully
    Done,
    /// Used while the command of a task is edited (to prevent starting the task)
    Locked,
}

/// This enum represents the exit status of the actually spawned program.
#[derive(Clone, Debug, Display, PartialEq, Serialize, Deserialize)]
pub enum TaskResult {
    /// Task exited with 0
    Success,
    /// The task failed in some other kind of way (error code != 0)
    Failed(i32),
    /// The task couldn't be spawned. Probably a typo in the command
    FailedToSpawn(String),
    /// Task has been actively killed by either the user or the daemon on shutdown
    Killed,
    /// A dependency of the task failed.
    DependencyFailed,
}

/// Representation of a task.
/// start will be set the second the task starts processing.
/// exit_code, output and end won't be initialized until the task has finished.
/// The output of the task is written into separate files.
/// Upon task completion, the output is read from the files and put into the struct.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Task {
    pub id: usize,
    pub command: String,
    pub path: String,
    pub envs: HashMap<String, String>,
    pub group: Option<String>,
    pub enqueue_at: Option<DateTime<Local>>,
    pub dependencies: Vec<usize>,
    pub status: TaskStatus,
    pub prev_status: TaskStatus,
    pub result: Option<TaskResult>,
    pub start: Option<DateTime<Local>>,
    pub end: Option<DateTime<Local>>,
}

impl Task {
    pub fn new(
        command: String,
        path: String,
        envs: HashMap<String, String>,
        group: Option<String>,
        starting_status: TaskStatus,
        enqueue_at: Option<DateTime<Local>>,
        dependencies: Vec<usize>,
    ) -> Task {
        Task {
            id: 0,
            command,
            path,
            envs,
            group,
            enqueue_at,
            dependencies,
            status: starting_status.clone(),
            prev_status: starting_status,
            result: None,
            start: None,
            end: None,
        }
    }

    pub fn from_task(task: &Task) -> Task {
        Task {
            id: 0,
            command: task.command.clone(),
            path: task.path.clone(),
            envs: task.envs.clone(),
            group: None,
            enqueue_at: None,
            dependencies: Vec::new(),
            status: TaskStatus::Queued,
            prev_status: TaskStatus::Queued,
            result: None,
            start: None,
            end: None,
        }
    }

    pub fn is_running(&self) -> bool {
        self.status == TaskStatus::Running || self.status == TaskStatus::Paused
    }

    pub fn is_done(&self) -> bool {
        self.status == TaskStatus::Done
    }

    // Check if the task errored.
    // The only case when it didn't error is if it didn't run yet or if the task exited successfully.
    pub fn failed(&self) -> bool {
        match self.result {
            None => false,
            Some(TaskResult::Success) => false,
            _ => true,
        }
    }

    pub fn is_queued(&self) -> bool {
        self.status == TaskStatus::Queued || self.status == TaskStatus::Stashed
    }
}
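// Editor's addition, not part of the original file: a small illustrative test
// showing how `Task::new` and the status helpers above fit together. The
// command string and path are hypothetical values.
#[cfg(test)]
mod sketch_tests {
    use super::*;
    use std::collections::HashMap;

    #[test]
    fn a_new_task_starts_out_queued() {
        let task = Task::new(
            "ls -al".to_string(),
            "/tmp".to_string(),
            HashMap::new(),
            None,
            TaskStatus::Queued,
            None,
            Vec::new(),
        );

        assert!(task.is_queued());
        assert!(!task.is_running());
        assert!(!task.is_done());
        // No `TaskResult` has been recorded yet, so the task has not failed.
        assert!(!task.failed());
    }
}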
30.72
101
0.607031
019f7a5acedafe9149496a72fc75e33750ad5c04
1,538
// ================================================================= // // * WARNING * // // This file is generated! // // Changes made to this file will be overwritten. If changes are // required to the generated code, the service_crategen project // must be updated to generate the changes. // // ================================================================= #![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")] //! <p><fullname>Amazon ElastiCache</fullname> <p>Amazon ElastiCache is a web service that makes it easier to set up, operate, and scale a distributed cache in the cloud.</p> <p>With ElastiCache, customers get all of the benefits of a high-performance, in-memory cache with less of the administrative burden involved in launching and managing a distributed cache. The service makes setup, scaling, and cluster failure handling much simpler than in a self-managed cache deployment.</p> <p>In addition, through integration with Amazon CloudWatch, customers get enhanced visibility into the key performance statistics associated with their cache and can receive alarms if a part of their cache runs hot.</p></p> //! //! If you're using the service, you're probably looking for [ElastiCacheClient](struct.ElastiCacheClient.html) and [ElastiCache](trait.ElastiCache.html). extern crate futures; extern crate rusoto_core; extern crate xml; mod generated; mod custom; pub use generated::*; pub use custom::*;
53.034483
708
0.671001
1e19401479687815644a47d24a7849c3341ac789
6,745
// This test case tests the incremental compilation hash (ICH) implementation // for match expressions. // The general pattern followed here is: Change one thing between rev1 and rev2 // and make sure that the hash has changed, then change nothing between rev2 and // rev3 and make sure that the hash has not changed. // build-pass (FIXME(62277): could be check-pass?) // revisions: cfail1 cfail2 cfail3 // compile-flags: -Z query-dep-graph -Zincremental-ignore-spans #![allow(warnings)] #![feature(rustc_attrs)] #![crate_type="rlib"] // Add Arm --------------------------------------------------------------------- #[cfg(cfail1)] pub fn add_arm(x: u32) -> u32 { match x { 0 => 0, 1 => 1, _ => 100, } } #[cfg(not(cfail1))] #[rustc_clean(cfg="cfail2", except="hir_owner_nodes,optimized_mir,typeck_tables_of")] #[rustc_clean(cfg="cfail3")] pub fn add_arm(x: u32) -> u32 { match x { 0 => 0, 1 => 1, 2 => 2, _ => 100, } } // Change Order Of Arms -------------------------------------------------------- #[cfg(cfail1)] pub fn change_order_of_arms(x: u32) -> u32 { match x { 0 => 0, 1 => 1, _ => 100, } } #[cfg(not(cfail1))] #[rustc_clean(cfg="cfail2", except="hir_owner_nodes,optimized_mir")] #[rustc_clean(cfg="cfail3")] pub fn change_order_of_arms(x: u32) -> u32 { match x { 1 => 1, 0 => 0, _ => 100, } } // Add Guard Clause ------------------------------------------------------------ #[cfg(cfail1)] pub fn add_guard_clause(x: u32, y: bool) -> u32 { match x { 0 => 0, 1 => 1, _ => 100, } } #[cfg(not(cfail1))] #[rustc_clean(cfg="cfail2", except="hir_owner_nodes,optimized_mir,typeck_tables_of")] #[rustc_clean(cfg="cfail3")] pub fn add_guard_clause(x: u32, y: bool) -> u32 { match x { 0 => 0, 1 if y => 1, _ => 100, } } // Change Guard Clause ------------------------------------------------------------ #[cfg(cfail1)] pub fn change_guard_clause(x: u32, y: bool) -> u32 { match x { 0 => 0, 1 if y => 1, _ => 100, } } #[cfg(not(cfail1))] #[rustc_clean(cfg="cfail2", except="hir_owner_nodes,optimized_mir,typeck_tables_of")] #[rustc_clean(cfg="cfail3")] pub fn change_guard_clause(x: u32, y: bool) -> u32 { match x { 0 => 0, 1 if !y => 1, _ => 100, } } // Add @-Binding --------------------------------------------------------------- #[cfg(cfail1)] pub fn add_at_binding(x: u32) -> u32 { match x { 0 => 0, 1 => 1, _ => x, } } #[cfg(not(cfail1))] #[rustc_clean(cfg="cfail2", except="hir_owner_nodes,optimized_mir,typeck_tables_of")] #[rustc_clean(cfg="cfail3")] pub fn add_at_binding(x: u32) -> u32 { match x { 0 => 0, 1 => 1, x @ _ => x, } } // Change Name of @-Binding ---------------------------------------------------- #[cfg(cfail1)] pub fn change_name_of_at_binding(x: u32) -> u32 { match x { 0 => 0, 1 => 1, x @ _ => 7, } } #[cfg(not(cfail1))] #[rustc_clean(cfg="cfail2", except="hir_owner_nodes,optimized_mir")] #[rustc_clean(cfg="cfail3")] pub fn change_name_of_at_binding(x: u32) -> u32 { match x { 0 => 0, 1 => 1, y @ _ => 7, } } // Change Simple Binding To Pattern -------------------------------------------- #[cfg(cfail1)] pub fn change_simple_name_to_pattern(x: u32) -> u32 { match (x, x & 1) { (0, 0) => 0, a => 1, } } #[cfg(not(cfail1))] #[rustc_clean(cfg="cfail2", except="hir_owner_nodes,optimized_mir,typeck_tables_of")] #[rustc_clean(cfg="cfail3")] pub fn change_simple_name_to_pattern(x: u32) -> u32 { match (x, x & 1) { (0, 0) => 0, (x, y) => 1, } } // Change Name In Pattern ------------------------------------------------------ #[cfg(cfail1)] pub fn change_name_in_pattern(x: u32) -> u32 { match (x, x & 1) { (a, 0) => 0, (a, 1) => 
a, _ => 100, } } #[cfg(not(cfail1))] #[rustc_clean(cfg="cfail2", except="hir_owner_nodes,optimized_mir")] #[rustc_clean(cfg="cfail3")] pub fn change_name_in_pattern(x: u32) -> u32 { match (x, x & 1) { (b, 0) => 0, (a, 1) => a, _ => 100, } } // Change Mutability Of Binding In Pattern ------------------------------------- #[cfg(cfail1)] pub fn change_mutability_of_binding_in_pattern(x: u32) -> u32 { match (x, x & 1) { (a, 0) => 0, _ => 1, } } #[cfg(not(cfail1))] #[rustc_clean(cfg="cfail2", except="hir_owner_nodes,optimized_mir,typeck_tables_of")] #[rustc_clean(cfg="cfail3")] pub fn change_mutability_of_binding_in_pattern(x: u32) -> u32 { match (x, x & 1) { (mut a, 0) => 0, _ => 1, } } // Add `ref` To Binding In Pattern ------------------------------------- #[cfg(cfail1)] pub fn add_ref_to_binding_in_pattern(x: u32) -> u32 { match (x, x & 1) { (a, 0) => 0, _ => 1, } } #[cfg(not(cfail1))] #[rustc_clean(cfg="cfail2", except="hir_owner_nodes,optimized_mir,typeck_tables_of")] #[rustc_clean(cfg="cfail3")] pub fn add_ref_to_binding_in_pattern(x: u32) -> u32 { match (x, x & 1) { (ref a, 0) => 0, _ => 1, } } // Add `&` To Binding In Pattern ------------------------------------- #[cfg(cfail1)] pub fn add_amp_to_binding_in_pattern(x: u32) -> u32 { match (&x, x & 1) { (a, 0) => 0, _ => 1, } } #[cfg(not(cfail1))] #[rustc_clean(cfg="cfail2", except="hir_owner_nodes,optimized_mir,typeck_tables_of")] #[rustc_clean(cfg="cfail3")] pub fn add_amp_to_binding_in_pattern(x: u32) -> u32 { match (&x, x & 1) { (&a, 0) => 0, _ => 1, } } // Change RHS Of Arm ----------------------------------------------------------- #[cfg(cfail1)] pub fn change_rhs_of_arm(x: u32) -> u32 { match x { 0 => 0, 1 => 1, _ => 2, } } #[cfg(not(cfail1))] #[rustc_clean(cfg="cfail2", except="hir_owner_nodes,optimized_mir")] #[rustc_clean(cfg="cfail3")] pub fn change_rhs_of_arm(x: u32) -> u32 { match x { 0 => 0, 1 => 3, _ => 2, } } // Add Alternative To Arm ------------------------------------------------------ #[cfg(cfail1)] pub fn add_alternative_to_arm(x: u32) -> u32 { match x { 0 => 0, 1 => 1, _ => 2, } } #[cfg(not(cfail1))] #[rustc_clean(cfg="cfail2", except="hir_owner_nodes,optimized_mir,typeck_tables_of")] #[rustc_clean(cfg="cfail3")] pub fn add_alternative_to_arm(x: u32) -> u32 { match x { 0 | 7 => 0, 1 => 3, _ => 2, } }
21.144201
83
0.489548
89420a4227e712cd78470664d1af7c8ea4ae90c7
5,184
use core::marker::PhantomData;
use core::mem::MaybeUninit;

use cortex_m::peripheral::scb::VectActive;
use cortex_m::peripheral::{NVIC, SCB};
use embassy::interrupt::{Interrupt, InterruptExt};

/// A type which can be used as state with `PeripheralMutex`.
///
/// It needs to be `Send` because `&mut` references are sent back and forth between the 'thread' which owns the `PeripheralMutex` and the interrupt,
/// and `&mut T` is only `Send` where `T: Send`.
pub trait PeripheralState: Send {
    type Interrupt: Interrupt;
    fn on_interrupt(&mut self);
}

pub struct StateStorage<S>(MaybeUninit<S>);

impl<S> StateStorage<S> {
    pub const fn new() -> Self {
        Self(MaybeUninit::uninit())
    }
}

pub struct PeripheralMutex<'a, S: PeripheralState> {
    state: *mut S,
    _phantom: PhantomData<&'a mut S>,
    irq: S::Interrupt,
}

/// Whether `irq` can be preempted by the current interrupt.
pub(crate) fn can_be_preempted(irq: &impl Interrupt) -> bool {
    match SCB::vect_active() {
        // Thread mode can't preempt anything.
        VectActive::ThreadMode => false,
        // Exceptions don't always preempt interrupts,
        // but there isn't much of a good reason to be keeping a `PeripheralMutex` in an exception anyway.
        VectActive::Exception(_) => true,
        VectActive::Interrupt { irqn } => {
            #[derive(Clone, Copy)]
            struct NrWrap(u16);
            unsafe impl cortex_m::interrupt::InterruptNumber for NrWrap {
                fn number(self) -> u16 {
                    self.0
                }
            }
            NVIC::get_priority(NrWrap(irqn.into())) < irq.get_priority().into()
        }
    }
}

impl<'a, S: PeripheralState> PeripheralMutex<'a, S> {
    /// Create a new `PeripheralMutex` wrapping `irq`, with `init` providing the initial state.
    ///
    /// This requires `S` to live for `'static`, because if the `PeripheralMutex` is leaked, the
    /// interrupt won't be disabled and may try to access the state at any time. To use non-`'static`
    /// state, see [`Self::new_unchecked`].
    ///
    /// Registers `on_interrupt` as the `irq`'s handler, and enables it.
    pub fn new(
        irq: S::Interrupt,
        storage: &'a mut StateStorage<S>,
        init: impl FnOnce() -> S,
    ) -> Self
    where
        'a: 'static,
    {
        // safety: safe because state is `'static`.
        unsafe { Self::new_unchecked(irq, storage, init) }
    }

    /// Create a `PeripheralMutex` without requiring the state to be `'static`.
    ///
    /// See also [`Self::new`].
    ///
    /// # Safety
    /// The created instance must not be leaked (its `drop` must run).
    pub unsafe fn new_unchecked(
        irq: S::Interrupt,
        storage: &'a mut StateStorage<S>,
        init: impl FnOnce() -> S,
    ) -> Self {
        if can_be_preempted(&irq) {
            panic!("`PeripheralMutex` cannot be created in an interrupt with higher priority than the interrupt it wraps");
        }

        let state_ptr = storage.0.as_mut_ptr();

        // Safety: The pointer is valid and not used by anyone else
        // because we have the `&mut StateStorage`.
        state_ptr.write(init());

        irq.disable();
        irq.set_handler(|p| {
            // Safety: it's OK to get a &mut to the state, since
            // - We checked that the thread owning the `PeripheralMutex` can't preempt us in `new`.
            //   Interrupts' priorities can only be changed with raw embassy `Interrupts`,
            //   which can't safely store a `PeripheralMutex` across invocations.
            // - We can't have preempted a with() call because the irq is disabled during it.
            let state = &mut *(p as *mut S);
            state.on_interrupt();
        });
        irq.set_handler_context(state_ptr as *mut ());
        irq.enable();

        Self {
            irq,
            state: state_ptr,
            _phantom: PhantomData,
        }
    }

    pub fn with<R>(&mut self, f: impl FnOnce(&mut S) -> R) -> R {
        self.irq.disable();

        // Safety: it's OK to get a &mut to the state, since the irq is disabled.
let state = unsafe { &mut *self.state }; let r = f(state); self.irq.enable(); r } /// Returns whether the wrapped interrupt is currently in a pending state. pub fn is_pending(&self) -> bool { self.irq.is_pending() } /// Forces the wrapped interrupt into a pending state. pub fn pend(&self) { self.irq.pend() } /// Forces the wrapped interrupt out of a pending state. pub fn unpend(&self) { self.irq.unpend() } /// Gets the priority of the wrapped interrupt. pub fn priority(&self) -> <S::Interrupt as Interrupt>::Priority { self.irq.get_priority() } } impl<'a, S: PeripheralState> Drop for PeripheralMutex<'a, S> { fn drop(&mut self) { self.irq.disable(); self.irq.remove_handler(); // safety: // - we initialized the state in `new`, so we know it's initialized. // - the irq is disabled, so it won't preempt us while dropping. unsafe { self.state.drop_in_place() } } }
33.019108
148
0.593171
e57c6eb3e671db64e99ab34f6a3a81b6f8ff7abb
20,986
use crate::{ command::Command, physics::{Collider, Joint, RigidBody}, scene::commands::SceneContext, Physics, }; use rg3d::{ core::{ algebra::{UnitQuaternion, Vector3}, pool::{ErasedHandle, Handle, Ticket}, }, physics3d::desc::{ColliderShapeDesc, JointParamsDesc}, scene::node::Node, }; #[derive(Debug)] pub struct AddJointCommand { ticket: Option<Ticket<Joint>>, handle: Handle<Joint>, joint: Option<Joint>, } impl AddJointCommand { pub fn new(node: Joint) -> Self { Self { ticket: None, handle: Default::default(), joint: Some(node), } } } impl<'a> Command<'a> for AddJointCommand { type Context = SceneContext<'a>; fn name(&mut self, _context: &Self::Context) -> String { "Add Joint".to_owned() } fn execute(&mut self, context: &mut Self::Context) { match self.ticket.take() { None => { self.handle = context .editor_scene .physics .joints .spawn(self.joint.take().unwrap()); } Some(ticket) => { let handle = context .editor_scene .physics .joints .put_back(ticket, self.joint.take().unwrap()); assert_eq!(handle, self.handle); } } } fn revert(&mut self, context: &mut Self::Context) { let (ticket, node) = context .editor_scene .physics .joints .take_reserve(self.handle); self.ticket = Some(ticket); self.joint = Some(node); } fn finalize(&mut self, context: &mut Self::Context) { if let Some(ticket) = self.ticket.take() { context.editor_scene.physics.joints.forget_ticket(ticket) } } } #[derive(Debug)] pub struct DeleteJointCommand { handle: Handle<Joint>, ticket: Option<Ticket<Joint>>, node: Option<Joint>, } impl DeleteJointCommand { pub fn new(handle: Handle<Joint>) -> Self { Self { handle, ticket: None, node: None, } } } impl<'a> Command<'a> for DeleteJointCommand { type Context = SceneContext<'a>; fn name(&mut self, _context: &Self::Context) -> String { "Delete Joint".to_owned() } fn execute(&mut self, context: &mut Self::Context) { let (ticket, node) = context .editor_scene .physics .joints .take_reserve(self.handle); self.node = Some(node); self.ticket = Some(ticket); } fn revert(&mut self, context: &mut Self::Context) { self.handle = context .editor_scene .physics .joints .put_back(self.ticket.take().unwrap(), self.node.take().unwrap()); } fn finalize(&mut self, context: &mut Self::Context) { if let Some(ticket) = self.ticket.take() { context.editor_scene.physics.joints.forget_ticket(ticket) } } } #[derive(Debug)] pub struct SetBodyCommand { node: Handle<Node>, ticket: Option<Ticket<RigidBody>>, handle: Handle<RigidBody>, body: Option<RigidBody>, } impl SetBodyCommand { pub fn new(node: Handle<Node>, body: RigidBody) -> Self { Self { node, ticket: None, handle: Default::default(), body: Some(body), } } } impl<'a> Command<'a> for SetBodyCommand { type Context = SceneContext<'a>; fn name(&mut self, _context: &Self::Context) -> String { "Set Node Body".to_owned() } fn execute(&mut self, context: &mut Self::Context) { match self.ticket.take() { None => { self.handle = context .editor_scene .physics .bodies .spawn(self.body.take().unwrap()); } Some(ticket) => { context .editor_scene .physics .bodies .put_back(ticket, self.body.take().unwrap()); } } context .editor_scene .physics .binder .insert(self.node, self.handle); } fn revert(&mut self, context: &mut Self::Context) { let (ticket, node) = context .editor_scene .physics .bodies .take_reserve(self.handle); self.ticket = Some(ticket); self.body = Some(node); context .editor_scene .physics .binder .remove_by_key(&self.node); } fn finalize(&mut self, context: &mut Self::Context) { if let Some(ticket) = self.ticket.take() { 
context.editor_scene.physics.bodies.forget_ticket(ticket); context .editor_scene .physics .binder .remove_by_key(&self.node); } } } #[derive(Debug)] pub struct SetColliderCommand { body: Handle<RigidBody>, ticket: Option<Ticket<Collider>>, handle: Handle<Collider>, collider: Option<Collider>, } impl SetColliderCommand { pub fn new(body: Handle<RigidBody>, collider: Collider) -> Self { Self { body, ticket: None, handle: Default::default(), collider: Some(collider), } } } impl<'a> Command<'a> for SetColliderCommand { type Context = SceneContext<'a>; fn name(&mut self, _context: &Self::Context) -> String { "Set Collider".to_owned() } fn execute(&mut self, context: &mut Self::Context) { match self.ticket.take() { None => { self.handle = context .editor_scene .physics .colliders .spawn(self.collider.take().unwrap()); } Some(ticket) => { context .editor_scene .physics .colliders .put_back(ticket, self.collider.take().unwrap()); } } context.editor_scene.physics.colliders[self.handle].parent = self.body.into(); context.editor_scene.physics.bodies[self.body] .colliders .push(self.handle.into()); } fn revert(&mut self, context: &mut Self::Context) { let (ticket, mut collider) = context .editor_scene .physics .colliders .take_reserve(self.handle); collider.parent = Default::default(); self.ticket = Some(ticket); self.collider = Some(collider); let body = &mut context.editor_scene.physics.bodies[self.body]; body.colliders.remove( body.colliders .iter() .position(|&c| c == ErasedHandle::from(self.handle)) .unwrap(), ); } fn finalize(&mut self, context: &mut Self::Context) { if let Some(ticket) = self.ticket.take() { context.editor_scene.physics.colliders.forget_ticket(ticket); } } } #[derive(Debug)] pub struct DeleteBodyCommand { handle: Handle<RigidBody>, ticket: Option<Ticket<RigidBody>>, body: Option<RigidBody>, node: Handle<Node>, } impl DeleteBodyCommand { pub fn new(handle: Handle<RigidBody>) -> Self { Self { handle, ticket: None, body: None, node: Handle::NONE, } } } impl<'a> Command<'a> for DeleteBodyCommand { type Context = SceneContext<'a>; fn name(&mut self, _context: &Self::Context) -> String { "Delete Body".to_owned() } fn execute(&mut self, context: &mut Self::Context) { let (ticket, node) = context .editor_scene .physics .bodies .take_reserve(self.handle); self.body = Some(node); self.ticket = Some(ticket); self.node = context.editor_scene.physics.unbind_by_body(self.handle); } fn revert(&mut self, context: &mut Self::Context) { self.handle = context .editor_scene .physics .bodies .put_back(self.ticket.take().unwrap(), self.body.take().unwrap()); context .editor_scene .physics .binder .insert(self.node, self.handle); } fn finalize(&mut self, context: &mut Self::Context) { if let Some(ticket) = self.ticket.take() { context.editor_scene.physics.bodies.forget_ticket(ticket) } } } #[derive(Debug)] pub struct DeleteColliderCommand { handle: Handle<Collider>, ticket: Option<Ticket<Collider>>, collider: Option<Collider>, body: Handle<RigidBody>, } impl DeleteColliderCommand { pub fn new(handle: Handle<Collider>) -> Self { Self { handle, ticket: None, collider: None, body: Handle::NONE, } } } impl<'a> Command<'a> for DeleteColliderCommand { type Context = SceneContext<'a>; fn name(&mut self, _context: &Self::Context) -> String { "Delete Collider".to_owned() } fn execute(&mut self, context: &mut Self::Context) { let (ticket, collider) = context .editor_scene .physics .colliders .take_reserve(self.handle); self.body = collider.parent.into(); self.collider = Some(collider); self.ticket = 
Some(ticket); let body = &mut context.editor_scene.physics.bodies[self.body]; body.colliders.remove( body.colliders .iter() .position(|&c| c == ErasedHandle::from(self.handle)) .unwrap(), ); } fn revert(&mut self, context: &mut Self::Context) { self.handle = context .editor_scene .physics .colliders .put_back(self.ticket.take().unwrap(), self.collider.take().unwrap()); let body = &mut context.editor_scene.physics.bodies[self.body]; body.colliders.push(self.handle.into()); } fn finalize(&mut self, context: &mut Self::Context) { if let Some(ticket) = self.ticket.take() { context.editor_scene.physics.colliders.forget_ticket(ticket) } } } macro_rules! define_physics_command { ($name:ident($human_readable_name:expr, $handle_type:ty, $value_type:ty) where fn swap($self:ident, $physics:ident) $apply_method:block ) => { #[derive(Debug)] pub struct $name { handle: Handle<$handle_type>, value: $value_type, } impl $name { pub fn new(handle: Handle<$handle_type>, value: $value_type) -> Self { Self { handle, value } } fn swap(&mut $self, $physics: &mut Physics) { $apply_method } } impl<'a> Command<'a> for $name { type Context = SceneContext<'a>; fn name(&mut self, _context: &Self::Context) -> String { $human_readable_name.to_owned() } fn execute(&mut self, context: &mut Self::Context) { self.swap(&mut context.editor_scene.physics); } fn revert(&mut self, context: &mut Self::Context) { self.swap(&mut context.editor_scene.physics); } } }; } macro_rules! define_body_command { ($name:ident($human_readable_name:expr, $value_type:ty) where fn swap($self:ident, $physics: ident, $body:ident) $apply_method:block ) => { define_physics_command!($name($human_readable_name, RigidBody, $value_type) where fn swap($self, $physics) { let $body = &mut $physics.bodies[$self.handle]; $apply_method }); }; } macro_rules! define_collider_command { ($name:ident($human_readable_name:expr, $value_type:ty) where fn swap($self:ident, $physics:ident, $collider:ident) $apply_method:block ) => { define_physics_command!($name($human_readable_name, Collider, $value_type) where fn swap($self, $physics) { let $collider = &mut $physics.colliders[$self.handle]; $apply_method }); }; } macro_rules! define_joint_command { ($name:ident($human_readable_name:expr, $value_type:ty) where fn swap($self:ident, $physics:ident, $joint:ident) $apply_method:block ) => { define_physics_command!($name($human_readable_name, Joint, $value_type) where fn swap($self, $physics) { let $joint = &mut $physics.joints[$self.handle]; $apply_method }); }; } macro_rules! define_joint_variant_command { ($name:ident($human_readable_name:expr, $value_type:ty) where fn swap($self:ident, $physics:ident, $variant:ident, $var:ident) $apply_method:block ) => { define_physics_command!($name($human_readable_name, Joint, $value_type) where fn swap($self, $physics) { let joint = &mut $physics.joints[$self.handle]; if let JointParamsDesc::$variant($var) = &mut joint.params { $apply_method } else { unreachable!(); } }); }; } macro_rules! 
define_collider_variant_command { ($name:ident($human_readable_name:expr, $value_type:ty) where fn swap($self:ident, $physics:ident, $variant:ident, $var:ident) $apply_method:block ) => { define_physics_command!($name($human_readable_name, Collider, $value_type) where fn swap($self, $physics) { let collider = &mut $physics.colliders[$self.handle]; if let ColliderShapeDesc::$variant($var) = &mut collider.shape { $apply_method } else { unreachable!(); } }); }; } define_body_command!(SetBodyMassCommand("Set Body Mass", f32) where fn swap(self, physics, body) { std::mem::swap(&mut body.mass, &mut self.value); }); define_collider_command!(SetColliderFrictionCommand("Set Collider Friction", f32) where fn swap(self, physics, collider) { std::mem::swap(&mut collider.friction, &mut self.value); }); define_collider_command!(SetColliderRestitutionCommand("Set Collider Restitution", f32) where fn swap(self, physics, collider) { std::mem::swap(&mut collider.restitution, &mut self.value); }); define_collider_command!(SetColliderPositionCommand("Set Collider Position", Vector3<f32>) where fn swap(self, physics, collider) { std::mem::swap(&mut collider.translation, &mut self.value); }); define_collider_command!(SetColliderRotationCommand("Set Collider Rotation", UnitQuaternion<f32>) where fn swap(self, physics, collider) { std::mem::swap(&mut collider.rotation, &mut self.value); }); define_collider_command!(SetColliderIsSensorCommand("Set Collider Is Sensor", bool) where fn swap(self, physics, collider) { std::mem::swap(&mut collider.is_sensor, &mut self.value); }); define_collider_command!(SetColliderCollisionGroupsMembershipsCommand("Set Collider Collision Groups Memberships", u32) where fn swap(self, physics, collider) { std::mem::swap(&mut collider.collision_groups.memberships, &mut self.value); }); define_collider_command!(SetColliderCollisionGroupsFilterCommand("Set Collider Collision Groups Filter", u32) where fn swap(self, physics, collider) { std::mem::swap(&mut collider.collision_groups.filter, &mut self.value); }); define_collider_variant_command!(SetCylinderHalfHeightCommand("Set Cylinder Half Height", f32) where fn swap(self, physics, Cylinder, cylinder) { std::mem::swap(&mut cylinder.half_height, &mut self.value); }); define_collider_variant_command!(SetCylinderRadiusCommand("Set Cylinder Radius", f32) where fn swap(self, physics, Cylinder, cylinder) { std::mem::swap(&mut cylinder.radius, &mut self.value); }); define_collider_variant_command!(SetConeHalfHeightCommand("Set Cone Half Height", f32) where fn swap(self, physics, Cone, cone) { std::mem::swap(&mut cone.half_height, &mut self.value); }); define_collider_variant_command!(SetConeRadiusCommand("Set Cone Radius", f32) where fn swap(self, physics, Cone, cone) { std::mem::swap(&mut cone.radius, &mut self.value); }); define_collider_variant_command!(SetCuboidHalfExtentsCommand("Set Cuboid Half Extents", Vector3<f32>) where fn swap(self, physics, Cuboid, cuboid) { std::mem::swap(&mut cuboid.half_extents, &mut self.value); }); define_collider_variant_command!(SetCapsuleRadiusCommand("Set Capsule Radius", f32) where fn swap(self, physics, Capsule, capsule) { std::mem::swap(&mut capsule.radius, &mut self.value); }); define_collider_variant_command!(SetCapsuleBeginCommand("Set Capsule Begin", Vector3<f32>) where fn swap(self, physics, Capsule, capsule) { std::mem::swap(&mut capsule.begin, &mut self.value); }); define_collider_variant_command!(SetCapsuleEndCommand("Set Capsule End", Vector3<f32>) where fn swap(self, physics, Capsule, capsule) { 
std::mem::swap(&mut capsule.end, &mut self.value); }); define_collider_variant_command!(SetBallRadiusCommand("Set Ball Radius", f32) where fn swap(self, physics, Ball, ball) { std::mem::swap(&mut ball.radius, &mut self.value); }); define_joint_variant_command!(SetBallJointAnchor1Command("Set Ball Joint Anchor 1", Vector3<f32>) where fn swap(self, physics, BallJoint, ball) { std::mem::swap(&mut ball.local_anchor1, &mut self.value); }); define_joint_variant_command!(SetBallJointAnchor2Command("Set Ball Joint Anchor 2", Vector3<f32>) where fn swap(self, physics, BallJoint, ball) { std::mem::swap(&mut ball.local_anchor2, &mut self.value); }); define_joint_variant_command!(SetFixedJointAnchor1TranslationCommand("Set Fixed Joint Anchor 1 Translation", Vector3<f32>) where fn swap(self, physics, FixedJoint, fixed) { std::mem::swap(&mut fixed.local_anchor1_translation, &mut self.value); }); define_joint_variant_command!(SetFixedJointAnchor2TranslationCommand("Set Fixed Joint Anchor 2 Translation", Vector3<f32>) where fn swap(self, physics, FixedJoint, fixed) { std::mem::swap(&mut fixed.local_anchor2_translation, &mut self.value); }); define_joint_variant_command!(SetFixedJointAnchor1RotationCommand("Set Fixed Joint Anchor 1 Rotation", UnitQuaternion<f32>) where fn swap(self, physics, FixedJoint, fixed) { std::mem::swap(&mut fixed.local_anchor1_rotation, &mut self.value); }); define_joint_variant_command!(SetFixedJointAnchor2RotationCommand("Set Fixed Joint Anchor 2 Rotation", UnitQuaternion<f32>) where fn swap(self, physics, FixedJoint, fixed) { std::mem::swap(&mut fixed.local_anchor2_rotation, &mut self.value); }); define_joint_variant_command!(SetRevoluteJointAnchor1Command("Set Revolute Joint Anchor 1", Vector3<f32>) where fn swap(self, physics, RevoluteJoint, revolute) { std::mem::swap(&mut revolute.local_anchor1, &mut self.value); }); define_joint_variant_command!(SetRevoluteJointAxis1Command("Set Revolute Joint Axis 1", Vector3<f32>) where fn swap(self, physics, RevoluteJoint, revolute) { std::mem::swap(&mut revolute.local_axis1, &mut self.value); }); define_joint_variant_command!(SetRevoluteJointAnchor2Command("Set Revolute Joint Anchor 2", Vector3<f32>) where fn swap(self, physics, RevoluteJoint, revolute) { std::mem::swap(&mut revolute.local_anchor2, &mut self.value); }); define_joint_variant_command!(SetRevoluteJointAxis2Command("Set Prismatic Joint Axis 2", Vector3<f32>) where fn swap(self, physics, RevoluteJoint, revolute) { std::mem::swap(&mut revolute.local_axis2, &mut self.value); }); define_joint_variant_command!(SetPrismaticJointAnchor1Command("Set Prismatic Joint Anchor 1", Vector3<f32>) where fn swap(self, physics, PrismaticJoint, prismatic) { std::mem::swap(&mut prismatic.local_anchor1, &mut self.value); }); define_joint_variant_command!(SetPrismaticJointAxis1Command("Set Prismatic Joint Axis 1", Vector3<f32>) where fn swap(self, physics, PrismaticJoint, prismatic) { std::mem::swap(&mut prismatic.local_axis1, &mut self.value); }); define_joint_variant_command!(SetPrismaticJointAnchor2Command("Set Prismatic Joint Anchor 2", Vector3<f32>) where fn swap(self, physics, PrismaticJoint, prismatic) { std::mem::swap(&mut prismatic.local_anchor2, &mut self.value); }); define_joint_variant_command!(SetPrismaticJointAxis2Command("Set Prismatic Joint Axis 2", Vector3<f32>) where fn swap(self, physics, PrismaticJoint, prismatic) { std::mem::swap(&mut prismatic.local_axis2, &mut self.value); }); define_joint_command!(SetJointConnectedBodyCommand("Set Joint Connected Body", ErasedHandle) where 
fn swap(self, physics, joint) { std::mem::swap(&mut joint.body2, &mut self.value); });
34.179153
173
0.614553
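Every property command in the physics command file above follows the same swap-based pattern: executing and reverting a command are the identical operation, a `std::mem::swap` between the stored value and the target field, which is what the `define_*_command!` macros expand to. A minimal, self-contained sketch of that pattern (the `Ball` struct and `SetRadiusCommand` here are hypothetical stand-ins, not the editor's real generated types):

use std::mem;

// Hypothetical target type, standing in for a collider variant in the scene.
struct Ball {
    radius: f32,
}

// Swap-based command: `execute` and `revert` are the same swap, so the command
// always holds "the other" value and can be replayed in either direction.
struct SetRadiusCommand {
    value: f32,
}

impl SetRadiusCommand {
    fn swap(&mut self, ball: &mut Ball) {
        mem::swap(&mut ball.radius, &mut self.value);
    }

    fn execute(&mut self, ball: &mut Ball) {
        self.swap(ball); // the previous radius is now stored in `self.value`
    }

    fn revert(&mut self, ball: &mut Ball) {
        self.swap(ball); // restores the previous radius
    }
}

fn main() {
    let mut ball = Ball { radius: 1.0 };
    let mut cmd = SetRadiusCommand { value: 2.5 };
    cmd.execute(&mut ball);
    assert_eq!(ball.radius, 2.5);
    cmd.revert(&mut ball);
    assert_eq!(ball.radius, 1.0);
}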
ac4b3b5d13ca0a25836c63ffdc4be02b38b5ece0
1,957
use std::ops::{Deref, DerefMut}; pub trait SizedArray<T>: Copy + Clone { type Storage: AsRef<[T]> + AsMut<[T]> + Copy + Clone; const ROWS: usize; const COLS: usize; fn all(alpha: T) -> Self::Storage; } macro_rules! sized_array_impl { ($type: ident, $rows: expr, $cols: expr) => { #[derive(Copy, Clone)] pub struct $type; impl<T: Copy> $crate::core::SizedArray<T> for $type { type Storage = [T; $rows * $cols]; const ROWS: usize = $rows; const COLS: usize = $cols; fn all(alpha: T) -> Self::Storage { [alpha; $rows * $cols] } } }; } sized_array_impl!(SizedArray12, 1, 2); sized_array_impl!(SizedArray13, 1, 3); sized_array_impl!(SizedArray14, 1, 4); sized_array_impl!(SizedArray16, 1, 6); sized_array_impl!(SizedArray21, 2, 1); sized_array_impl!(SizedArray31, 3, 1); sized_array_impl!(SizedArray41, 4, 1); sized_array_impl!(SizedArray61, 6, 1); sized_array_impl!(SizedArray22, 2, 2); sized_array_impl!(SizedArray23, 2, 3); sized_array_impl!(SizedArray32, 3, 2); sized_array_impl!(SizedArray33, 3, 3); sized_array_impl!(SizedArray34, 3, 4); sized_array_impl!(SizedArray43, 4, 3); sized_array_impl!(SizedArray44, 4, 4); // fixme workaround for missing standard derives in stdlib for arrays with len > 32 #[derive(Clone, Copy)] #[repr(C)] pub struct Array66<T>([T; 6 * 6]); impl<T> AsRef<[T]> for Array66<T> { fn as_ref(&self) -> &[T] { self.0.as_ref() } } impl<T> AsMut<[T]> for Array66<T> { fn as_mut(&mut self) -> &mut [T] { self.0.as_mut() } } impl<T> Deref for Array66<T> { type Target = [T]; fn deref(&self) -> &Self::Target { self.as_ref() } } impl<T> DerefMut for Array66<T> { fn deref_mut(&mut self) -> &mut Self::Target { self.as_mut() } } #[derive(Copy, Clone)] pub struct SizedArray66; impl<T: Copy> SizedArray<T> for SizedArray66 { type Storage = Array66<T>; const ROWS: usize = 6; const COLS: usize = 6; fn all(alpha: T) -> Self::Storage { Array66([alpha; 6 * 6]) } }
20.819149
83
0.652529
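The `SizedArray` marker types above exist so that matrix code can be generic over a compile-time shape while still using plain fixed-size arrays for storage. A small usage sketch, assuming the trait and the `SizedArray33` marker are in scope; `filled` and `demo` are illustrative names only, not part of the original crate:

// Builds the fixed-size storage for any shape marker, filled with `alpha`.
fn filled<S: SizedArray<T>, T: Copy>(alpha: T) -> S::Storage {
    S::all(alpha)
}

fn demo() {
    // 3x3 storage of f64 with every element 0.0; the length is ROWS * COLS = 9.
    let m = filled::<SizedArray33, f64>(0.0);
    assert_eq!(
        m.len(),
        <SizedArray33 as SizedArray<f64>>::ROWS * <SizedArray33 as SizedArray<f64>>::COLS
    );
}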
39343aa694879990ddef190402537eba2b76789f
9,710
mod extension; mod mock_types; mod pallet_pink; use std::time::{Duration, Instant}; use crate::types::{AccountId, Balance, BlockNumber, Hash, Hashing, Index}; use frame_support::{ parameter_types, traits::ConstU128, weights::{constants::WEIGHT_PER_SECOND, Weight}, }; use pallet_contracts::{Config, Frame, Schedule}; use sp_runtime::{ generic::Header, traits::{Convert, IdentityLookup}, Perbill, }; pub use extension::{get_side_effects, ExecSideEffects}; pub use pink_extension::{Message, OspMessage, PinkEvent}; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<PinkRuntime>; type Block = frame_system::mocking::MockBlock<PinkRuntime>; frame_support::construct_runtime! { pub enum PinkRuntime where Block = Block, NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Pallet, Call, Config, Storage, Event<T>}, Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, Randomness: pallet_randomness_collective_flip::{Pallet, Storage}, Contracts: pallet_contracts::{Pallet, Call, Storage, Event<T>}, Pink: pallet_pink::{Pallet, Storage}, } } parameter_types! { pub const BlockHashCount: u32 = 250; pub RuntimeBlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max(2 * WEIGHT_PER_SECOND); pub static ExistentialDeposit: u64 = 0; } impl pallet_pink::Config for PinkRuntime {} impl frame_system::Config for PinkRuntime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = RuntimeBlockWeights; type BlockLength = (); type DbWeight = (); type Origin = Origin; type Index = Index; type BlockNumber = BlockNumber; type Hash = Hash; type Call = Call; type Hashing = Hashing; type AccountId = AccountId; type Lookup = IdentityLookup<Self::AccountId>; type Header = Header<Self::BlockNumber, Self::Hashing>; type Event = Event; type BlockHashCount = BlockHashCount; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData<Balance>; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); type OnSetCode = (); type MaxConsumers = frame_support::traits::ConstU32<16>; } impl pallet_randomness_collective_flip::Config for PinkRuntime {} parameter_types! { pub const MinimumPeriod: u64 = 1; } impl pallet_timestamp::Config for PinkRuntime { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; type WeightInfo = (); } parameter_types! 
{ pub const SignedClaimHandicap: u32 = 2; pub const TombstoneDeposit: u64 = 16; pub const DepositPerContract: u64 = 8 * DepositPerStorageByte::get(); pub const DepositPerStorageByte: u64 = 10_000; pub const DepositPerStorageItem: u64 = 10_000; pub RentFraction: Perbill = Perbill::from_rational(4u32, 10_000u32); pub const SurchargeReward: u64 = 500_000; pub const MaxValueSize: u32 = 16_384; pub const DeletionQueueDepth: u32 = 1024; pub const DeletionWeightLimit: Weight = 500_000_000_000; pub const MaxCodeSize: u32 = 2 * 1024 * 1024; pub DefaultSchedule: Schedule<PinkRuntime> = { let mut schedule = <Schedule<PinkRuntime>>::default(); schedule.limits.code_len = MaxCodeSize::get(); schedule }; pub const TransactionByteFee: u64 = 0; } impl Convert<Weight, Balance> for PinkRuntime { fn convert(w: Weight) -> Balance { w as _ } } impl Config for PinkRuntime { type Time = Timestamp; type Randomness = Randomness; type Currency = mock_types::NoCurrency; type Event = Event; type Call = Call; type CallFilter = frame_support::traits::Everything; type CallStack = [Frame<Self>; 31]; type WeightPrice = Self; type WeightInfo = (); type ChainExtension = extension::PinkExtension; type DeletionQueueDepth = DeletionQueueDepth; type DeletionWeightLimit = DeletionWeightLimit; type Schedule = DefaultSchedule; type DepositPerByte = ConstU128<0>; type DepositPerItem = ConstU128<0>; type AddressGenerator = Pink; type ContractAccessWeight = pallet_contracts::DefaultContractAccessWeight<RuntimeBlockWeights>; } #[derive(Clone, Copy)] pub enum CallMode { Query, Command, } struct CallInfo { mode: CallMode, start_at: Instant, } environmental::environmental!(call_info: CallInfo); pub fn using_mode<T>(mode: CallMode, f: impl FnOnce() -> T) -> T { let mut info = CallInfo { mode, start_at: Instant::now(), }; call_info::using(&mut info, f) } pub fn get_call_mode() -> Option<CallMode> { call_info::with(|info| info.mode) } pub fn get_call_elapsed() -> Option<Duration> { call_info::with(|info| info.start_at.elapsed()) } #[cfg(test)] mod tests { use pallet_contracts::Config; use sp_runtime::{traits::Hash, AccountId32}; use crate::{ runtime::{Contracts, Origin, PinkRuntime}, types::{ENOUGH, GAS_LIMIT}, }; pub use frame_support::weights::Weight; pub fn compile_wat<T>(wat_bytes: &[u8]) -> wat::Result<(Vec<u8>, <T::Hashing as Hash>::Output)> where T: frame_system::Config, { let wasm_binary = wat::parse_bytes(wat_bytes)?.into_owned(); let code_hash = T::Hashing::hash(&wasm_binary); Ok((wasm_binary, code_hash)) } #[test] pub fn contract_test() { use scale::Encode; pub const ALICE: AccountId32 = AccountId32::new([1u8; 32]); let (wasm, code_hash) = compile_wat::<PinkRuntime>(include_bytes!("../tests/fixtures/event_size.wat")).unwrap(); exec::execute_with(|| { Contracts::instantiate_with_code( Origin::signed(ALICE), ENOUGH, GAS_LIMIT, None, wasm, vec![], vec![], ) .unwrap(); let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); Contracts::call( Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT * 2, None, <PinkRuntime as Config>::Schedule::get() .limits .payload_len .encode(), ) .unwrap(); }); log::info!("contract OK"); } #[test] pub fn crypto_hashes_test() { pub const ALICE: AccountId32 = AccountId32::new([1u8; 32]); const GAS_LIMIT: Weight = 1_000_000_000_000_000; let (wasm, code_hash) = compile_wat::<PinkRuntime>(include_bytes!("../tests/fixtures/crypto_hashes.wat")) .unwrap(); exec::execute_with(|| { // Instantiate the CRYPTO_HASHES contract. 
assert!(Contracts::instantiate_with_code( Origin::signed(ALICE), 1_000_000_000_000_000, GAS_LIMIT, None, wasm, vec![], vec![], ) .is_ok()); let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Perform the call. let input = b"_DEAD_BEEF"; use sp_io::hashing::*; // Wraps a hash function into a more dynamic form usable for testing. macro_rules! dyn_hash_fn { ($name:ident) => { Box::new(|input| $name(input).as_ref().to_vec().into_boxed_slice()) }; } // All hash functions and their associated output byte lengths. let test_cases: &[(Box<dyn Fn(&[u8]) -> Box<[u8]>>, usize)] = &[ (dyn_hash_fn!(sha2_256), 32), (dyn_hash_fn!(keccak_256), 32), (dyn_hash_fn!(blake2_256), 32), (dyn_hash_fn!(blake2_128), 16), ]; // Test the given hash functions for the input: "_DEAD_BEEF" for (n, (hash_fn, expected_size)) in test_cases.iter().enumerate() { // We offset data in the contract tables by 1. let mut params = vec![(n + 1) as u8]; params.extend_from_slice(input); let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, params, false) .result .unwrap(); assert!(!result.did_revert()); let expected = hash_fn(input.as_ref()); assert_eq!(&result.data[..*expected_size], &*expected); } }) } pub mod exec { use sp_runtime::traits::BlakeTwo256; use sp_state_machine::{Backend, Ext, OverlayedChanges, StorageTransactionCache}; pub type InMemoryBackend = sp_state_machine::InMemoryBackend<BlakeTwo256>; pub fn execute_with<R>(f: impl FnOnce() -> R) -> R { let state = InMemoryBackend::default(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); overlay.start_transaction(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); let r = sp_externalities::set_and_run_with_externalities(&mut ext, f); overlay .commit_transaction() .expect("BUG: mis-paired transaction"); r } } }
32.693603
100
0.602678
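The `using_mode` / `get_call_mode` / `get_call_elapsed` helpers in the runtime above scope a per-call `CallInfo` to the duration of one closure. The real code does this with the `environmental` crate; the sketch below reproduces only the shape of that API with a plain thread-local, so the storage mechanism here is an assumption for illustration, not the original implementation:

use std::cell::RefCell;
use std::time::{Duration, Instant};

#[derive(Clone, Copy)]
enum CallMode {
    Query,
    Command,
}

struct CallInfo {
    mode: CallMode,
    start_at: Instant,
}

thread_local! {
    // Stand-in for `environmental::environmental!(call_info: CallInfo)`.
    static CALL_INFO: RefCell<Option<CallInfo>> = RefCell::new(None);
}

fn using_mode<T>(mode: CallMode, f: impl FnOnce() -> T) -> T {
    CALL_INFO.with(|c| {
        *c.borrow_mut() = Some(CallInfo {
            mode,
            start_at: Instant::now(),
        })
    });
    let result = f();
    CALL_INFO.with(|c| *c.borrow_mut() = None);
    result
}

fn get_call_mode() -> Option<CallMode> {
    CALL_INFO.with(|c| c.borrow().as_ref().map(|info| info.mode))
}

fn get_call_elapsed() -> Option<Duration> {
    CALL_INFO.with(|c| c.borrow().as_ref().map(|info| info.start_at.elapsed()))
}

A query handler would then wrap its body in `using_mode(CallMode::Query, || ...)` and can check the elapsed time mid-call via `get_call_elapsed()`.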
080760aa81f30013d37fd6f2f2ee7764530577be
108,275
//! Compiler intrinsics. //! //! The corresponding definitions are in `librustc_codegen_llvm/intrinsic.rs`. //! The corresponding const implementations are in `librustc_mir/interpret/intrinsics.rs` //! //! # Const intrinsics //! //! Note: any changes to the constness of intrinsics should be discussed with the language team. //! This includes changes in the stability of the constness. //! //! In order to make an intrinsic usable at compile-time, one needs to copy the implementation //! from https://github.com/rust-lang/miri/blob/master/src/shims/intrinsics.rs to //! `librustc_mir/interpret/intrinsics.rs` and add a //! `#[rustc_const_unstable(feature = "foo", issue = "01234")]` to the intrinsic. //! //! If an intrinsic is supposed to be used from a `const fn` with a `rustc_const_stable` attribute, //! the intrinsic's attribute must be `rustc_const_stable`, too. Such a change should not be done //! without T-lang consulation, because it bakes a feature into the language that cannot be //! replicated in user code without compiler support. //! //! # Volatiles //! //! The volatile intrinsics provide operations intended to act on I/O //! memory, which are guaranteed to not be reordered by the compiler //! across other volatile intrinsics. See the LLVM documentation on //! [[volatile]]. //! //! [volatile]: http://llvm.org/docs/LangRef.html#volatile-memory-accesses //! //! # Atomics //! //! The atomic intrinsics provide common atomic operations on machine //! words, with multiple possible memory orderings. They obey the same //! semantics as C++11. See the LLVM documentation on [[atomics]]. //! //! [atomics]: http://llvm.org/docs/Atomics.html //! //! A quick refresher on memory ordering: //! //! * Acquire - a barrier for acquiring a lock. Subsequent reads and writes //! take place after the barrier. //! * Release - a barrier for releasing a lock. Preceding reads and writes //! take place before the barrier. //! * Sequentially consistent - sequentially consistent operations are //! guaranteed to happen in order. This is the standard mode for working //! with atomic types and is equivalent to Java's `volatile`. #![unstable( feature = "core_intrinsics", reason = "intrinsics are unlikely to ever be stabilized, instead \ they should be used through stabilized interfaces \ in the rest of the standard library", issue = "none" )] #![allow(missing_docs)] use crate::marker::DiscriminantKind; use crate::mem; #[stable(feature = "drop_in_place", since = "1.8.0")] #[rustc_deprecated( reason = "no longer an intrinsic - use `ptr::drop_in_place` directly", since = "1.18.0" )] pub use crate::ptr::drop_in_place; extern "rust-intrinsic" { // N.B., these intrinsics take raw pointers because they mutate aliased // memory, which is not valid for either `&` or `&mut`. /// Stores a value if the current value is the same as the `old` value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `compare_exchange` method by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) /// as both the `success` and `failure` parameters. For example, /// [`AtomicBool::compare_exchange`][compare_exchange]. /// /// [compare_exchange]: ../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange pub fn atomic_cxchg<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool); /// Stores a value if the current value is the same as the `old` value. 
/// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `compare_exchange` method by passing /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) /// as both the `success` and `failure` parameters. For example, /// [`AtomicBool::compare_exchange`][compare_exchange]. /// /// [compare_exchange]: ../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange pub fn atomic_cxchg_acq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool); /// Stores a value if the current value is the same as the `old` value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `compare_exchange` method by passing /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) /// as the `success` and /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) /// as the `failure` parameters. For example, /// [`AtomicBool::compare_exchange`][compare_exchange]. /// /// [compare_exchange]: ../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange pub fn atomic_cxchg_rel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool); /// Stores a value if the current value is the same as the `old` value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `compare_exchange` method by passing /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) /// as the `success` and /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) /// as the `failure` parameters. For example, /// [`AtomicBool::compare_exchange`][compare_exchange]. /// /// [compare_exchange]: ../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange pub fn atomic_cxchg_acqrel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool); /// Stores a value if the current value is the same as the `old` value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `compare_exchange` method by passing /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) /// as both the `success` and `failure` parameters. For example, /// [`AtomicBool::compare_exchange`][compare_exchange]. /// /// [compare_exchange]: ../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange pub fn atomic_cxchg_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool); /// Stores a value if the current value is the same as the `old` value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `compare_exchange` method by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) /// as the `success` and /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) /// as the `failure` parameters. For example, /// [`AtomicBool::compare_exchange`][compare_exchange]. /// /// [compare_exchange]: ../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange pub fn atomic_cxchg_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool); /// Stores a value if the current value is the same as the `old` value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `compare_exchange` method by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) /// as the `success` and /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) /// as the `failure` parameters. For example, /// [`AtomicBool::compare_exchange`][compare_exchange]. 
/// /// [compare_exchange]: ../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange pub fn atomic_cxchg_failacq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool); /// Stores a value if the current value is the same as the `old` value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `compare_exchange` method by passing /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) /// as the `success` and /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) /// as the `failure` parameters. For example, /// [`AtomicBool::compare_exchange`][compare_exchange]. /// /// [compare_exchange]: ../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange pub fn atomic_cxchg_acq_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool); /// Stores a value if the current value is the same as the `old` value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `compare_exchange` method by passing /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) /// as the `success` and /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) /// as the `failure` parameters. For example, /// [`AtomicBool::compare_exchange`][compare_exchange]. /// /// [compare_exchange]: ../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange pub fn atomic_cxchg_acqrel_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool); /// Stores a value if the current value is the same as the `old` value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `compare_exchange_weak` method by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) /// as both the `success` and `failure` parameters. For example, /// [`AtomicBool::compare_exchange_weak`][cew]. /// /// [cew]: ../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange_weak pub fn atomic_cxchgweak<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool); /// Stores a value if the current value is the same as the `old` value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `compare_exchange_weak` method by passing /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) /// as both the `success` and `failure` parameters. For example, /// [`AtomicBool::compare_exchange_weak`][cew]. /// /// [cew]: ../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange_weak pub fn atomic_cxchgweak_acq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool); /// Stores a value if the current value is the same as the `old` value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `compare_exchange_weak` method by passing /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) /// as the `success` and /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) /// as the `failure` parameters. For example, /// [`AtomicBool::compare_exchange_weak`][cew]. /// /// [cew]: ../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange_weak pub fn atomic_cxchgweak_rel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool); /// Stores a value if the current value is the same as the `old` value. 
/// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `compare_exchange_weak` method by passing /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) /// as the `success` and /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) /// as the `failure` parameters. For example, /// [`AtomicBool::compare_exchange_weak`][cew]. /// /// [cew]: ../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange_weak pub fn atomic_cxchgweak_acqrel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool); /// Stores a value if the current value is the same as the `old` value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `compare_exchange_weak` method by passing /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) /// as both the `success` and `failure` parameters. For example, /// [`AtomicBool::compare_exchange_weak`][cew]. /// /// [cew]: ../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange_weak pub fn atomic_cxchgweak_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool); /// Stores a value if the current value is the same as the `old` value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `compare_exchange_weak` method by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) /// as the `success` and /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) /// as the `failure` parameters. For example, /// [`AtomicBool::compare_exchange_weak`][cew]. /// /// [cew]: ../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange_weak pub fn atomic_cxchgweak_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool); /// Stores a value if the current value is the same as the `old` value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `compare_exchange_weak` method by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) /// as the `success` and /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) /// as the `failure` parameters. For example, /// [`AtomicBool::compare_exchange_weak`][cew]. /// /// [cew]: ../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange_weak pub fn atomic_cxchgweak_failacq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool); /// Stores a value if the current value is the same as the `old` value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `compare_exchange_weak` method by passing /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) /// as the `success` and /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) /// as the `failure` parameters. For example, /// [`AtomicBool::compare_exchange_weak`][cew]. /// /// [cew]: ../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange_weak pub fn atomic_cxchgweak_acq_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool); /// Stores a value if the current value is the same as the `old` value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `compare_exchange_weak` method by passing /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) /// as the `success` and /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) /// as the `failure` parameters. 
For example, /// [`AtomicBool::compare_exchange_weak`][cew]. /// /// [cew]: ../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange_weak pub fn atomic_cxchgweak_acqrel_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool); /// Loads the current value of the pointer. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `load` method by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::load`](../../std/sync/atomic/struct.AtomicBool.html#method.load). pub fn atomic_load<T: Copy>(src: *const T) -> T; /// Loads the current value of the pointer. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `load` method by passing /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::load`](../../std/sync/atomic/struct.AtomicBool.html#method.load). pub fn atomic_load_acq<T: Copy>(src: *const T) -> T; /// Loads the current value of the pointer. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `load` method by passing /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::load`](../../std/sync/atomic/struct.AtomicBool.html#method.load). pub fn atomic_load_relaxed<T: Copy>(src: *const T) -> T; pub fn atomic_load_unordered<T: Copy>(src: *const T) -> T; /// Stores the value at the specified memory location. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `store` method by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::store`](../../std/sync/atomic/struct.AtomicBool.html#method.store). pub fn atomic_store<T: Copy>(dst: *mut T, val: T); /// Stores the value at the specified memory location. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `store` method by passing /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::store`](../../std/sync/atomic/struct.AtomicBool.html#method.store). pub fn atomic_store_rel<T: Copy>(dst: *mut T, val: T); /// Stores the value at the specified memory location. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `store` method by passing /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::store`](../../std/sync/atomic/struct.AtomicBool.html#method.store). pub fn atomic_store_relaxed<T: Copy>(dst: *mut T, val: T); pub fn atomic_store_unordered<T: Copy>(dst: *mut T, val: T); /// Stores the value at the specified memory location, returning the old value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `swap` method by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::swap`](../../std/sync/atomic/struct.AtomicBool.html#method.swap). pub fn atomic_xchg<T: Copy>(dst: *mut T, src: T) -> T; /// Stores the value at the specified memory location, returning the old value. 
/// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `swap` method by passing /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::swap`](../../std/sync/atomic/struct.AtomicBool.html#method.swap). pub fn atomic_xchg_acq<T: Copy>(dst: *mut T, src: T) -> T; /// Stores the value at the specified memory location, returning the old value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `swap` method by passing /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::swap`](../../std/sync/atomic/struct.AtomicBool.html#method.swap). pub fn atomic_xchg_rel<T: Copy>(dst: *mut T, src: T) -> T; /// Stores the value at the specified memory location, returning the old value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `swap` method by passing /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::swap`](../../std/sync/atomic/struct.AtomicBool.html#method.swap). pub fn atomic_xchg_acqrel<T: Copy>(dst: *mut T, src: T) -> T; /// Stores the value at the specified memory location, returning the old value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `swap` method by passing /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::swap`](../../std/sync/atomic/struct.AtomicBool.html#method.swap). pub fn atomic_xchg_relaxed<T: Copy>(dst: *mut T, src: T) -> T; /// Adds to the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_add` method by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicIsize::fetch_add`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_add). pub fn atomic_xadd<T: Copy>(dst: *mut T, src: T) -> T; /// Adds to the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_add` method by passing /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicIsize::fetch_add`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_add). pub fn atomic_xadd_acq<T: Copy>(dst: *mut T, src: T) -> T; /// Adds to the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_add` method by passing /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicIsize::fetch_add`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_add). pub fn atomic_xadd_rel<T: Copy>(dst: *mut T, src: T) -> T; /// Adds to the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_add` method by passing /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicIsize::fetch_add`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_add). 
pub fn atomic_xadd_acqrel<T: Copy>(dst: *mut T, src: T) -> T; /// Adds to the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_add` method by passing /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicIsize::fetch_add`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_add). pub fn atomic_xadd_relaxed<T: Copy>(dst: *mut T, src: T) -> T; /// Subtract from the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_sub` method by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicIsize::fetch_sub`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_sub). pub fn atomic_xsub<T: Copy>(dst: *mut T, src: T) -> T; /// Subtract from the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_sub` method by passing /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicIsize::fetch_sub`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_sub). pub fn atomic_xsub_acq<T: Copy>(dst: *mut T, src: T) -> T; /// Subtract from the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_sub` method by passing /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicIsize::fetch_sub`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_sub). pub fn atomic_xsub_rel<T: Copy>(dst: *mut T, src: T) -> T; /// Subtract from the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_sub` method by passing /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicIsize::fetch_sub`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_sub). pub fn atomic_xsub_acqrel<T: Copy>(dst: *mut T, src: T) -> T; /// Subtract from the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_sub` method by passing /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicIsize::fetch_sub`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_sub). pub fn atomic_xsub_relaxed<T: Copy>(dst: *mut T, src: T) -> T; /// Bitwise and with the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_and` method by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::fetch_and`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_and). pub fn atomic_and<T: Copy>(dst: *mut T, src: T) -> T; /// Bitwise and with the current value, returning the previous value. 
/// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_and` method by passing /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::fetch_and`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_and). pub fn atomic_and_acq<T: Copy>(dst: *mut T, src: T) -> T; /// Bitwise and with the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_and` method by passing /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::fetch_and`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_and). pub fn atomic_and_rel<T: Copy>(dst: *mut T, src: T) -> T; /// Bitwise and with the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_and` method by passing /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::fetch_and`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_and). pub fn atomic_and_acqrel<T: Copy>(dst: *mut T, src: T) -> T; /// Bitwise and with the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_and` method by passing /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::fetch_and`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_and). pub fn atomic_and_relaxed<T: Copy>(dst: *mut T, src: T) -> T; /// Bitwise nand with the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic::AtomicBool` type via the `fetch_nand` method by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::fetch_nand`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_nand). pub fn atomic_nand<T: Copy>(dst: *mut T, src: T) -> T; /// Bitwise nand with the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic::AtomicBool` type via the `fetch_nand` method by passing /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::fetch_nand`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_nand). pub fn atomic_nand_acq<T: Copy>(dst: *mut T, src: T) -> T; /// Bitwise nand with the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic::AtomicBool` type via the `fetch_nand` method by passing /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::fetch_nand`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_nand). pub fn atomic_nand_rel<T: Copy>(dst: *mut T, src: T) -> T; /// Bitwise nand with the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic::AtomicBool` type via the `fetch_nand` method by passing /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. 
For example, /// [`AtomicBool::fetch_nand`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_nand). pub fn atomic_nand_acqrel<T: Copy>(dst: *mut T, src: T) -> T; /// Bitwise nand with the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic::AtomicBool` type via the `fetch_nand` method by passing /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::fetch_nand`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_nand). pub fn atomic_nand_relaxed<T: Copy>(dst: *mut T, src: T) -> T; /// Bitwise or with the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_or` method by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::fetch_or`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_or). pub fn atomic_or<T: Copy>(dst: *mut T, src: T) -> T; /// Bitwise or with the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_or` method by passing /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::fetch_or`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_or). pub fn atomic_or_acq<T: Copy>(dst: *mut T, src: T) -> T; /// Bitwise or with the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_or` method by passing /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::fetch_or`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_or). pub fn atomic_or_rel<T: Copy>(dst: *mut T, src: T) -> T; /// Bitwise or with the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_or` method by passing /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::fetch_or`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_or). pub fn atomic_or_acqrel<T: Copy>(dst: *mut T, src: T) -> T; /// Bitwise or with the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_or` method by passing /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::fetch_or`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_or). pub fn atomic_or_relaxed<T: Copy>(dst: *mut T, src: T) -> T; /// Bitwise xor with the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_xor` method by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::fetch_xor`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_xor). pub fn atomic_xor<T: Copy>(dst: *mut T, src: T) -> T; /// Bitwise xor with the current value, returning the previous value. 
/// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_xor` method by passing /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::fetch_xor`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_xor). pub fn atomic_xor_acq<T: Copy>(dst: *mut T, src: T) -> T; /// Bitwise xor with the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_xor` method by passing /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::fetch_xor`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_xor). pub fn atomic_xor_rel<T: Copy>(dst: *mut T, src: T) -> T; /// Bitwise xor with the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_xor` method by passing /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::fetch_xor`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_xor). pub fn atomic_xor_acqrel<T: Copy>(dst: *mut T, src: T) -> T; /// Bitwise xor with the current value, returning the previous value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_xor` method by passing /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicBool::fetch_xor`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_xor). pub fn atomic_xor_relaxed<T: Copy>(dst: *mut T, src: T) -> T; /// Maximum with the current value using a signed comparison. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` signed integer types via the `fetch_max` method by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html#variant.SeqCst) /// as the `order`. For example, /// [`AtomicI32::fetch_max`](../../std/sync/atomic/struct.AtomicI32.html#method.fetch_max). pub fn atomic_max<T: Copy>(dst: *mut T, src: T) -> T; /// Maximum with the current value using a signed comparison. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` signed integer types via the `fetch_max` method by passing /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html#variant.Acquire) /// as the `order`. For example, /// [`AtomicI32::fetch_max`](../../std/sync/atomic/struct.AtomicI32.html#method.fetch_max). pub fn atomic_max_acq<T: Copy>(dst: *mut T, src: T) -> T; /// Maximum with the current value using a signed comparison. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` signed integer types via the `fetch_max` method by passing /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html#variant.Release) /// as the `order`. For example, /// [`AtomicI32::fetch_max`](../../std/sync/atomic/struct.AtomicI32.html#method.fetch_max). pub fn atomic_max_rel<T: Copy>(dst: *mut T, src: T) -> T; /// Maximum with the current value using a signed comparison. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` signed integer types via the `fetch_max` method by passing /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html#variant.AcqRel) /// as the `order`. 
For example, /// [`AtomicI32::fetch_max`](../../std/sync/atomic/struct.AtomicI32.html#method.fetch_max). pub fn atomic_max_acqrel<T: Copy>(dst: *mut T, src: T) -> T; /// Maximum with the current value. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` signed integer types via the `fetch_max` method by passing /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html#variant.Relaxed) /// as the `order`. For example, /// [`AtomicI32::fetch_max`](../../std/sync/atomic/struct.AtomicI32.html#method.fetch_max). pub fn atomic_max_relaxed<T: Copy>(dst: *mut T, src: T) -> T; /// Minimum with the current value using a signed comparison. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` signed integer types via the `fetch_min` method by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html#variant.SeqCst) /// as the `order`. For example, /// [`AtomicI32::fetch_min`](../../std/sync/atomic/struct.AtomicI32.html#method.fetch_min). pub fn atomic_min<T: Copy>(dst: *mut T, src: T) -> T; /// Minimum with the current value using a signed comparison. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` signed integer types via the `fetch_min` method by passing /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html#variant.Acquire) /// as the `order`. For example, /// [`AtomicI32::fetch_min`](../../std/sync/atomic/struct.AtomicI32.html#method.fetch_min). pub fn atomic_min_acq<T: Copy>(dst: *mut T, src: T) -> T; /// Minimum with the current value using a signed comparison. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` signed integer types via the `fetch_min` method by passing /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html#variant.Release) /// as the `order`. For example, /// [`AtomicI32::fetch_min`](../../std/sync/atomic/struct.AtomicI32.html#method.fetch_min). pub fn atomic_min_rel<T: Copy>(dst: *mut T, src: T) -> T; /// Minimum with the current value using a signed comparison. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` signed integer types via the `fetch_min` method by passing /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html#variant.AcqRel) /// as the `order`. For example, /// [`AtomicI32::fetch_min`](../../std/sync/atomic/struct.AtomicI32.html#method.fetch_min). pub fn atomic_min_acqrel<T: Copy>(dst: *mut T, src: T) -> T; /// Minimum with the current value using a signed comparison. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` signed integer types via the `fetch_min` method by passing /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html#variant.Relaxed) /// as the `order`. For example, /// [`AtomicI32::fetch_min`](../../std/sync/atomic/struct.AtomicI32.html#method.fetch_min). pub fn atomic_min_relaxed<T: Copy>(dst: *mut T, src: T) -> T; /// Minimum with the current value using an unsigned comparison. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` unsigned integer types via the `fetch_min` method by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html#variant.SeqCst) /// as the `order`. For example, /// [`AtomicU32::fetch_min`](../../std/sync/atomic/struct.AtomicU32.html#method.fetch_min). 
pub fn atomic_umin<T: Copy>(dst: *mut T, src: T) -> T; /// Minimum with the current value using an unsigned comparison. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` unsigned integer types via the `fetch_min` method by passing /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html#variant.Acquire) /// as the `order`. For example, /// [`AtomicU32::fetch_min`](../../std/sync/atomic/struct.AtomicU32.html#method.fetch_min). pub fn atomic_umin_acq<T: Copy>(dst: *mut T, src: T) -> T; /// Minimum with the current value using an unsigned comparison. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` unsigned integer types via the `fetch_min` method by passing /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html#variant.Release) /// as the `order`. For example, /// [`AtomicU32::fetch_min`](../../std/sync/atomic/struct.AtomicU32.html#method.fetch_min). pub fn atomic_umin_rel<T: Copy>(dst: *mut T, src: T) -> T; /// Minimum with the current value using an unsigned comparison. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` unsigned integer types via the `fetch_min` method by passing /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html#variant.AcqRel) /// as the `order`. For example, /// [`AtomicU32::fetch_min`](../../std/sync/atomic/struct.AtomicU32.html#method.fetch_min). pub fn atomic_umin_acqrel<T: Copy>(dst: *mut T, src: T) -> T; /// Minimum with the current value using an unsigned comparison. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` unsigned integer types via the `fetch_min` method by passing /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html#variant.Relaxed) /// as the `order`. For example, /// [`AtomicU32::fetch_min`](../../std/sync/atomic/struct.AtomicU32.html#method.fetch_min). pub fn atomic_umin_relaxed<T: Copy>(dst: *mut T, src: T) -> T; /// Maximum with the current value using an unsigned comparison. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` unsigned integer types via the `fetch_max` method by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html#variant.SeqCst) /// as the `order`. For example, /// [`AtomicU32::fetch_max`](../../std/sync/atomic/struct.AtomicU32.html#method.fetch_max). pub fn atomic_umax<T: Copy>(dst: *mut T, src: T) -> T; /// Maximum with the current value using an unsigned comparison. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` unsigned integer types via the `fetch_max` method by passing /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html#variant.Acquire) /// as the `order`. For example, /// [`AtomicU32::fetch_max`](../../std/sync/atomic/struct.AtomicU32.html#method.fetch_max). pub fn atomic_umax_acq<T: Copy>(dst: *mut T, src: T) -> T; /// Maximum with the current value using an unsigned comparison. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` unsigned integer types via the `fetch_max` method by passing /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html#variant.Release) /// as the `order`. For example, /// [`AtomicU32::fetch_max`](../../std/sync/atomic/struct.AtomicU32.html#method.fetch_max). pub fn atomic_umax_rel<T: Copy>(dst: *mut T, src: T) -> T; /// Maximum with the current value using an unsigned comparison. 
/// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` unsigned integer types via the `fetch_max` method by passing /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html#variant.AcqRel) /// as the `order`. For example, /// [`AtomicU32::fetch_max`](../../std/sync/atomic/struct.AtomicU32.html#method.fetch_max). pub fn atomic_umax_acqrel<T: Copy>(dst: *mut T, src: T) -> T; /// Maximum with the current value using an unsigned comparison. /// /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` unsigned integer types via the `fetch_max` method by passing /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html#variant.Relaxed) /// as the `order`. For example, /// [`AtomicU32::fetch_max`](../../std/sync/atomic/struct.AtomicU32.html#method.fetch_max). pub fn atomic_umax_relaxed<T: Copy>(dst: *mut T, src: T) -> T; /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction /// if supported; otherwise, it is a no-op. /// Prefetches have no effect on the behavior of the program but can change its performance /// characteristics. /// /// The `locality` argument must be a constant integer and is a temporal locality specifier /// ranging from (0) - no locality, to (3) - extremely local keep in cache. /// /// This intrinsic does not have a stable counterpart. pub fn prefetch_read_data<T>(data: *const T, locality: i32); /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction /// if supported; otherwise, it is a no-op. /// Prefetches have no effect on the behavior of the program but can change its performance /// characteristics. /// /// The `locality` argument must be a constant integer and is a temporal locality specifier /// ranging from (0) - no locality, to (3) - extremely local keep in cache. /// /// This intrinsic does not have a stable counterpart. pub fn prefetch_write_data<T>(data: *const T, locality: i32); /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction /// if supported; otherwise, it is a no-op. /// Prefetches have no effect on the behavior of the program but can change its performance /// characteristics. /// /// The `locality` argument must be a constant integer and is a temporal locality specifier /// ranging from (0) - no locality, to (3) - extremely local keep in cache. /// /// This intrinsic does not have a stable counterpart. pub fn prefetch_read_instruction<T>(data: *const T, locality: i32); /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction /// if supported; otherwise, it is a no-op. /// Prefetches have no effect on the behavior of the program but can change its performance /// characteristics. /// /// The `locality` argument must be a constant integer and is a temporal locality specifier /// ranging from (0) - no locality, to (3) - extremely local keep in cache. /// /// This intrinsic does not have a stable counterpart. pub fn prefetch_write_instruction<T>(data: *const T, locality: i32); } extern "rust-intrinsic" { /// An atomic fence. /// /// The stabilized version of this intrinsic is available in /// [`std::sync::atomic::fence`](../../std/sync/atomic/fn.fence.html) /// by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html#variant.SeqCst) /// as the `order`. pub fn atomic_fence(); /// An atomic fence. 
/// /// The stabilized version of this intrinsic is available in /// [`std::sync::atomic::fence`](../../std/sync/atomic/fn.fence.html) /// by passing /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html#variant.Acquire) /// as the `order`. pub fn atomic_fence_acq(); /// An atomic fence. /// /// The stabilized version of this intrinsic is available in /// [`std::sync::atomic::fence`](../../std/sync/atomic/fn.fence.html) /// by passing /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html#variant.Release) /// as the `order`. pub fn atomic_fence_rel(); /// An atomic fence. /// /// The stabilized version of this intrinsic is available in /// [`std::sync::atomic::fence`](../../std/sync/atomic/fn.fence.html) /// by passing /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html#variant.AcqRel) /// as the `order`. pub fn atomic_fence_acqrel(); /// A compiler-only memory barrier. /// /// Memory accesses will never be reordered across this barrier by the /// compiler, but no instructions will be emitted for it. This is /// appropriate for operations on the same thread that may be preempted, /// such as when interacting with signal handlers. /// /// The stabilized version of this intrinsic is available in /// [`std::sync::atomic::compiler_fence`](../../std/sync/atomic/fn.compiler_fence.html) /// by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html#variant.SeqCst) /// as the `order`. pub fn atomic_singlethreadfence(); /// A compiler-only memory barrier. /// /// Memory accesses will never be reordered across this barrier by the /// compiler, but no instructions will be emitted for it. This is /// appropriate for operations on the same thread that may be preempted, /// such as when interacting with signal handlers. /// /// The stabilized version of this intrinsic is available in /// [`std::sync::atomic::compiler_fence`](../../std/sync/atomic/fn.compiler_fence.html) /// by passing /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html#variant.Acquire) /// as the `order`. pub fn atomic_singlethreadfence_acq(); /// A compiler-only memory barrier. /// /// Memory accesses will never be reordered across this barrier by the /// compiler, but no instructions will be emitted for it. This is /// appropriate for operations on the same thread that may be preempted, /// such as when interacting with signal handlers. /// /// The stabilized version of this intrinsic is available in /// [`std::sync::atomic::compiler_fence`](../../std/sync/atomic/fn.compiler_fence.html) /// by passing /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html#variant.Release) /// as the `order`. pub fn atomic_singlethreadfence_rel(); /// A compiler-only memory barrier. /// /// Memory accesses will never be reordered across this barrier by the /// compiler, but no instructions will be emitted for it. This is /// appropriate for operations on the same thread that may be preempted, /// such as when interacting with signal handlers. /// /// The stabilized version of this intrinsic is available in /// [`std::sync::atomic::compiler_fence`](../../std/sync/atomic/fn.compiler_fence.html) /// by passing /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html#variant.AcqRel) /// as the `order`. pub fn atomic_singlethreadfence_acqrel(); /// Magic intrinsic that derives its meaning from attributes /// attached to the function. 
/// /// For example, dataflow uses this to inject static assertions so /// that `rustc_peek(potentially_uninitialized)` would actually /// double-check that dataflow did indeed compute that it is /// uninitialized at that point in the control flow. /// /// This intrinsic should not be used outside of the compiler. pub fn rustc_peek<T>(_: T) -> T; /// Aborts the execution of the process. /// /// A more user-friendly and stable version of this operation is /// [`std::process::abort`](../../std/process/fn.abort.html). pub fn abort() -> !; /// Tells LLVM that this point in the code is not reachable, enabling /// further optimizations. /// /// N.B., this is very different from the `unreachable!()` macro: Unlike the /// macro, which panics when it is executed, it is *undefined behavior* to /// reach code marked with this function. /// /// The stabilized version of this intrinsic is /// [`std::hint::unreachable_unchecked`](../../std/hint/fn.unreachable_unchecked.html). pub fn unreachable() -> !; /// Informs the optimizer that a condition is always true. /// If the condition is false, the behavior is undefined. /// /// No code is generated for this intrinsic, but the optimizer will try /// to preserve it (and its condition) between passes, which may interfere /// with optimization of surrounding code and reduce performance. It should /// not be used if the invariant can be discovered by the optimizer on its /// own, or if it does not enable any significant optimizations. /// /// This intrinsic does not have a stable counterpart. pub fn assume(b: bool); /// Hints to the compiler that branch condition is likely to be true. /// Returns the value passed to it. /// /// Any use other than with `if` statements will probably not have an effect. /// /// This intrinsic does not have a stable counterpart. #[rustc_const_unstable(feature = "const_likely", issue = "none")] pub fn likely(b: bool) -> bool; /// Hints to the compiler that branch condition is likely to be false. /// Returns the value passed to it. /// /// Any use other than with `if` statements will probably not have an effect. /// /// This intrinsic does not have a stable counterpart. #[rustc_const_unstable(feature = "const_likely", issue = "none")] pub fn unlikely(b: bool) -> bool; /// Executes a breakpoint trap, for inspection by a debugger. /// /// This intrinsic does not have a stable counterpart. pub fn breakpoint(); /// The size of a type in bytes. /// /// More specifically, this is the offset in bytes between successive /// items of the same type, including alignment padding. /// /// The stabilized version of this intrinsic is /// [`std::mem::size_of`](../../std/mem/fn.size_of.html). #[rustc_const_stable(feature = "const_size_of", since = "1.40.0")] pub fn size_of<T>() -> usize; /// Moves a value to an uninitialized memory location. /// /// Drop glue is not run on the destination. /// /// The stabilized version of this intrinsic is /// [`std::ptr::write`](../../std/ptr/fn.write.html). pub fn move_val_init<T>(dst: *mut T, src: T); /// The minimum alignment of a type. /// /// The stabilized version of this intrinsic is /// [`std::mem::align_of`](../../std/mem/fn.align_of.html). #[rustc_const_stable(feature = "const_min_align_of", since = "1.40.0")] pub fn min_align_of<T>() -> usize; /// The prefered alignment of a type. /// /// This intrinsic does not have a stable counterpart. #[rustc_const_unstable(feature = "const_pref_align_of", issue = "none")] pub fn pref_align_of<T>() -> usize; /// The size of the referenced value in bytes. 
/// /// The stabilized version of this intrinsic is /// [`std::mem::size_of_val`](../../std/mem/fn.size_of_val.html). pub fn size_of_val<T: ?Sized>(_: *const T) -> usize; /// The required alignment of the referenced value. /// /// The stabilized version of this intrinsic is /// [`std::mem::align_of_val`](../../std/mem/fn.align_of_val.html). pub fn min_align_of_val<T: ?Sized>(_: *const T) -> usize; /// Gets a static string slice containing the name of a type. /// /// The stabilized version of this intrinsic is /// [`std::any::type_name`](../../std/any/fn.type_name.html) #[rustc_const_unstable(feature = "const_type_name", issue = "63084")] pub fn type_name<T: ?Sized>() -> &'static str; /// Gets an identifier which is globally unique to the specified type. This /// function will return the same value for a type regardless of whichever /// crate it is invoked in. /// /// The stabilized version of this intrinsic is /// [`std::any::TypeId::of`](../../std/any/struct.TypeId.html#method.of) #[rustc_const_unstable(feature = "const_type_id", issue = "41875")] pub fn type_id<T: ?Sized + 'static>() -> u64; /// A guard for unsafe functions that cannot ever be executed if `T` is uninhabited: /// This will statically either panic, or do nothing. /// /// This intrinsic does not have a stable counterpart. pub fn assert_inhabited<T>(); /// A guard for unsafe functions that cannot ever be executed if `T` does not permit /// zero-initialization: This will statically either panic, or do nothing. /// /// This intrinsic does not have a stable counterpart. pub fn assert_zero_valid<T>(); /// A guard for unsafe functions that cannot ever be executed if `T` has invalid /// bit patterns: This will statically either panic, or do nothing. /// /// This intrinsic does not have a stable counterpart. pub fn assert_uninit_valid<T>(); /// Gets a reference to a static `Location` indicating where it was called. /// /// Consider using [`std::panic::Location::caller`](../../std/panic/struct.Location.html#method.caller) /// instead. #[rustc_const_unstable(feature = "const_caller_location", issue = "47809")] pub fn caller_location() -> &'static crate::panic::Location<'static>; /// Moves a value out of scope without running drop glue. /// /// This exists solely for [`mem::forget_unsized`](../../std/mem/fn.forget_unsized.html); /// normal `forget` uses `ManuallyDrop` instead. pub fn forget<T: ?Sized>(_: T); /// Reinterprets the bits of a value of one type as another type. /// /// Both types must have the same size. Neither the original, nor the result, /// may be an [invalid value](../../nomicon/what-unsafe-does.html). /// /// `transmute` is semantically equivalent to a bitwise move of one type /// into another. It copies the bits from the source value into the /// destination value, then forgets the original. It's equivalent to C's /// `memcpy` under the hood, just like `transmute_copy`. /// /// `transmute` is **incredibly** unsafe. There are a vast number of ways to /// cause [undefined behavior][ub] with this function. `transmute` should be /// the absolute last resort. /// /// The [nomicon](../../nomicon/transmutes.html) has additional /// documentation. /// /// [ub]: ../../reference/behavior-considered-undefined.html /// /// # Examples /// /// There are a few things that `transmute` is really useful for. /// /// Turning a pointer into a function pointer. This is *not* portable to /// machines where function pointers and data pointers have different sizes. 
/// /// ``` /// fn foo() -> i32 { /// 0 /// } /// let pointer = foo as *const (); /// let function = unsafe { /// std::mem::transmute::<*const (), fn() -> i32>(pointer) /// }; /// assert_eq!(function(), 0); /// ``` /// /// Extending a lifetime, or shortening an invariant lifetime. This is /// advanced, very unsafe Rust! /// /// ``` /// struct R<'a>(&'a i32); /// unsafe fn extend_lifetime<'b>(r: R<'b>) -> R<'static> { /// std::mem::transmute::<R<'b>, R<'static>>(r) /// } /// /// unsafe fn shorten_invariant_lifetime<'b, 'c>(r: &'b mut R<'static>) /// -> &'b mut R<'c> { /// std::mem::transmute::<&'b mut R<'static>, &'b mut R<'c>>(r) /// } /// ``` /// /// # Alternatives /// /// Don't despair: many uses of `transmute` can be achieved through other means. /// Below are common applications of `transmute` which can be replaced with safer /// constructs. /// /// Turning raw bytes(`&[u8]`) to `u32`, `f64`, etc.: /// /// ``` /// let raw_bytes = [0x78, 0x56, 0x34, 0x12]; /// /// let num = unsafe { /// std::mem::transmute::<[u8; 4], u32>(raw_bytes); /// }; /// /// // use `u32::from_ne_bytes` instead /// let num = u32::from_ne_bytes(raw_bytes); /// // or use `u32::from_le_bytes` or `u32::from_be_bytes` to specify the endianness /// let num = u32::from_le_bytes(raw_bytes); /// assert_eq!(num, 0x12345678); /// let num = u32::from_be_bytes(raw_bytes); /// assert_eq!(num, 0x78563412); /// ``` /// /// Turning a pointer into a `usize`: /// /// ``` /// let ptr = &0; /// let ptr_num_transmute = unsafe { /// std::mem::transmute::<&i32, usize>(ptr) /// }; /// /// // Use an `as` cast instead /// let ptr_num_cast = ptr as *const i32 as usize; /// ``` /// /// Turning a `*mut T` into an `&mut T`: /// /// ``` /// let ptr: *mut i32 = &mut 0; /// let ref_transmuted = unsafe { /// std::mem::transmute::<*mut i32, &mut i32>(ptr) /// }; /// /// // Use a reborrow instead /// let ref_casted = unsafe { &mut *ptr }; /// ``` /// /// Turning an `&mut T` into an `&mut U`: /// /// ``` /// let ptr = &mut 0; /// let val_transmuted = unsafe { /// std::mem::transmute::<&mut i32, &mut u32>(ptr) /// }; /// /// // Now, put together `as` and reborrowing - note the chaining of `as` /// // `as` is not transitive /// let val_casts = unsafe { &mut *(ptr as *mut i32 as *mut u32) }; /// ``` /// /// Turning an `&str` into an `&[u8]`: /// /// ``` /// // this is not a good way to do this. /// let slice = unsafe { std::mem::transmute::<&str, &[u8]>("Rust") }; /// assert_eq!(slice, &[82, 117, 115, 116]); /// /// // You could use `str::as_bytes` /// let slice = "Rust".as_bytes(); /// assert_eq!(slice, &[82, 117, 115, 116]); /// /// // Or, just use a byte string, if you have control over the string /// // literal /// assert_eq!(b"Rust", &[82, 117, 115, 116]); /// ``` /// /// Turning a `Vec<&T>` into a `Vec<Option<&T>>`: /// /// ``` /// let store = [0, 1, 2, 3]; /// let v_orig = store.iter().collect::<Vec<&i32>>(); /// /// // clone the vector as we will reuse them later /// let v_clone = v_orig.clone(); /// /// // Using transmute: this relies on the unspecified data layout of `Vec`, which is a /// // bad idea and could cause Undefined Behavior. /// // However, it is no-copy. /// let v_transmuted = unsafe { /// std::mem::transmute::<Vec<&i32>, Vec<Option<&i32>>>(v_clone) /// }; /// /// let v_clone = v_orig.clone(); /// /// // This is the suggested, safe way. /// // It does copy the entire vector, though, into a new array. 
/// let v_collected = v_clone.into_iter() /// .map(Some) /// .collect::<Vec<Option<&i32>>>(); /// /// let v_clone = v_orig.clone(); /// /// // The no-copy, unsafe way, still using transmute, but not relying on the data layout. /// // Like the first approach, this reuses the `Vec` internals. /// // Therefore, the new inner type must have the /// // exact same size, *and the same alignment*, as the old type. /// // The same caveats exist for this method as transmute, for /// // the original inner type (`&i32`) to the converted inner type /// // (`Option<&i32>`), so read the nomicon pages linked above and also /// // consult the [`from_raw_parts`] documentation. /// let v_from_raw = unsafe { // FIXME Update this when vec_into_raw_parts is stabilized /// // Ensure the original vector is not dropped. /// let mut v_clone = std::mem::ManuallyDrop::new(v_clone); /// Vec::from_raw_parts(v_clone.as_mut_ptr() as *mut Option<&i32>, /// v_clone.len(), /// v_clone.capacity()) /// }; /// ``` /// /// [`from_raw_parts`]: ../../std/vec/struct.Vec.html#method.from_raw_parts /// /// Implementing `split_at_mut`: /// /// ``` /// use std::{slice, mem}; /// /// // There are multiple ways to do this, and there are multiple problems /// // with the following (transmute) way. /// fn split_at_mut_transmute<T>(slice: &mut [T], mid: usize) /// -> (&mut [T], &mut [T]) { /// let len = slice.len(); /// assert!(mid <= len); /// unsafe { /// let slice2 = mem::transmute::<&mut [T], &mut [T]>(slice); /// // first: transmute is not typesafe; all it checks is that T and /// // U are of the same size. Second, right here, you have two /// // mutable references pointing to the same memory. /// (&mut slice[0..mid], &mut slice2[mid..len]) /// } /// } /// /// // This gets rid of the typesafety problems; `&mut *` will *only* give /// // you an `&mut T` from an `&mut T` or `*mut T`. /// fn split_at_mut_casts<T>(slice: &mut [T], mid: usize) /// -> (&mut [T], &mut [T]) { /// let len = slice.len(); /// assert!(mid <= len); /// unsafe { /// let slice2 = &mut *(slice as *mut [T]); /// // however, you still have two mutable references pointing to /// // the same memory. /// (&mut slice[0..mid], &mut slice2[mid..len]) /// } /// } /// /// // This is how the standard library does it. This is the best method, if /// // you need to do something like this /// fn split_at_stdlib<T>(slice: &mut [T], mid: usize) /// -> (&mut [T], &mut [T]) { /// let len = slice.len(); /// assert!(mid <= len); /// unsafe { /// let ptr = slice.as_mut_ptr(); /// // This now has three mutable references pointing at the same /// // memory. `slice`, the rvalue ret.0, and the rvalue ret.1. /// // `slice` is never used after `let ptr = ...`, and so one can /// // treat it as "dead", and therefore, you only have two real /// // mutable slices. /// (slice::from_raw_parts_mut(ptr, mid), /// slice::from_raw_parts_mut(ptr.add(mid), len - mid)) /// } /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] // NOTE: While this makes the intrinsic const stable, we have some custom code in const fn // checks that prevent its use within `const fn`. #[rustc_const_stable(feature = "const_transmute", since = "1.46.0")] pub fn transmute<T, U>(e: T) -> U; /// Returns `true` if the actual type given as `T` requires drop /// glue; returns `false` if the actual type provided for `T` /// implements `Copy`. /// /// If the actual type neither requires drop glue nor implements /// `Copy`, then the return value of this function is unspecified. 
/// /// The stabilized version of this intrinsic is /// [`std::mem::needs_drop`](../../std/mem/fn.needs_drop.html). #[rustc_const_stable(feature = "const_needs_drop", since = "1.40.0")] pub fn needs_drop<T>() -> bool; /// Calculates the offset from a pointer. /// /// This is implemented as an intrinsic to avoid converting to and from an /// integer, since the conversion would throw away aliasing information. /// /// # Safety /// /// Both the starting and resulting pointer must be either in bounds or one /// byte past the end of an allocated object. If either pointer is out of /// bounds or arithmetic overflow occurs then any further use of the /// returned value will result in undefined behavior. /// /// The stabilized version of this intrinsic is /// [`std::pointer::offset`](../../std/primitive.pointer.html#method.offset). #[must_use = "returns a new pointer rather than modifying its argument"] #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")] pub fn offset<T>(dst: *const T, offset: isize) -> *const T; /// Calculates the offset from a pointer, potentially wrapping. /// /// This is implemented as an intrinsic to avoid converting to and from an /// integer, since the conversion inhibits certain optimizations. /// /// # Safety /// /// Unlike the `offset` intrinsic, this intrinsic does not restrict the /// resulting pointer to point into or one byte past the end of an allocated /// object, and it wraps with two's complement arithmetic. The resulting /// value is not necessarily valid to be used to actually access memory. /// /// The stabilized version of this intrinsic is /// [`std::pointer::wrapping_offset`](../../std/primitive.pointer.html#method.wrapping_offset). #[must_use = "returns a new pointer rather than modifying its argument"] #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")] pub fn arith_offset<T>(dst: *const T, offset: isize) -> *const T; /// Equivalent to the appropriate `llvm.memcpy.p0i8.0i8.*` intrinsic, with /// a size of `count` * `size_of::<T>()` and an alignment of /// `min_align_of::<T>()` /// /// The volatile parameter is set to `true`, so it will not be optimized out /// unless size is equal to zero. /// /// This intrinsic does not have a stable counterpart. pub fn volatile_copy_nonoverlapping_memory<T>(dst: *mut T, src: *const T, count: usize); /// Equivalent to the appropriate `llvm.memmove.p0i8.0i8.*` intrinsic, with /// a size of `count` * `size_of::<T>()` and an alignment of /// `min_align_of::<T>()` /// /// The volatile parameter is set to `true`, so it will not be optimized out /// unless size is equal to zero. /// /// This intrinsic does not have a stable counterpart. pub fn volatile_copy_memory<T>(dst: *mut T, src: *const T, count: usize); /// Equivalent to the appropriate `llvm.memset.p0i8.*` intrinsic, with a /// size of `count` * `size_of::<T>()` and an alignment of /// `min_align_of::<T>()`. /// /// The volatile parameter is set to `true`, so it will not be optimized out /// unless size is equal to zero. /// /// This intrinsic does not have a stable counterpart. pub fn volatile_set_memory<T>(dst: *mut T, val: u8, count: usize); /// Performs a volatile load from the `src` pointer. /// /// The stabilized version of this intrinsic is /// [`std::ptr::read_volatile`](../../std/ptr/fn.read_volatile.html). pub fn volatile_load<T>(src: *const T) -> T; /// Performs a volatile store to the `dst` pointer. /// /// The stabilized version of this intrinsic is /// [`std::ptr::write_volatile`](../../std/ptr/fn.write_volatile.html). 
pub fn volatile_store<T>(dst: *mut T, val: T); /// Performs a volatile load from the `src` pointer /// The pointer is not required to be aligned. /// /// This intrinsic does not have a stable counterpart. pub fn unaligned_volatile_load<T>(src: *const T) -> T; /// Performs a volatile store to the `dst` pointer. /// The pointer is not required to be aligned. /// /// This intrinsic does not have a stable counterpart. pub fn unaligned_volatile_store<T>(dst: *mut T, val: T); /// Returns the square root of an `f32` /// /// The stabilized version of this intrinsic is /// [`std::f32::sqrt`](../../std/primitive.f32.html#method.sqrt) pub fn sqrtf32(x: f32) -> f32; /// Returns the square root of an `f64` /// /// The stabilized version of this intrinsic is /// [`std::f64::sqrt`](../../std/primitive.f64.html#method.sqrt) pub fn sqrtf64(x: f64) -> f64; /// Raises an `f32` to an integer power. /// /// The stabilized version of this intrinsic is /// [`std::f32::powi`](../../std/primitive.f32.html#method.powi) pub fn powif32(a: f32, x: i32) -> f32; /// Raises an `f64` to an integer power. /// /// The stabilized version of this intrinsic is /// [`std::f64::powi`](../../std/primitive.f64.html#method.powi) pub fn powif64(a: f64, x: i32) -> f64; /// Returns the sine of an `f32`. /// /// The stabilized version of this intrinsic is /// [`std::f32::sin`](../../std/primitive.f32.html#method.sin) pub fn sinf32(x: f32) -> f32; /// Returns the sine of an `f64`. /// /// The stabilized version of this intrinsic is /// [`std::f64::sin`](../../std/primitive.f64.html#method.sin) pub fn sinf64(x: f64) -> f64; /// Returns the cosine of an `f32`. /// /// The stabilized version of this intrinsic is /// [`std::f32::cos`](../../std/primitive.f32.html#method.cos) pub fn cosf32(x: f32) -> f32; /// Returns the cosine of an `f64`. /// /// The stabilized version of this intrinsic is /// [`std::f64::cos`](../../std/primitive.f64.html#method.cos) pub fn cosf64(x: f64) -> f64; /// Raises an `f32` to an `f32` power. /// /// The stabilized version of this intrinsic is /// [`std::f32::powf`](../../std/primitive.f32.html#method.powf) pub fn powf32(a: f32, x: f32) -> f32; /// Raises an `f64` to an `f64` power. /// /// The stabilized version of this intrinsic is /// [`std::f64::powf`](../../std/primitive.f64.html#method.powf) pub fn powf64(a: f64, x: f64) -> f64; /// Returns the exponential of an `f32`. /// /// The stabilized version of this intrinsic is /// [`std::f32::exp`](../../std/primitive.f32.html#method.exp) pub fn expf32(x: f32) -> f32; /// Returns the exponential of an `f64`. /// /// The stabilized version of this intrinsic is /// [`std::f64::exp`](../../std/primitive.f64.html#method.exp) pub fn expf64(x: f64) -> f64; /// Returns 2 raised to the power of an `f32`. /// /// The stabilized version of this intrinsic is /// [`std::f32::exp2`](../../std/primitive.f32.html#method.exp2) pub fn exp2f32(x: f32) -> f32; /// Returns 2 raised to the power of an `f64`. /// /// The stabilized version of this intrinsic is /// [`std::f64::exp2`](../../std/primitive.f64.html#method.exp2) pub fn exp2f64(x: f64) -> f64; /// Returns the natural logarithm of an `f32`. /// /// The stabilized version of this intrinsic is /// [`std::f32::ln`](../../std/primitive.f32.html#method.ln) pub fn logf32(x: f32) -> f32; /// Returns the natural logarithm of an `f64`. /// /// The stabilized version of this intrinsic is /// [`std::f64::ln`](../../std/primitive.f64.html#method.ln) pub fn logf64(x: f64) -> f64; /// Returns the base 10 logarithm of an `f32`. 
/// /// The stabilized version of this intrinsic is /// [`std::f32::log10`](../../std/primitive.f32.html#method.log10) pub fn log10f32(x: f32) -> f32; /// Returns the base 10 logarithm of an `f64`. /// /// The stabilized version of this intrinsic is /// [`std::f64::log10`](../../std/primitive.f64.html#method.log10) pub fn log10f64(x: f64) -> f64; /// Returns the base 2 logarithm of an `f32`. /// /// The stabilized version of this intrinsic is /// [`std::f32::log2`](../../std/primitive.f32.html#method.log2) pub fn log2f32(x: f32) -> f32; /// Returns the base 2 logarithm of an `f64`. /// /// The stabilized version of this intrinsic is /// [`std::f64::log2`](../../std/primitive.f64.html#method.log2) pub fn log2f64(x: f64) -> f64; /// Returns `a * b + c` for `f32` values. /// /// The stabilized version of this intrinsic is /// [`std::f32::mul_add`](../../std/primitive.f32.html#method.mul_add) pub fn fmaf32(a: f32, b: f32, c: f32) -> f32; /// Returns `a * b + c` for `f64` values. /// /// The stabilized version of this intrinsic is /// [`std::f64::mul_add`](../../std/primitive.f64.html#method.mul_add) pub fn fmaf64(a: f64, b: f64, c: f64) -> f64; /// Returns the absolute value of an `f32`. /// /// The stabilized version of this intrinsic is /// [`std::f32::abs`](../../std/primitive.f32.html#method.abs) pub fn fabsf32(x: f32) -> f32; /// Returns the absolute value of an `f64`. /// /// The stabilized version of this intrinsic is /// [`std::f64::abs`](../../std/primitive.f64.html#method.abs) pub fn fabsf64(x: f64) -> f64; /// Returns the minimum of two `f32` values. /// /// The stabilized version of this intrinsic is /// [`std::f32::min`](../../std/primitive.f32.html#method.min) pub fn minnumf32(x: f32, y: f32) -> f32; /// Returns the minimum of two `f64` values. /// /// The stabilized version of this intrinsic is /// [`std::f64::min`](../../std/primitive.f64.html#method.min) pub fn minnumf64(x: f64, y: f64) -> f64; /// Returns the maximum of two `f32` values. /// /// The stabilized version of this intrinsic is /// [`std::f32::max`](../../std/primitive.f32.html#method.max) pub fn maxnumf32(x: f32, y: f32) -> f32; /// Returns the maximum of two `f64` values. /// /// The stabilized version of this intrinsic is /// [`std::f64::max`](../../std/primitive.f64.html#method.max) pub fn maxnumf64(x: f64, y: f64) -> f64; /// Copies the sign from `y` to `x` for `f32` values. /// /// The stabilized version of this intrinsic is /// [`std::f32::copysign`](../../std/primitive.f32.html#method.copysign) pub fn copysignf32(x: f32, y: f32) -> f32; /// Copies the sign from `y` to `x` for `f64` values. /// /// The stabilized version of this intrinsic is /// [`std::f64::copysign`](../../std/primitive.f64.html#method.copysign) pub fn copysignf64(x: f64, y: f64) -> f64; /// Returns the largest integer less than or equal to an `f32`. /// /// The stabilized version of this intrinsic is /// [`std::f32::floor`](../../std/primitive.f32.html#method.floor) pub fn floorf32(x: f32) -> f32; /// Returns the largest integer less than or equal to an `f64`. /// /// The stabilized version of this intrinsic is /// [`std::f64::floor`](../../std/primitive.f64.html#method.floor) pub fn floorf64(x: f64) -> f64; /// Returns the smallest integer greater than or equal to an `f32`. /// /// The stabilized version of this intrinsic is /// [`std::f32::ceil`](../../std/primitive.f32.html#method.ceil) pub fn ceilf32(x: f32) -> f32; /// Returns the smallest integer greater than or equal to an `f64`. 
/// /// The stabilized version of this intrinsic is /// [`std::f64::ceil`](../../std/primitive.f64.html#method.ceil) pub fn ceilf64(x: f64) -> f64; /// Returns the integer part of an `f32`. /// /// The stabilized version of this intrinsic is /// [`std::f32::trunc`](../../std/primitive.f32.html#method.trunc) pub fn truncf32(x: f32) -> f32; /// Returns the integer part of an `f64`. /// /// The stabilized version of this intrinsic is /// [`std::f64::trunc`](../../std/primitive.f64.html#method.trunc) pub fn truncf64(x: f64) -> f64; /// Returns the nearest integer to an `f32`. May raise an inexact floating-point exception /// if the argument is not an integer. pub fn rintf32(x: f32) -> f32; /// Returns the nearest integer to an `f64`. May raise an inexact floating-point exception /// if the argument is not an integer. pub fn rintf64(x: f64) -> f64; /// Returns the nearest integer to an `f32`. /// /// This intrinsic does not have a stable counterpart. pub fn nearbyintf32(x: f32) -> f32; /// Returns the nearest integer to an `f64`. /// /// This intrinsic does not have a stable counterpart. pub fn nearbyintf64(x: f64) -> f64; /// Returns the nearest integer to an `f32`. Rounds half-way cases away from zero. /// /// The stabilized version of this intrinsic is /// [`std::f32::round`](../../std/primitive.f32.html#method.round) pub fn roundf32(x: f32) -> f32; /// Returns the nearest integer to an `f64`. Rounds half-way cases away from zero. /// /// The stabilized version of this intrinsic is /// [`std::f64::round`](../../std/primitive.f64.html#method.round) pub fn roundf64(x: f64) -> f64; /// Float addition that allows optimizations based on algebraic rules. /// May assume inputs are finite. /// /// This intrinsic does not have a stable counterpart. pub fn fadd_fast<T: Copy>(a: T, b: T) -> T; /// Float subtraction that allows optimizations based on algebraic rules. /// May assume inputs are finite. /// /// This intrinsic does not have a stable counterpart. pub fn fsub_fast<T: Copy>(a: T, b: T) -> T; /// Float multiplication that allows optimizations based on algebraic rules. /// May assume inputs are finite. /// /// This intrinsic does not have a stable counterpart. pub fn fmul_fast<T: Copy>(a: T, b: T) -> T; /// Float division that allows optimizations based on algebraic rules. /// May assume inputs are finite. /// /// This intrinsic does not have a stable counterpart. pub fn fdiv_fast<T: Copy>(a: T, b: T) -> T; /// Float remainder that allows optimizations based on algebraic rules. /// May assume inputs are finite. /// /// This intrinsic does not have a stable counterpart. pub fn frem_fast<T: Copy>(a: T, b: T) -> T; /// Convert with LLVM’s fptoui/fptosi, which may return undef for values out of range /// (<https://github.com/rust-lang/rust/issues/10184>) /// /// Stabilized as [`f32::to_int_unchecked`](../../std/primitive.f32.html#method.to_int_unchecked) /// and [`f64::to_int_unchecked`](../../std/primitive.f64.html#method.to_int_unchecked). pub fn float_to_int_unchecked<Float: Copy, Int: Copy>(value: Float) -> Int; /// Returns the number of bits set in an integer type `T` /// /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `count_ones` method. For example, /// [`std::u32::count_ones`](../../std/primitive.u32.html#method.count_ones) #[rustc_const_stable(feature = "const_ctpop", since = "1.40.0")] pub fn ctpop<T: Copy>(x: T) -> T; /// Returns the number of leading unset bits (zeroes) in an integer type `T`. 
/// /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `leading_zeros` method. For example, /// [`std::u32::leading_zeros`](../../std/primitive.u32.html#method.leading_zeros) /// /// # Examples /// /// ``` /// #![feature(core_intrinsics)] /// /// use std::intrinsics::ctlz; /// /// let x = 0b0001_1100_u8; /// let num_leading = ctlz(x); /// assert_eq!(num_leading, 3); /// ``` /// /// An `x` with value `0` will return the bit width of `T`. /// /// ``` /// #![feature(core_intrinsics)] /// /// use std::intrinsics::ctlz; /// /// let x = 0u16; /// let num_leading = ctlz(x); /// assert_eq!(num_leading, 16); /// ``` #[rustc_const_stable(feature = "const_ctlz", since = "1.40.0")] pub fn ctlz<T: Copy>(x: T) -> T; /// Like `ctlz`, but extra-unsafe as it returns `undef` when /// given an `x` with value `0`. /// /// This intrinsic does not have a stable counterpart. /// /// # Examples /// /// ``` /// #![feature(core_intrinsics)] /// /// use std::intrinsics::ctlz_nonzero; /// /// let x = 0b0001_1100_u8; /// let num_leading = unsafe { ctlz_nonzero(x) }; /// assert_eq!(num_leading, 3); /// ``` #[rustc_const_unstable(feature = "constctlz", issue = "none")] pub fn ctlz_nonzero<T: Copy>(x: T) -> T; /// Returns the number of trailing unset bits (zeroes) in an integer type `T`. /// /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `trailing_zeros` method. For example, /// [`std::u32::trailing_zeros`](../../std/primitive.u32.html#method.trailing_zeros) /// /// # Examples /// /// ``` /// #![feature(core_intrinsics)] /// /// use std::intrinsics::cttz; /// /// let x = 0b0011_1000_u8; /// let num_trailing = cttz(x); /// assert_eq!(num_trailing, 3); /// ``` /// /// An `x` with value `0` will return the bit width of `T`: /// /// ``` /// #![feature(core_intrinsics)] /// /// use std::intrinsics::cttz; /// /// let x = 0u16; /// let num_trailing = cttz(x); /// assert_eq!(num_trailing, 16); /// ``` #[rustc_const_stable(feature = "const_cttz", since = "1.40.0")] pub fn cttz<T: Copy>(x: T) -> T; /// Like `cttz`, but extra-unsafe as it returns `undef` when /// given an `x` with value `0`. /// /// This intrinsic does not have a stable counterpart. /// /// # Examples /// /// ``` /// #![feature(core_intrinsics)] /// /// use std::intrinsics::cttz_nonzero; /// /// let x = 0b0011_1000_u8; /// let num_trailing = unsafe { cttz_nonzero(x) }; /// assert_eq!(num_trailing, 3); /// ``` #[rustc_const_unstable(feature = "const_cttz", issue = "none")] pub fn cttz_nonzero<T: Copy>(x: T) -> T; /// Reverses the bytes in an integer type `T`. /// /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `swap_bytes` method. For example, /// [`std::u32::swap_bytes`](../../std/primitive.u32.html#method.swap_bytes) #[rustc_const_stable(feature = "const_bswap", since = "1.40.0")] pub fn bswap<T: Copy>(x: T) -> T; /// Reverses the bits in an integer type `T`. /// /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `reverse_bits` method. For example, /// [`std::u32::reverse_bits`](../../std/primitive.u32.html#method.reverse_bits) #[rustc_const_stable(feature = "const_bitreverse", since = "1.40.0")] pub fn bitreverse<T: Copy>(x: T) -> T; /// Performs checked integer addition. /// /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `overflowing_add` method. 
For example, /// [`std::u32::overflowing_add`](../../std/primitive.u32.html#method.overflowing_add) #[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")] pub fn add_with_overflow<T: Copy>(x: T, y: T) -> (T, bool); /// Performs checked integer subtraction /// /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `overflowing_sub` method. For example, /// [`std::u32::overflowing_sub`](../../std/primitive.u32.html#method.overflowing_sub) #[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")] pub fn sub_with_overflow<T: Copy>(x: T, y: T) -> (T, bool); /// Performs checked integer multiplication /// /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `overflowing_mul` method. For example, /// [`std::u32::overflowing_mul`](../../std/primitive.u32.html#method.overflowing_mul) #[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")] pub fn mul_with_overflow<T: Copy>(x: T, y: T) -> (T, bool); /// Performs an exact division, resulting in undefined behavior where /// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1` /// /// This intrinsic does not have a stable counterpart. pub fn exact_div<T: Copy>(x: T, y: T) -> T; /// Performs an unchecked division, resulting in undefined behavior /// where y = 0 or x = `T::MIN` and y = -1 /// /// Safe wrappers for this intrinsic are available on the integer /// primitives via the `checked_div` method. For example, /// [`std::u32::checked_div`](../../std/primitive.u32.html#method.checked_div) #[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")] pub fn unchecked_div<T: Copy>(x: T, y: T) -> T; /// Returns the remainder of an unchecked division, resulting in /// undefined behavior where y = 0 or x = `T::MIN` and y = -1 /// /// Safe wrappers for this intrinsic are available on the integer /// primitives via the `checked_rem` method. For example, /// [`std::u32::checked_rem`](../../std/primitive.u32.html#method.checked_rem) #[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")] pub fn unchecked_rem<T: Copy>(x: T, y: T) -> T; /// Performs an unchecked left shift, resulting in undefined behavior when /// y < 0 or y >= N, where N is the width of T in bits. /// /// Safe wrappers for this intrinsic are available on the integer /// primitives via the `checked_shl` method. For example, /// [`std::u32::checked_shl`](../../std/primitive.u32.html#method.checked_shl) #[rustc_const_stable(feature = "const_int_unchecked", since = "1.40.0")] pub fn unchecked_shl<T: Copy>(x: T, y: T) -> T; /// Performs an unchecked right shift, resulting in undefined behavior when /// y < 0 or y >= N, where N is the width of T in bits. /// /// Safe wrappers for this intrinsic are available on the integer /// primitives via the `checked_shr` method. For example, /// [`std::u32::checked_shr`](../../std/primitive.u32.html#method.checked_shr) #[rustc_const_stable(feature = "const_int_unchecked", since = "1.40.0")] pub fn unchecked_shr<T: Copy>(x: T, y: T) -> T; /// Returns the result of an unchecked addition, resulting in /// undefined behavior when `x + y > T::MAX` or `x + y < T::MIN`. /// /// This intrinsic does not have a stable counterpart. #[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")] pub fn unchecked_add<T: Copy>(x: T, y: T) -> T; /// Returns the result of an unchecked subtraction, resulting in /// undefined behavior when `x - y > T::MAX` or `x - y < T::MIN`. 
/// /// This intrinsic does not have a stable counterpart. #[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")] pub fn unchecked_sub<T: Copy>(x: T, y: T) -> T; /// Returns the result of an unchecked multiplication, resulting in /// undefined behavior when `x * y > T::MAX` or `x * y < T::MIN`. /// /// This intrinsic does not have a stable counterpart. #[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")] pub fn unchecked_mul<T: Copy>(x: T, y: T) -> T; /// Performs rotate left. /// /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `rotate_left` method. For example, /// [`std::u32::rotate_left`](../../std/primitive.u32.html#method.rotate_left) #[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")] pub fn rotate_left<T: Copy>(x: T, y: T) -> T; /// Performs rotate right. /// /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `rotate_right` method. For example, /// [`std::u32::rotate_right`](../../std/primitive.u32.html#method.rotate_right) #[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")] pub fn rotate_right<T: Copy>(x: T, y: T) -> T; /// Returns (a + b) mod 2<sup>N</sup>, where N is the width of T in bits. /// /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `checked_add` method. For example, /// [`std::u32::checked_add`](../../std/primitive.u32.html#method.checked_add) #[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")] pub fn wrapping_add<T: Copy>(a: T, b: T) -> T; /// Returns (a - b) mod 2<sup>N</sup>, where N is the width of T in bits. /// /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `checked_sub` method. For example, /// [`std::u32::checked_sub`](../../std/primitive.u32.html#method.checked_sub) #[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")] pub fn wrapping_sub<T: Copy>(a: T, b: T) -> T; /// Returns (a * b) mod 2<sup>N</sup>, where N is the width of T in bits. /// /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `checked_mul` method. For example, /// [`std::u32::checked_mul`](../../std/primitive.u32.html#method.checked_mul) #[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")] pub fn wrapping_mul<T: Copy>(a: T, b: T) -> T; /// Computes `a + b`, while saturating at numeric bounds. /// /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `saturating_add` method. For example, /// [`std::u32::saturating_add`](../../std/primitive.u32.html#method.saturating_add) #[rustc_const_stable(feature = "const_int_saturating", since = "1.40.0")] pub fn saturating_add<T: Copy>(a: T, b: T) -> T; /// Computes `a - b`, while saturating at numeric bounds. /// /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `saturating_sub` method. For example, /// [`std::u32::saturating_sub`](../../std/primitive.u32.html#method.saturating_sub) #[rustc_const_stable(feature = "const_int_saturating", since = "1.40.0")] pub fn saturating_sub<T: Copy>(a: T, b: T) -> T; /// Returns the value of the discriminant for the variant in 'v', /// cast to a `u64`; if `T` has no discriminant, returns 0. 
/// /// The stabilized version of this intrinsic is /// [`std::mem::discriminant`](../../std/mem/fn.discriminant.html) #[rustc_const_unstable(feature = "const_discriminant", issue = "69821")] pub fn discriminant_value<T>(v: &T) -> <T as DiscriminantKind>::Discriminant; /// Returns the number of variants of the type `T` cast to a `usize`; /// if `T` has no variants, returns 0. Uninhabited variants will be counted. /// /// The to-be-stabilized version of this intrinsic is /// [`std::mem::variant_count`](../../std/mem/fn.variant_count.html) #[rustc_const_unstable(feature = "variant_count", issue = "73662")] pub fn variant_count<T>() -> usize; /// Rust's "try catch" construct which invokes the function pointer `try_fn` /// with the data pointer `data`. /// /// The third argument is a function called if a panic occurs. This function /// takes the data pointer and a pointer to the target-specific exception /// object that was caught. For more information see the compiler's /// source as well as std's catch implementation. pub fn r#try(try_fn: fn(*mut u8), data: *mut u8, catch_fn: fn(*mut u8, *mut u8)) -> i32; /// Emits a `!nontemporal` store according to LLVM (see their docs). /// Probably will never become stable. pub fn nontemporal_store<T>(ptr: *mut T, val: T); /// See documentation of `<*const T>::offset_from` for details. #[rustc_const_unstable(feature = "const_ptr_offset_from", issue = "41079")] pub fn ptr_offset_from<T>(ptr: *const T, base: *const T) -> isize; /// Internal hook used by Miri to implement unwinding. /// ICEs when encountered during non-Miri codegen. /// /// The `payload` ptr here will be exactly the one `do_catch` gets passed by `try`. /// /// Perma-unstable: do not use. pub fn miri_start_panic(payload: *mut u8) -> !; /// Internal placeholder for injecting code coverage counters when the "instrument-coverage" /// option is enabled. The placeholder is replaced with `llvm.instrprof.increment` during code /// generation. #[lang = "count_code_region"] pub fn count_code_region(index: u32, start_byte_pos: u32, end_byte_pos: u32); /// Internal marker for code coverage expressions, injected into the MIR when the /// "instrument-coverage" option is enabled. This intrinsic is not converted into a /// backend intrinsic call, but its arguments are extracted during the production of a /// "coverage map", which is injected into the generated code, as additional data. /// This marker identifies a code region and two other counters or counter expressions /// whose sum is the number of times the code region was executed. pub fn coverage_counter_add( index: u32, left_index: u32, right_index: u32, start_byte_pos: u32, end_byte_pos: u32, ); /// This marker identifies a code region and two other counters or counter expressions /// whose difference is the number of times the code region was executed. /// (See `coverage_counter_add` for more information.) pub fn coverage_counter_subtract( index: u32, left_index: u32, right_index: u32, start_byte_pos: u32, end_byte_pos: u32, ); /// This marker identifies a code region to be added to the "coverage map" to indicate source /// code that can never be reached. /// (See `coverage_counter_add` for more information.) pub fn coverage_unreachable(start_byte_pos: u32, end_byte_pos: u32); /// See documentation of `<*const T>::guaranteed_eq` for details. 
#[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")] pub fn ptr_guaranteed_eq<T>(ptr: *const T, other: *const T) -> bool; /// See documentation of `<*const T>::guaranteed_ne` for details. #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")] pub fn ptr_guaranteed_ne<T>(ptr: *const T, other: *const T) -> bool; } // Some functions are defined here because they accidentally got made // available in this module on stable. See <https://github.com/rust-lang/rust/issues/15702>. // (`transmute` also falls into this category, but it cannot be wrapped due to the // check that `T` and `U` have the same size.) /// Checks whether `ptr` is properly aligned with respect to /// `align_of::<T>()`. pub(crate) fn is_aligned_and_not_null<T>(ptr: *const T) -> bool { !ptr.is_null() && ptr as usize % mem::align_of::<T>() == 0 } /// Checks whether the regions of memory starting at `src` and `dst` of size /// `count * size_of::<T>()` do *not* overlap. pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -> bool { let src_usize = src as usize; let dst_usize = dst as usize; let size = mem::size_of::<T>().checked_mul(count).unwrap(); let diff = if src_usize > dst_usize { src_usize - dst_usize } else { dst_usize - src_usize }; // If the absolute distance between the ptrs is at least as big as the size of the buffer, // they do not overlap. diff >= size } /// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source /// and destination must *not* overlap. /// /// For regions of memory which might overlap, use [`copy`] instead. /// /// `copy_nonoverlapping` is semantically equivalent to C's [`memcpy`], but /// with the argument order swapped. /// /// [`copy`]: ./fn.copy.html /// [`memcpy`]: https://en.cppreference.com/w/c/string/byte/memcpy /// /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// * `src` must be [valid] for reads of `count * size_of::<T>()` bytes. /// /// * `dst` must be [valid] for writes of `count * size_of::<T>()` bytes. /// /// * Both `src` and `dst` must be properly aligned. /// /// * The region of memory beginning at `src` with a size of `count * /// size_of::<T>()` bytes must *not* overlap with the region of memory /// beginning at `dst` with the same size. /// /// Like [`read`], `copy_nonoverlapping` creates a bitwise copy of `T`, regardless of /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using *both* the values /// in the region beginning at `*src` and the region beginning at `*dst` can /// [violate memory safety][read-ownership]. /// /// Note that even if the effectively copied size (`count * size_of::<T>()`) is /// `0`, the pointers must be non-NULL and properly aligned. /// /// [`Copy`]: ../marker/trait.Copy.html /// [`read`]: ../ptr/fn.read.html /// [read-ownership]: ../ptr/fn.read.html#ownership-of-the-returned-value /// [valid]: ../ptr/index.html#safety /// /// # Examples /// /// Manually implement [`Vec::append`]: /// /// ``` /// use std::ptr; /// /// /// Moves all the elements of `src` into `dst`, leaving `src` empty. /// fn append<T>(dst: &mut Vec<T>, src: &mut Vec<T>) { /// let src_len = src.len(); /// let dst_len = dst.len(); /// /// // Ensure that `dst` has enough capacity to hold all of `src`. /// dst.reserve(src_len); /// /// unsafe { /// // The call to offset is always safe because `Vec` will never /// // allocate more than `isize::MAX` bytes. 
/// let dst_ptr = dst.as_mut_ptr().offset(dst_len as isize); /// let src_ptr = src.as_ptr(); /// /// // Truncate `src` without dropping its contents. We do this first, /// // to avoid problems in case something further down panics. /// src.set_len(0); /// /// // The two regions cannot overlap because mutable references do /// // not alias, and two different vectors cannot own the same /// // memory. /// ptr::copy_nonoverlapping(src_ptr, dst_ptr, src_len); /// /// // Notify `dst` that it now holds the contents of `src`. /// dst.set_len(dst_len + src_len); /// } /// } /// /// let mut a = vec!['r']; /// let mut b = vec!['u', 's', 't']; /// /// append(&mut a, &mut b); /// /// assert_eq!(a, &['r', 'u', 's', 't']); /// assert!(b.is_empty()); /// ``` /// /// [`Vec::append`]: ../../std/vec/struct.Vec.html#method.append #[doc(alias = "memcpy")] #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize) { extern "rust-intrinsic" { fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize); } if cfg!(debug_assertions) && !(is_aligned_and_not_null(src) && is_aligned_and_not_null(dst) && is_nonoverlapping(src, dst, count)) { // Not panicking to keep codegen impact smaller. abort(); } // SAFETY: the safety contract for `copy_nonoverlapping` must be // upheld by the caller. unsafe { copy_nonoverlapping(src, dst, count) } } /// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source /// and destination may overlap. /// /// If the source and destination will *never* overlap, /// [`copy_nonoverlapping`] can be used instead. /// /// `copy` is semantically equivalent to C's [`memmove`], but with the argument /// order swapped. Copying takes place as if the bytes were copied from `src` /// to a temporary array and then copied from the array to `dst`. /// /// [`copy_nonoverlapping`]: ./fn.copy_nonoverlapping.html /// [`memmove`]: https://en.cppreference.com/w/c/string/byte/memmove /// /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// * `src` must be [valid] for reads of `count * size_of::<T>()` bytes. /// /// * `dst` must be [valid] for writes of `count * size_of::<T>()` bytes. /// /// * Both `src` and `dst` must be properly aligned. /// /// Like [`read`], `copy` creates a bitwise copy of `T`, regardless of /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the values /// in the region beginning at `*src` and the region beginning at `*dst` can /// [violate memory safety][read-ownership]. /// /// Note that even if the effectively copied size (`count * size_of::<T>()`) is /// `0`, the pointers must be non-NULL and properly aligned. 
/// /// [`Copy`]: ../marker/trait.Copy.html /// [`read`]: ../ptr/fn.read.html /// [read-ownership]: ../ptr/fn.read.html#ownership-of-the-returned-value /// [valid]: ../ptr/index.html#safety /// /// # Examples /// /// Efficiently create a Rust vector from an unsafe buffer: /// /// ``` /// use std::ptr; /// /// # #[allow(dead_code)] /// unsafe fn from_buf_raw<T>(ptr: *const T, elts: usize) -> Vec<T> { /// let mut dst = Vec::with_capacity(elts); /// dst.set_len(elts); /// ptr::copy(ptr, dst.as_mut_ptr(), elts); /// dst /// } /// ``` #[doc(alias = "memmove")] #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) { extern "rust-intrinsic" { fn copy<T>(src: *const T, dst: *mut T, count: usize); } if cfg!(debug_assertions) && !(is_aligned_and_not_null(src) && is_aligned_and_not_null(dst)) { // Not panicking to keep codegen impact smaller. abort(); } // SAFETY: the safety contract for `copy` must be upheld by the caller. unsafe { copy(src, dst, count) } } /// Sets `count * size_of::<T>()` bytes of memory starting at `dst` to /// `val`. /// /// `write_bytes` is similar to C's [`memset`], but sets `count * /// size_of::<T>()` bytes to `val`. /// /// [`memset`]: https://en.cppreference.com/w/c/string/byte/memset /// /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// * `dst` must be [valid] for writes of `count * size_of::<T>()` bytes. /// /// * `dst` must be properly aligned. /// /// Additionally, the caller must ensure that writing `count * /// size_of::<T>()` bytes to the given region of memory results in a valid /// value of `T`. Using a region of memory typed as a `T` that contains an /// invalid value of `T` is undefined behavior. /// /// Note that even if the effectively copied size (`count * size_of::<T>()`) is /// `0`, the pointer must be non-NULL and properly aligned. /// /// [valid]: ../ptr/index.html#safety /// /// # Examples /// /// Basic usage: /// /// ``` /// use std::ptr; /// /// let mut vec = vec![0u32; 4]; /// unsafe { /// let vec_ptr = vec.as_mut_ptr(); /// ptr::write_bytes(vec_ptr, 0xfe, 2); /// } /// assert_eq!(vec, [0xfefefefe, 0xfefefefe, 0, 0]); /// ``` /// /// Creating an invalid value: /// /// ``` /// use std::ptr; /// /// let mut v = Box::new(0i32); /// /// unsafe { /// // Leaks the previously held value by overwriting the `Box<T>` with /// // a null pointer. /// ptr::write_bytes(&mut v as *mut Box<i32>, 0, 1); /// } /// /// // At this point, using or dropping `v` results in undefined behavior. /// // drop(v); // ERROR /// /// // Even leaking `v` "uses" it, and hence is undefined behavior. /// // mem::forget(v); // ERROR /// /// // In fact, `v` is invalid according to basic type layout invariants, so *any* /// // operation touching it is undefined behavior. /// // let v2 = v; // ERROR /// /// unsafe { /// // Let us instead put in a valid value /// ptr::write(&mut v as *mut Box<i32>, Box::new(42i32)); /// } /// /// // Now the box is fine /// assert_eq!(*v, 42); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) { extern "rust-intrinsic" { fn write_bytes<T>(dst: *mut T, val: u8, count: usize); } debug_assert!(is_aligned_and_not_null(dst), "attempt to write to unaligned or null pointer"); // SAFETY: the safety contract for `write_bytes` must be upheld by the caller. unsafe { write_bytes(dst, val, count) } }
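The intrinsics documentation above repeatedly points to "stabilized versions" on the integer primitives. A minimal sketch of a few of those stable counterparts, using only well-known stable `std` methods (no `core_intrinsics` feature gate is needed):

```rust
fn main() {
    let x: u32 = 0b0001_1100;
    assert_eq!(x.count_ones(), 3); // stable counterpart of `ctpop`
    assert_eq!(x.leading_zeros(), 27); // stable counterpart of `ctlz`
    assert_eq!(x.trailing_zeros(), 2); // stable counterpart of `cttz`
    assert_eq!(x.rotate_left(8), 0b0001_1100 << 8); // stable counterpart of `rotate_left`

    // `add_with_overflow` surfaces as `overflowing_add`: the bool reports wraparound.
    assert_eq!(u32::MAX.overflowing_add(1), (0, true));
    assert_eq!(1u32.overflowing_add(1), (2, false));
}
```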
47.447415
107
0.630681
f8b198da3e6f1db625aee7ad42afff71e62c8764
4,443
//! # SSSF - Super Simple Simulation Framework
//!
//! It's a crate that lets you write Rust physics simulations without boring and repetitive boilerplate. I created this for my personal usage because of my Uni classes in which we wrote lots of physical simulations, but now I want to share it because I think it's pretty elegant and may be useful for people in similar situations. It's well suited for small physical simulations, but I can see it being useful in bigger projects too, although in bigger projects it may be a good idea to create something tailored to your problem.
//!
//! ## Usage
//!
//! Let's say you would like to build a simulation of a body moving with constant velocity on the x-y plane. Then you need to create a struct that holds the information about that body for each timestep:
//!
//! ```rust
//! struct BodyParameters {
//!     t: f32, // time
//!     x: f32,
//!     y: f32,
//!     v_x: f32,
//!     v_y: f32,
//! }
//! ```
//!
//! These parameters will be stored as history after EACH timestep; that's why we added the `t` field to `BodyParameters` - to keep track of the history of the position (`x`,`y`) and velocity (`v_x`,`v_y`) changing in time.
//! Now that we took care of the parameters that we want to save the history of, we can move to those parameters that are either constant throughout the whole simulation or that we don't need/want to save the history of - for example, we wouldn't like to save the whole grid of Conway's Game of Life for each timestep - we would hold it in parameters without history. So, coming back to the example of the moving body, we can think of those parameters as an environment that we don't want to track.
//! ```rust
//! struct EnvironmentParameters {
//!     dt: f32,
//! }
//! ```
//! For the purpose of this example we don't need to store anything complicated there, so let's just store `dt` - delta time.
//! After that we can start our simulation, providing `BodyParameters`, `EnvironmentParameters` and a step function for the simulation like this:
//! ```rust
//! let mut simulation = SimManger::new(
//!     BodyParameters { t: 0., x: 0., y: 0., v_x: 1., v_y: 1. }, // Initial parameters for the body
//!     EnvironmentParameters { dt: 0.1 },
//!     |body_parameters, environment_parameters| BodyParameters { // Defining the step function
//!         t: body_parameters.t + environment_parameters.dt,
//!         x: body_parameters.x + environment_parameters.dt * body_parameters.v_x,
//!         y: body_parameters.y + environment_parameters.dt * body_parameters.v_y,
//!         v_x: body_parameters.v_x,
//!         v_y: body_parameters.v_y,
//!     },
//! );
//! ```
//! As shown above, you need to provide `step_fn: FnMut(&BodyParameters, &mut EnvironmentParameters) -> BodyParameters`; it has to return the `BodyParameters` after this simulation step, and they will be saved automatically.
//! Now, in order to run the simulation you call `.run(stop_fn)`, where `stop_fn: Fn(&DynamicParameters, &EnvironmentParameters) -> bool` is a simple function that is checked after each timestep. If it returns true, the simulation stops.
//! E.g.
//! If you want to stop the simulation when `t = 5 s` you call:
//! ```simulation.run(|body_parameters, _environment_parameters| body_parameters.t == 5.);```
//! If you want to stop the simulation when `x = 20 m` you call:
//! ```simulation.run(|body_parameters, _environment_parameters| body_parameters.x == 20.);```
//!
//! ## Writing to file
//! It's very useful to save the simulation history to a file to analyze it further, make some beautiful plots, etc. In this case you would like to implement the `ToCSV` trait for `BodyParameters`.
//! I didn't want to use `serde` for serialization because I want this framework to be super lightweight and it's not really necessary.
//! You can implement `ToCSV` like this:
//! ```rust
//! impl ToCSV for BodyParameters {
//!     fn get_header() -> String {
//!         String::from("t,x,y") // Header of the .csv file (don't include "\n" in this string, it's added automatically)
//!     }
//!     fn get_row(&self) -> String {
//!         format!("{},{},{}", self.t, self.x, self.y) // One row of the .csv file (don't include "\n" in this string, it's added automatically)
//!     }
//! }
//! ```
//! Then, instead of calling `.run(stop_fn)`, you call it like this:
//! ```rust
//! simulation.to_file(PathBuf::from(r"output.csv"))
//!     .run_with_save(stop_fn);
//! ```
//!
mod sim_manager;
pub use sim_manager::*;

#[cfg(test)]
mod tests;
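To make the contract of the step and stop functions described above concrete, here is a standalone sketch of the kind of loop such a simulation manager abstracts away. This is not the crate's actual implementation; the names and signatures are illustrative only.

```rust
// Generic simulate-until-stop loop: `step` produces the next state from the current
// state plus the environment, `stop` decides when to halt, and every state is kept
// as history.
fn run_sim<S: Clone, E>(
    mut state: S,
    mut env: E,
    mut step: impl FnMut(&S, &mut E) -> S,
    stop: impl Fn(&S, &E) -> bool,
) -> Vec<S> {
    let mut history = vec![state.clone()];
    while !stop(&state, &env) {
        state = step(&state, &mut env);
        history.push(state.clone());
    }
    history
}

fn main() {
    // Body moving at 1 m/s along x; stop once t reaches 5 s.
    #[derive(Clone)]
    struct Body { t: f64, x: f64, v_x: f64 }

    let history = run_sim(
        Body { t: 0.0, x: 0.0, v_x: 1.0 },
        0.1f64, // the "environment" here is just dt
        |b, dt| Body { t: b.t + *dt, x: b.x + *dt * b.v_x, v_x: b.v_x },
        |b, _| b.t >= 5.0,
    );
    println!("{} steps, final x = {}", history.len(), history.last().unwrap().x);
}
```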
60.040541
518
0.682872
388fc357b040f7a30a55d2f584fbbd4fa75e7379
131
pub mod ownable_instruction;
pub mod ownable_processor;

panoptes_sdk::declare_id!("ownab1e111111111111111111111111111111111111");
26.2
73
0.862595
fea63de9f7b61e944b5ab3469a8f4dd607f4e8c5
3,731
use serde::{Deserialize, Deserializer}; /// This helper function enables successful deserialization of versioned structs; new structs may /// include additional fields if they impl Default and are added to the end of the struct. Right /// now, this function is targeted at `bincode` deserialization; the error match may need to be /// updated if another package needs to be used in the future. pub fn default_on_eof<'de, T, D>(d: D) -> Result<T, D::Error> where D: Deserializer<'de>, T: Deserialize<'de> + Default, { let result = T::deserialize(d); match result { Err(err) if err.to_string() == "io error: unexpected end of file" => Ok(T::default()), Err(err) if err.to_string() == "io error: failed to fill whole buffer" => Ok(T::default()), result => result, } } #[cfg(test)] pub mod tests { use {super::*, bincode::deserialize}; #[test] fn test_default_on_eof() { #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] struct Foo { bar: u16, #[serde(deserialize_with = "default_on_eof")] baz: Option<u16>, #[serde(deserialize_with = "default_on_eof")] quz: String, } let data = vec![1, 0]; assert_eq!( Foo { bar: 1, baz: None, quz: "".to_string(), }, deserialize(&data).unwrap() ); let data = vec![1, 0, 0]; assert_eq!( Foo { bar: 1, baz: None, quz: "".to_string(), }, deserialize(&data).unwrap() ); let data = vec![1, 0, 1]; assert_eq!( Foo { bar: 1, baz: None, quz: "".to_string(), }, deserialize(&data).unwrap() ); let data = vec![1, 0, 1, 0]; assert_eq!( Foo { bar: 1, baz: None, quz: "".to_string(), }, deserialize(&data).unwrap() ); let data = vec![1, 0, 1, 0, 0, 1]; assert_eq!( Foo { bar: 1, baz: Some(0), quz: "".to_string(), }, deserialize(&data).unwrap() ); let data = vec![1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 116]; assert_eq!( Foo { bar: 1, baz: Some(0), quz: "t".to_string(), }, deserialize(&data).unwrap() ); } #[test] #[should_panic] fn test_default_on_eof_additional_untagged_fields() { // If later fields are not tagged `deserialize_with = "default_on_eof"`, deserialization // will panic on any missing fields/data #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] struct Foo { bar: u16, #[serde(deserialize_with = "default_on_eof")] baz: Option<u16>, quz: String, } // Fully populated struct will deserialize let data = vec![1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 116]; assert_eq!( Foo { bar: 1, baz: Some(0), quz: "t".to_string(), }, deserialize(&data).unwrap() ); // Will panic because `quz` is missing, even though `baz` is tagged let data = vec![1, 0, 1, 0]; assert_eq!( Foo { bar: 1, baz: None, quz: "".to_string(), }, deserialize(&data).unwrap() ); } }
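A small sketch of the versioning scenario the doc comment above describes: bytes produced from an older struct layout still deserialize into a newer layout whose trailing field is tagged with `default_on_eof`. The `V1`/`V2` names are illustrative, and the sketch assumes `default_on_eof` from this module plus the `serde` derive macros and `bincode` are in scope, as they are in the tests above.

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize)]
struct V1 {
    bar: u16,
}

#[derive(Deserialize, Debug, PartialEq, Eq)]
struct V2 {
    bar: u16,
    #[serde(deserialize_with = "default_on_eof")]
    baz: Option<u16>, // added later; absent from data serialized as `V1`
}

fn main() {
    // Serialize with the old layout, then read it back with the new one.
    let old_bytes = bincode::serialize(&V1 { bar: 7 }).unwrap();
    let upgraded: V2 = bincode::deserialize(&old_bytes).unwrap();
    assert_eq!(upgraded, V2 { bar: 7, baz: None });
}
```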
28.265152
99
0.459126
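The versioned-struct pattern that the `default_on_eof` helper above is written for can be sketched in a few lines. `ConfigV1`, `ConfigV2` and their fields are made-up names for illustration; the sketch assumes `default_on_eof` is imported from the module shown above and otherwise uses plain `bincode`/`serde` derives.

```rust
use bincode::{deserialize, serialize};
use serde::{Deserialize, Serialize};

// Older writer: only one field existed when these bytes were produced.
#[derive(Serialize)]
struct ConfigV1 {
    retries: u16,
}

// Newer reader: a trailing field added later, defaulted when the old bytes run out.
#[derive(Deserialize, Debug, PartialEq)]
struct ConfigV2 {
    retries: u16,
    #[serde(deserialize_with = "default_on_eof")] // assumes `default_on_eof` is in scope
    timeout_ms: u64,
}

fn upgrade_old_bytes() {
    let old_bytes = serialize(&ConfigV1 { retries: 3 }).unwrap();
    // The u16 is read normally; the missing u64 hits EOF and falls back to Default (0).
    let upgraded: ConfigV2 = deserialize(&old_bytes).unwrap();
    assert_eq!(upgraded, ConfigV2 { retries: 3, timeout_ms: 0 });
}
```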
03670a34cc6dbdf05aef11a9ad8a37528387e2fc
1,306
extern crate clap; use byte_unit::{AdjustedByte, ByteUnit}; use clap::Parser; use cli::Cli; use std::{ fs::{self, File}, io::Write, process, }; mod cli; fn main() -> compacto::Result<()> { let matches: Cli = Cli::parse(); let input = match fs::read_to_string(&matches.input) { Ok(input) => input, Err(e) => { match e.kind() { std::io::ErrorKind::NotFound => eprintln!("File not found: \"{}\"", &matches.input), _ => eprintln!("Could not read file: \"{}\"", &matches.input), }; process::exit(1); } }; let result = match matches.mode { cli::Mode::Compress => compacto::compress_json(&input)?, cli::Mode::Decompress => compacto::decompress_json(&input)?, }; let mut output = File::create(&matches.output).unwrap(); output.write_all(result.as_bytes()).unwrap(); println!( "{}, Size: {}\n{}, Size: {}", matches.input, get_file_size(&matches.input), &matches.output, get_file_size(&matches.output) ); Ok(()) } fn get_file_size(file: &str) -> AdjustedByte { let size = File::open(file).unwrap().metadata().unwrap().len().into(); byte_unit::Byte::from_bytes(size).get_adjusted_unit(ByteUnit::KB) }
26.12
100
0.559724
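For the library side of the `compacto` binary above, a minimal round trip looks roughly like this; `round_trip` and the sample JSON are illustrative, and only `compress_json`, `decompress_json` and `compacto::Result` are taken from the code shown.

```rust
// Round-trip sketch: compress a JSON string, then restore it again.
fn round_trip() -> compacto::Result<()> {
    let input = r#"[{"id":1,"city":"Berlin"},{"id":2,"city":"Berlin"}]"#;

    let compressed = compacto::compress_json(input)?;
    let restored = compacto::decompress_json(&compressed)?;

    // Mirrors what the binary reports: sizes before and after.
    println!(
        "original: {} bytes, compressed: {} bytes, restored: {} bytes",
        input.len(),
        compressed.len(),
        restored.len()
    );
    Ok(())
}
```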
71c93a04ce552e75d713aacd6d41e7603d52eb6e
1,023
/** You are given an integer num. Find two integers that satisfy all of the following requirements: their product equals num + 1 or num + 2, and, measured by absolute difference, the two integers are as close to each other as possible. You may return the two integers in any order. Example 1: Input: num = 8 Output: [3,3] Explanation: For num + 1 = 9, the closest pair of divisors is 3 & 3; for num + 2 = 10, the closest pair of divisors is 2 & 5, so return 3 & 3. Example 2: Input: num = 123 Output: [5,25] Example 3: Input: num = 999 Output: [40,25] Constraints: 1 <= num <= 10^9 Source: LeetCode (LeetCode-CN) Link: https://leetcode-cn.com/problems/closest-divisors Copyright belongs to LeetCode; contact LeetCode for authorization before commercial reuse, and credit the source for non-commercial reuse. */ pub fn closest_divisors(num: i32) -> Vec<i32> { let mut ans: Vec<i32> = vec![0, 1000000000]; for i in num+1..num+3 { let cur = divide(i); if (cur[0] - cur[1]).abs() < (ans[1] - ans[0]).abs() { ans = cur; } } ans } fn divide(n: i32) -> Vec<i32> { for i in (0..=(n as f64).sqrt() as i32).rev() { if n % i == 0 { return vec![i, n / i]; } } vec![0, 1000000000] } #[cfg(test)] mod tests { use crate::closest_divisors; #[test] fn it_works() { assert_eq!(closest_divisors(170967091), vec![10754,15898]); } }
16.238095
79
0.542522
c1c7bb927492e42cf255b765d4b74518b6cc1dda
5,571
//! Provides a (currently mediocre) virtual address allocator, //! which allocates pages (not physical memory) starting from kernel_config::memory::KERNEL_TEXT_START. //! The minimum unit of allocation is a single page. use core::ops::Deref; use kernel_config::memory::{KERNEL_TEXT_START, KERNEL_TEXT_MAX_SIZE, PAGE_SIZE}; use super::{VirtualAddress, Page, PageRange}; use spin::Mutex; use alloc::collections::LinkedList; /// A group of contiguous pages, much like a hole in other allocators. struct Chunk { /// Whether or not this Chunk is currently allocated. If false, it is free. allocated: bool, /// The Page at which this chunk starts. start_page: Page, /// The size of this chunk, specified in number of pages, not bytes. size_in_pages: usize, } impl Chunk { fn as_allocated_pages(&self) -> AllocatedPages { // subtract one because it's an inclusive range let end_page = self.start_page + self.size_in_pages - 1; AllocatedPages { pages: PageRange::new(self.start_page, end_page), } } } /// Represents an allocated range of virtual addresses, specified in pages. /// These pages are not initially mapped to any physical memory frames, you must do that separately. /// This object represents ownership of those pages; if this object falls out of scope, /// it will be dropped, and the pages will be de-allocated. /// See `MappedPages` struct for a similar object that unmaps pages when dropped. #[derive(Debug)] pub struct AllocatedPages { pub pages: PageRange, } impl AllocatedPages { /// Returns the start address of the first page. pub fn start_address(&self) -> VirtualAddress { self.pages.start_address() } pub fn size_in_pages(&self) -> usize { self.pages.size_in_pages() } } impl Deref for AllocatedPages { type Target = PageRange; fn deref(&self) -> &PageRange { &self.pages } } // impl Drop for AllocatedPages { // fn drop(&mut self) { // if let Err(_) = deallocate_pages(self) { // error!("AllocatedPages::drop(): error deallocating pages"); // } // } // } lazy_static!{ static ref FREE_PAGE_LIST: Mutex<LinkedList<Chunk>> = { // we need to create the first chunk here, // which is one giant chunk that starts at KERNEL_TEXT_START // and goes until the end of the kernel free text section let initial_chunk: Chunk = Chunk { allocated: false, start_page: Page::containing_address(VirtualAddress::new_canonical(KERNEL_TEXT_START)), size_in_pages: KERNEL_TEXT_MAX_SIZE / PAGE_SIZE, }; let mut list: LinkedList<Chunk> = LinkedList::new(); list.push_front(initial_chunk); Mutex::new(list) }; } /// Convenience function for allocating pages by giving the number of bytes /// rather than the number of pages. It will still allocate whole pages /// by rounding up the number of bytes. /// See [`allocate_pages()`](fn.allocate_pages.html) pub fn allocate_pages_by_bytes(num_bytes: usize) -> Option<AllocatedPages> { let num_pages = (num_bytes + PAGE_SIZE - 1) / PAGE_SIZE; // round up allocate_pages(num_pages) } /// Allocates the given number of pages, but simply reserves the virtual addresses; /// it does not allocate actual physical memory frames nor do any mapping. /// Thus these pages aren't directly usable until they are mapped to physical frames. /// Allocation is quick, technically O(n) but generally will allocate immediately /// because the largest free chunks are stored at the front of the list. /// Fragmentation isn't cleaned up until we're out of address space, but not really a big deal. 
pub fn allocate_pages(num_pages: usize) -> Option<AllocatedPages> { if num_pages == 0 { warn!("allocate_pages(): requested an allocation of 0 pages... stupid!"); return None; } // the Page where the newly-allocated Chunk starts, which we'll return if successfully allocated. let mut new_start_page: Option<Page> = None; let mut locked_list = FREE_PAGE_LIST.lock(); for mut c in locked_list.iter_mut() { // skip already-allocated chunks and chunks that are too small if c.allocated || c.size_in_pages < num_pages { continue; } // here: we have found a suitable chunk let start_page = c.start_page; let remaining_size = c.size_in_pages - num_pages; if remaining_size == 0 { // if the chunk is exactly the right size, just update it in-place as 'allocated' c.allocated = true; return Some(c.as_allocated_pages()) } // here: we have the chunk and we need to split it up into two chunks assert!(c.allocated == false, "BUG: an already-allocated chunk is going to be split!"); // first, update in-place the original free (unallocated) chunk to be smaller, since we're removing pages from it c.size_in_pages = remaining_size; c.start_page += num_pages; // second, create a new chunk that has the pages we've peeled off the original chunk being split // (or rather, we create the chunk below outside of the iterator loop, so here we just tell it where to start) new_start_page = Some(start_page); break; } if let Some(p) = new_start_page { let new_chunk = Chunk { allocated: true, start_page: p, size_in_pages: num_pages, }; let ret = new_chunk.as_allocated_pages(); locked_list.push_back(new_chunk); Some(ret) } else { error!("VirtualAddressAllocator: out of virtual address space."); return None; } } #[allow(dead_code)] fn deallocate_pages(_pages: &mut AllocatedPages) -> Result<(), ()> { trace!("Virtual Address Allocator: deallocate_pages is not yet implemented, trying to dealloc: {:?}", _pages); Ok(()) // unimplemented!(); }
34.177914
115
0.71944
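A hypothetical call site for the allocator above, just to show the shape of the API; the `page_allocator` module path and the mapping step are assumptions, while `allocate_pages`, `allocate_pages_by_bytes`, `size_in_pages` and `start_address` come from the code itself.

```rust
// Reserves virtual address space only; physical frames still have to be mapped separately.
fn reserve_scratch_area() {
    // Three whole pages.
    if let Some(pages) = page_allocator::allocate_pages(3) {
        assert_eq!(pages.size_in_pages(), 3);
        let _start = pages.start_address();
        // ... hand `pages` to the mapper to back them with physical frames ...
    }

    // Byte-sized requests are rounded up to whole pages, so this reserves one page.
    let one_page = page_allocator::allocate_pages_by_bytes(1);
    assert!(one_page.is_some());
}
```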
f8c3800583fe96f9fc0782339505ac2a3372aaf4
775
pub struct Heap {} pub struct HeapGuard<'a> { heap: &'a Heap } impl Heap { pub fn new() -> Self { Heap {} } pub fn activate(&mut self) -> HeapGuard { unimplemented!() } } // struct SwappableAllocator; // // unsafe impl GlobalAlloc for Allocator { // unsafe fn alloc(&self, layout: Layout) -> *mut u8 { // System.alloc(layout) // } // // unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { // System.dealloc(ptr, layout) // } // // unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { // System.alloc_zeroed(layout) // } // // unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { // System.realloc(ptr, layout, new_size) // } // }
22.794118
91
0.552258
0178be80cb05aeb729ce2831466b828b047c7edb
36,499
//! Error and Result module use std::cell::RefCell; use std::io::Write; use std::str::Utf8Error; use std::string::FromUtf8Error; use std::{fmt, io, result}; use bytes::BytesMut; use derive_more::{Display, From}; use http::uri::InvalidUri; use http::{header, Error as HttpError, StatusCode}; use serde::de::value::Error as DeError; use serde_json::error::Error as JsonError; use serde_urlencoded::ser::Error as FormError; use crate::body::Body; use crate::helpers::Writer; use crate::response::{Response, ResponseBuilder}; #[cfg(feature = "cookies")] pub use crate::cookie::ParseError as CookieParseError; /// A specialized [`std::result::Result`] /// for actix web operations /// /// This typedef is generally used to avoid writing out /// `actix_http::error::Error` directly and is otherwise a direct mapping to /// `Result`. pub type Result<T, E = Error> = result::Result<T, E>; /// General purpose actix web error. /// /// An actix web error is used to carry errors from `std::error` /// through actix in a convenient way. It can be created through /// converting errors with `into()`. /// /// Whenever it is created from an external object, a response error is created /// for it that can be used to create an HTTP response from it. This means that /// if you have access to an actix `Error` you can always get a /// `ResponseError` reference from it. pub struct Error { cause: Box<dyn ResponseError>, } impl Error { /// Returns the reference to the underlying `ResponseError`. pub fn as_response_error(&self) -> &dyn ResponseError { self.cause.as_ref() } /// Similar to `as_response_error` but downcasts. pub fn as_error<T: ResponseError + 'static>(&self) -> Option<&T> { <dyn ResponseError>::downcast_ref(self.cause.as_ref()) } } /// Error that can be converted to `Response` pub trait ResponseError: fmt::Debug + fmt::Display { /// Response's status code /// /// Internal server error is generated by default. fn status_code(&self) -> StatusCode { StatusCode::INTERNAL_SERVER_ERROR } /// Create response for error /// /// Internal server error is generated by default.
fn error_response(&self) -> Response { let mut resp = Response::new(self.status_code()); let mut buf = BytesMut::new(); let _ = write!(Writer(&mut buf), "{}", self); resp.headers_mut().insert( header::CONTENT_TYPE, header::HeaderValue::from_static("text/plain; charset=utf-8"), ); resp.set_body(Body::from(buf)) } downcast_get_type_id!(); } downcast!(ResponseError); impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&self.cause, f) } } impl fmt::Debug for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:?}", &self.cause) } } impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { None } } impl From<()> for Error { fn from(_: ()) -> Self { Error::from(UnitError) } } impl From<std::convert::Infallible> for Error { fn from(_: std::convert::Infallible) -> Self { // `std::convert::Infallible` indicates an error // that will never happen unreachable!() } } /// Convert `Error` to a `Response` instance impl From<Error> for Response { fn from(err: Error) -> Self { Response::from_error(err) } } /// `Error` for any error that implements `ResponseError` impl<T: ResponseError + 'static> From<T> for Error { fn from(err: T) -> Error { Error { cause: Box::new(err), } } } /// Convert Response to a Error impl From<Response> for Error { fn from(res: Response) -> Error { InternalError::from_response("", res).into() } } /// Convert ResponseBuilder to a Error impl From<ResponseBuilder> for Error { fn from(mut res: ResponseBuilder) -> Error { InternalError::from_response("", res.finish()).into() } } #[derive(Debug, Display)] #[display(fmt = "UnknownError")] struct UnitError; /// Returns [`StatusCode::INTERNAL_SERVER_ERROR`] for [`UnitError`]. impl ResponseError for UnitError {} /// Returns [`StatusCode::INTERNAL_SERVER_ERROR`] for [`JsonError`]. impl ResponseError for JsonError {} /// Returns [`StatusCode::INTERNAL_SERVER_ERROR`] for [`FormError`]. impl ResponseError for FormError {} #[cfg(feature = "openssl")] /// Returns [`StatusCode::INTERNAL_SERVER_ERROR`] for [`actix_tls::accept::openssl::SslError`]. impl ResponseError for actix_tls::accept::openssl::SslError {} /// Returns [`StatusCode::BAD_REQUEST`] for [`DeError`]. impl ResponseError for DeError { fn status_code(&self) -> StatusCode { StatusCode::BAD_REQUEST } } /// Returns [`StatusCode::BAD_REQUEST`] for [`Utf8Error`]. impl ResponseError for Utf8Error { fn status_code(&self) -> StatusCode { StatusCode::BAD_REQUEST } } /// Returns [`StatusCode::INTERNAL_SERVER_ERROR`] for [`HttpError`]. impl ResponseError for HttpError {} /// Inspects the underlying [`io::ErrorKind`] and returns an appropriate status code. /// /// If the error is [`io::ErrorKind::NotFound`], [`StatusCode::NOT_FOUND`] is returned. If the /// error is [`io::ErrorKind::PermissionDenied`], [`StatusCode::FORBIDDEN`] is returned. Otherwise, /// [`StatusCode::INTERNAL_SERVER_ERROR`] is returned. impl ResponseError for io::Error { fn status_code(&self) -> StatusCode { match self.kind() { io::ErrorKind::NotFound => StatusCode::NOT_FOUND, io::ErrorKind::PermissionDenied => StatusCode::FORBIDDEN, _ => StatusCode::INTERNAL_SERVER_ERROR, } } } /// Returns [`StatusCode::BAD_REQUEST`] for [`header::InvalidHeaderValue`]. 
impl ResponseError for header::InvalidHeaderValue { fn status_code(&self) -> StatusCode { StatusCode::BAD_REQUEST } } /// A set of errors that can occur during parsing HTTP streams #[derive(Debug, Display)] pub enum ParseError { /// An invalid `Method`, such as `GE.T`. #[display(fmt = "Invalid Method specified")] Method, /// An invalid `Uri`, such as `exam ple.domain`. #[display(fmt = "Uri error: {}", _0)] Uri(InvalidUri), /// An invalid `HttpVersion`, such as `HTP/1.1` #[display(fmt = "Invalid HTTP version specified")] Version, /// An invalid `Header`. #[display(fmt = "Invalid Header provided")] Header, /// A message head is too large to be reasonable. #[display(fmt = "Message head is too large")] TooLarge, /// A message reached EOF, but is not complete. #[display(fmt = "Message is incomplete")] Incomplete, /// An invalid `Status`, such as `1337 ELITE`. #[display(fmt = "Invalid Status provided")] Status, /// A timeout occurred waiting for an IO event. #[allow(dead_code)] #[display(fmt = "Timeout")] Timeout, /// An `io::Error` that occurred while trying to read or write to a network /// stream. #[display(fmt = "IO error: {}", _0)] Io(io::Error), /// Parsing a field as string failed #[display(fmt = "UTF8 error: {}", _0)] Utf8(Utf8Error), } /// Return `BadRequest` for `ParseError` impl ResponseError for ParseError { fn status_code(&self) -> StatusCode { StatusCode::BAD_REQUEST } } impl From<io::Error> for ParseError { fn from(err: io::Error) -> ParseError { ParseError::Io(err) } } impl From<InvalidUri> for ParseError { fn from(err: InvalidUri) -> ParseError { ParseError::Uri(err) } } impl From<Utf8Error> for ParseError { fn from(err: Utf8Error) -> ParseError { ParseError::Utf8(err) } } impl From<FromUtf8Error> for ParseError { fn from(err: FromUtf8Error) -> ParseError { ParseError::Utf8(err.utf8_error()) } } impl From<httparse::Error> for ParseError { fn from(err: httparse::Error) -> ParseError { match err { httparse::Error::HeaderName | httparse::Error::HeaderValue | httparse::Error::NewLine | httparse::Error::Token => ParseError::Header, httparse::Error::Status => ParseError::Status, httparse::Error::TooManyHeaders => ParseError::TooLarge, httparse::Error::Version => ParseError::Version, } } } /// A set of errors that can occur running blocking tasks in thread pool. #[derive(Debug, Display)] #[display(fmt = "Blocking thread pool is gone")] pub struct BlockingError; impl std::error::Error for BlockingError {} /// `InternalServerError` for `BlockingError` impl ResponseError for BlockingError {} #[derive(Display, Debug)] /// A set of errors that can occur during payload parsing pub enum PayloadError { /// A payload reached EOF, but is not complete. #[display( fmt = "A payload reached EOF, but is not complete. Inner error: {:?}", _0 )] Incomplete(Option<io::Error>), /// Content encoding stream corruption. #[display(fmt = "Can not decode content-encoding.")] EncodingCorrupted, /// Payload reached size limit. #[display(fmt = "Payload reached size limit.")] Overflow, /// Payload length is unknown. #[display(fmt = "Payload length is unknown.")] UnknownLength, /// HTTP/2 payload error. #[display(fmt = "{}", _0)] Http2Payload(h2::Error), /// Generic I/O error. 
#[display(fmt = "{}", _0)] Io(io::Error), } impl std::error::Error for PayloadError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { PayloadError::Incomplete(None) => None, PayloadError::Incomplete(Some(err)) => Some(err as &dyn std::error::Error), PayloadError::EncodingCorrupted => None, PayloadError::Overflow => None, PayloadError::UnknownLength => None, PayloadError::Http2Payload(err) => Some(err as &dyn std::error::Error), PayloadError::Io(err) => Some(err as &dyn std::error::Error), } } } impl From<h2::Error> for PayloadError { fn from(err: h2::Error) -> Self { PayloadError::Http2Payload(err) } } impl From<Option<io::Error>> for PayloadError { fn from(err: Option<io::Error>) -> Self { PayloadError::Incomplete(err) } } impl From<io::Error> for PayloadError { fn from(err: io::Error) -> Self { PayloadError::Incomplete(Some(err)) } } impl From<BlockingError> for PayloadError { fn from(_: BlockingError) -> Self { PayloadError::Io(io::Error::new( io::ErrorKind::Other, "Operation is canceled", )) } } /// `PayloadError` returns two possible results: /// /// - `Overflow` returns `PayloadTooLarge` /// - Other errors returns `BadRequest` impl ResponseError for PayloadError { fn status_code(&self) -> StatusCode { match *self { PayloadError::Overflow => StatusCode::PAYLOAD_TOO_LARGE, _ => StatusCode::BAD_REQUEST, } } } /// Return `BadRequest` for `cookie::ParseError` #[cfg(feature = "cookies")] impl ResponseError for crate::cookie::ParseError { fn status_code(&self) -> StatusCode { StatusCode::BAD_REQUEST } } #[derive(Debug, Display, From)] /// A set of errors that can occur during dispatching HTTP requests pub enum DispatchError { /// Service error Service(Error), /// Upgrade service error Upgrade, /// An `io::Error` that occurred while trying to read or write to a network /// stream. #[display(fmt = "IO error: {}", _0)] Io(io::Error), /// Http request parse error. #[display(fmt = "Parse error: {}", _0)] Parse(ParseError), /// Http/2 error #[display(fmt = "{}", _0)] H2(h2::Error), /// The first request did not complete within the specified timeout. #[display(fmt = "The first request did not complete within the specified timeout")] SlowRequestTimeout, /// Disconnect timeout. Makes sense for ssl streams. #[display(fmt = "Connection shutdown timeout")] DisconnectTimeout, /// Payload is not consumed #[display(fmt = "Task is completed but request's payload is not consumed")] PayloadIsNotConsumed, /// Malformed request #[display(fmt = "Malformed request")] MalformedRequest, /// Internal error #[display(fmt = "Internal error")] InternalError, /// Unknown error #[display(fmt = "Unknown error")] Unknown, } /// A set of error that can occur during parsing content type #[derive(PartialEq, Debug, Display)] pub enum ContentTypeError { /// Can not parse content type #[display(fmt = "Can not parse content type")] ParseError, /// Unknown content encoding #[display(fmt = "Unknown content encoding")] UnknownEncoding, } impl std::error::Error for ContentTypeError {} /// Return `BadRequest` for `ContentTypeError` impl ResponseError for ContentTypeError { fn status_code(&self) -> StatusCode { StatusCode::BAD_REQUEST } } /// Helper type that can wrap any error and generate custom response. /// /// In following example any `io::Error` will be converted into "BAD REQUEST" /// response as opposite to *INTERNAL SERVER ERROR* which is defined by /// default. 
/// /// ``` /// # use std::io; /// # use actix_http::*; /// /// fn index(req: Request) -> Result<&'static str> { /// Err(error::ErrorBadRequest(io::Error::new(io::ErrorKind::Other, "error"))) /// } /// ``` pub struct InternalError<T> { cause: T, status: InternalErrorType, } enum InternalErrorType { Status(StatusCode), Response(RefCell<Option<Response>>), } impl<T> InternalError<T> { /// Create `InternalError` instance pub fn new(cause: T, status: StatusCode) -> Self { InternalError { cause, status: InternalErrorType::Status(status), } } /// Create `InternalError` with predefined `Response`. pub fn from_response(cause: T, response: Response) -> Self { InternalError { cause, status: InternalErrorType::Response(RefCell::new(Some(response))), } } } impl<T> fmt::Debug for InternalError<T> where T: fmt::Debug + 'static, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&self.cause, f) } } impl<T> fmt::Display for InternalError<T> where T: fmt::Display + 'static, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&self.cause, f) } } impl<T> ResponseError for InternalError<T> where T: fmt::Debug + fmt::Display + 'static, { fn status_code(&self) -> StatusCode { match self.status { InternalErrorType::Status(st) => st, InternalErrorType::Response(ref resp) => { if let Some(resp) = resp.borrow().as_ref() { resp.head().status } else { StatusCode::INTERNAL_SERVER_ERROR } } } } fn error_response(&self) -> Response { match self.status { InternalErrorType::Status(st) => { let mut res = Response::new(st); let mut buf = BytesMut::new(); let _ = write!(Writer(&mut buf), "{}", self); res.headers_mut().insert( header::CONTENT_TYPE, header::HeaderValue::from_static("text/plain; charset=utf-8"), ); res.set_body(Body::from(buf)) } InternalErrorType::Response(ref resp) => { if let Some(resp) = resp.borrow_mut().take() { resp } else { Response::new(StatusCode::INTERNAL_SERVER_ERROR) } } } } } /// Helper function that creates wrapper of any error and generate *BAD /// REQUEST* response. #[allow(non_snake_case)] pub fn ErrorBadRequest<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::BAD_REQUEST).into() } /// Helper function that creates wrapper of any error and generate /// *UNAUTHORIZED* response. #[allow(non_snake_case)] pub fn ErrorUnauthorized<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::UNAUTHORIZED).into() } /// Helper function that creates wrapper of any error and generate /// *PAYMENT_REQUIRED* response. #[allow(non_snake_case)] pub fn ErrorPaymentRequired<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::PAYMENT_REQUIRED).into() } /// Helper function that creates wrapper of any error and generate *FORBIDDEN* /// response. #[allow(non_snake_case)] pub fn ErrorForbidden<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::FORBIDDEN).into() } /// Helper function that creates wrapper of any error and generate *NOT FOUND* /// response. #[allow(non_snake_case)] pub fn ErrorNotFound<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::NOT_FOUND).into() } /// Helper function that creates wrapper of any error and generate *METHOD NOT /// ALLOWED* response. 
#[allow(non_snake_case)] pub fn ErrorMethodNotAllowed<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::METHOD_NOT_ALLOWED).into() } /// Helper function that creates wrapper of any error and generate *NOT /// ACCEPTABLE* response. #[allow(non_snake_case)] pub fn ErrorNotAcceptable<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::NOT_ACCEPTABLE).into() } /// Helper function that creates wrapper of any error and generate *PROXY /// AUTHENTICATION REQUIRED* response. #[allow(non_snake_case)] pub fn ErrorProxyAuthenticationRequired<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::PROXY_AUTHENTICATION_REQUIRED).into() } /// Helper function that creates wrapper of any error and generate *REQUEST /// TIMEOUT* response. #[allow(non_snake_case)] pub fn ErrorRequestTimeout<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::REQUEST_TIMEOUT).into() } /// Helper function that creates wrapper of any error and generate *CONFLICT* /// response. #[allow(non_snake_case)] pub fn ErrorConflict<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::CONFLICT).into() } /// Helper function that creates wrapper of any error and generate *GONE* /// response. #[allow(non_snake_case)] pub fn ErrorGone<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::GONE).into() } /// Helper function that creates wrapper of any error and generate *LENGTH /// REQUIRED* response. #[allow(non_snake_case)] pub fn ErrorLengthRequired<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::LENGTH_REQUIRED).into() } /// Helper function that creates wrapper of any error and generate /// *PAYLOAD TOO LARGE* response. #[allow(non_snake_case)] pub fn ErrorPayloadTooLarge<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::PAYLOAD_TOO_LARGE).into() } /// Helper function that creates wrapper of any error and generate /// *URI TOO LONG* response. #[allow(non_snake_case)] pub fn ErrorUriTooLong<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::URI_TOO_LONG).into() } /// Helper function that creates wrapper of any error and generate /// *UNSUPPORTED MEDIA TYPE* response. #[allow(non_snake_case)] pub fn ErrorUnsupportedMediaType<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::UNSUPPORTED_MEDIA_TYPE).into() } /// Helper function that creates wrapper of any error and generate /// *RANGE NOT SATISFIABLE* response. #[allow(non_snake_case)] pub fn ErrorRangeNotSatisfiable<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::RANGE_NOT_SATISFIABLE).into() } /// Helper function that creates wrapper of any error and generate /// *IM A TEAPOT* response. #[allow(non_snake_case)] pub fn ErrorImATeapot<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::IM_A_TEAPOT).into() } /// Helper function that creates wrapper of any error and generate /// *MISDIRECTED REQUEST* response. 
#[allow(non_snake_case)] pub fn ErrorMisdirectedRequest<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::MISDIRECTED_REQUEST).into() } /// Helper function that creates wrapper of any error and generate /// *UNPROCESSABLE ENTITY* response. #[allow(non_snake_case)] pub fn ErrorUnprocessableEntity<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::UNPROCESSABLE_ENTITY).into() } /// Helper function that creates wrapper of any error and generate /// *LOCKED* response. #[allow(non_snake_case)] pub fn ErrorLocked<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::LOCKED).into() } /// Helper function that creates wrapper of any error and generate /// *FAILED DEPENDENCY* response. #[allow(non_snake_case)] pub fn ErrorFailedDependency<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::FAILED_DEPENDENCY).into() } /// Helper function that creates wrapper of any error and generate /// *UPGRADE REQUIRED* response. #[allow(non_snake_case)] pub fn ErrorUpgradeRequired<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::UPGRADE_REQUIRED).into() } /// Helper function that creates wrapper of any error and generate /// *PRECONDITION FAILED* response. #[allow(non_snake_case)] pub fn ErrorPreconditionFailed<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::PRECONDITION_FAILED).into() } /// Helper function that creates wrapper of any error and generate /// *PRECONDITION REQUIRED* response. #[allow(non_snake_case)] pub fn ErrorPreconditionRequired<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::PRECONDITION_REQUIRED).into() } /// Helper function that creates wrapper of any error and generate /// *TOO MANY REQUESTS* response. #[allow(non_snake_case)] pub fn ErrorTooManyRequests<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::TOO_MANY_REQUESTS).into() } /// Helper function that creates wrapper of any error and generate /// *REQUEST HEADER FIELDS TOO LARGE* response. #[allow(non_snake_case)] pub fn ErrorRequestHeaderFieldsTooLarge<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE).into() } /// Helper function that creates wrapper of any error and generate /// *UNAVAILABLE FOR LEGAL REASONS* response. #[allow(non_snake_case)] pub fn ErrorUnavailableForLegalReasons<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::UNAVAILABLE_FOR_LEGAL_REASONS).into() } /// Helper function that creates wrapper of any error and generate /// *EXPECTATION FAILED* response. #[allow(non_snake_case)] pub fn ErrorExpectationFailed<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::EXPECTATION_FAILED).into() } /// Helper function that creates wrapper of any error and /// generate *INTERNAL SERVER ERROR* response. #[allow(non_snake_case)] pub fn ErrorInternalServerError<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::INTERNAL_SERVER_ERROR).into() } /// Helper function that creates wrapper of any error and /// generate *NOT IMPLEMENTED* response. 
#[allow(non_snake_case)] pub fn ErrorNotImplemented<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::NOT_IMPLEMENTED).into() } /// Helper function that creates wrapper of any error and /// generate *BAD GATEWAY* response. #[allow(non_snake_case)] pub fn ErrorBadGateway<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::BAD_GATEWAY).into() } /// Helper function that creates wrapper of any error and /// generate *SERVICE UNAVAILABLE* response. #[allow(non_snake_case)] pub fn ErrorServiceUnavailable<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::SERVICE_UNAVAILABLE).into() } /// Helper function that creates wrapper of any error and /// generate *GATEWAY TIMEOUT* response. #[allow(non_snake_case)] pub fn ErrorGatewayTimeout<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::GATEWAY_TIMEOUT).into() } /// Helper function that creates wrapper of any error and /// generate *HTTP VERSION NOT SUPPORTED* response. #[allow(non_snake_case)] pub fn ErrorHttpVersionNotSupported<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::HTTP_VERSION_NOT_SUPPORTED).into() } /// Helper function that creates wrapper of any error and /// generate *VARIANT ALSO NEGOTIATES* response. #[allow(non_snake_case)] pub fn ErrorVariantAlsoNegotiates<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::VARIANT_ALSO_NEGOTIATES).into() } /// Helper function that creates wrapper of any error and /// generate *INSUFFICIENT STORAGE* response. #[allow(non_snake_case)] pub fn ErrorInsufficientStorage<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::INSUFFICIENT_STORAGE).into() } /// Helper function that creates wrapper of any error and /// generate *LOOP DETECTED* response. #[allow(non_snake_case)] pub fn ErrorLoopDetected<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::LOOP_DETECTED).into() } /// Helper function that creates wrapper of any error and /// generate *NOT EXTENDED* response. #[allow(non_snake_case)] pub fn ErrorNotExtended<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::NOT_EXTENDED).into() } /// Helper function that creates wrapper of any error and /// generate *NETWORK AUTHENTICATION REQUIRED* response. 
#[allow(non_snake_case)] pub fn ErrorNetworkAuthenticationRequired<T>(err: T) -> Error where T: fmt::Debug + fmt::Display + 'static, { InternalError::new(err, StatusCode::NETWORK_AUTHENTICATION_REQUIRED).into() } #[cfg(test)] mod tests { use super::*; use http::{Error as HttpError, StatusCode}; use std::io; #[test] fn test_into_response() { let resp: Response = ParseError::Incomplete.error_response(); assert_eq!(resp.status(), StatusCode::BAD_REQUEST); let err: HttpError = StatusCode::from_u16(10000).err().unwrap().into(); let resp: Response = err.error_response(); assert_eq!(resp.status(), StatusCode::INTERNAL_SERVER_ERROR); } #[cfg(feature = "cookies")] #[test] fn test_cookie_parse() { let resp: Response = CookieParseError::EmptyName.error_response(); assert_eq!(resp.status(), StatusCode::BAD_REQUEST); } #[test] fn test_as_response() { let orig = io::Error::new(io::ErrorKind::Other, "other"); let e: Error = ParseError::Io(orig).into(); assert_eq!(format!("{}", e.as_response_error()), "IO error: other"); } #[test] fn test_error_cause() { let orig = io::Error::new(io::ErrorKind::Other, "other"); let desc = orig.to_string(); let e = Error::from(orig); assert_eq!(format!("{}", e.as_response_error()), desc); } #[test] fn test_error_display() { let orig = io::Error::new(io::ErrorKind::Other, "other"); let desc = orig.to_string(); let e = Error::from(orig); assert_eq!(format!("{}", e), desc); } #[test] fn test_error_http_response() { let orig = io::Error::new(io::ErrorKind::Other, "other"); let e = Error::from(orig); let resp: Response = e.into(); assert_eq!(resp.status(), StatusCode::INTERNAL_SERVER_ERROR); } #[test] fn test_payload_error() { let err: PayloadError = io::Error::new(io::ErrorKind::Other, "ParseError").into(); assert!(err.to_string().contains("ParseError")); let err = PayloadError::Incomplete(None); assert_eq!( err.to_string(), "A payload reached EOF, but is not complete. Inner error: None" ); } macro_rules! from { ($from:expr => $error:pat) => { match ParseError::from($from) { err @ $error => { assert!(err.to_string().len() >= 5); } err => unreachable!("{:?}", err), } }; } macro_rules! 
from_and_cause { ($from:expr => $error:pat) => { match ParseError::from($from) { e @ $error => { let desc = format!("{}", e); assert_eq!(desc, format!("IO error: {}", $from)); } _ => unreachable!("{:?}", $from), } }; } #[test] fn test_from() { from_and_cause!(io::Error::new(io::ErrorKind::Other, "other") => ParseError::Io(..)); from!(httparse::Error::HeaderName => ParseError::Header); from!(httparse::Error::HeaderName => ParseError::Header); from!(httparse::Error::HeaderValue => ParseError::Header); from!(httparse::Error::NewLine => ParseError::Header); from!(httparse::Error::Status => ParseError::Status); from!(httparse::Error::Token => ParseError::Header); from!(httparse::Error::TooManyHeaders => ParseError::TooLarge); from!(httparse::Error::Version => ParseError::Version); } #[test] fn test_internal_error() { let err = InternalError::from_response(ParseError::Method, Response::Ok().into()); let resp: Response = err.error_response(); assert_eq!(resp.status(), StatusCode::OK); } #[test] fn test_error_casting() { let err = PayloadError::Overflow; let resp_err: &dyn ResponseError = &err; let err = resp_err.downcast_ref::<PayloadError>().unwrap(); assert_eq!(err.to_string(), "Payload reached size limit."); let not_err = resp_err.downcast_ref::<ContentTypeError>(); assert!(not_err.is_none()); } #[test] fn test_error_helpers() { let r: Response = ErrorBadRequest("err").into(); assert_eq!(r.status(), StatusCode::BAD_REQUEST); let r: Response = ErrorUnauthorized("err").into(); assert_eq!(r.status(), StatusCode::UNAUTHORIZED); let r: Response = ErrorPaymentRequired("err").into(); assert_eq!(r.status(), StatusCode::PAYMENT_REQUIRED); let r: Response = ErrorForbidden("err").into(); assert_eq!(r.status(), StatusCode::FORBIDDEN); let r: Response = ErrorNotFound("err").into(); assert_eq!(r.status(), StatusCode::NOT_FOUND); let r: Response = ErrorMethodNotAllowed("err").into(); assert_eq!(r.status(), StatusCode::METHOD_NOT_ALLOWED); let r: Response = ErrorNotAcceptable("err").into(); assert_eq!(r.status(), StatusCode::NOT_ACCEPTABLE); let r: Response = ErrorProxyAuthenticationRequired("err").into(); assert_eq!(r.status(), StatusCode::PROXY_AUTHENTICATION_REQUIRED); let r: Response = ErrorRequestTimeout("err").into(); assert_eq!(r.status(), StatusCode::REQUEST_TIMEOUT); let r: Response = ErrorConflict("err").into(); assert_eq!(r.status(), StatusCode::CONFLICT); let r: Response = ErrorGone("err").into(); assert_eq!(r.status(), StatusCode::GONE); let r: Response = ErrorLengthRequired("err").into(); assert_eq!(r.status(), StatusCode::LENGTH_REQUIRED); let r: Response = ErrorPreconditionFailed("err").into(); assert_eq!(r.status(), StatusCode::PRECONDITION_FAILED); let r: Response = ErrorPayloadTooLarge("err").into(); assert_eq!(r.status(), StatusCode::PAYLOAD_TOO_LARGE); let r: Response = ErrorUriTooLong("err").into(); assert_eq!(r.status(), StatusCode::URI_TOO_LONG); let r: Response = ErrorUnsupportedMediaType("err").into(); assert_eq!(r.status(), StatusCode::UNSUPPORTED_MEDIA_TYPE); let r: Response = ErrorRangeNotSatisfiable("err").into(); assert_eq!(r.status(), StatusCode::RANGE_NOT_SATISFIABLE); let r: Response = ErrorExpectationFailed("err").into(); assert_eq!(r.status(), StatusCode::EXPECTATION_FAILED); let r: Response = ErrorImATeapot("err").into(); assert_eq!(r.status(), StatusCode::IM_A_TEAPOT); let r: Response = ErrorMisdirectedRequest("err").into(); assert_eq!(r.status(), StatusCode::MISDIRECTED_REQUEST); let r: Response = ErrorUnprocessableEntity("err").into(); assert_eq!(r.status(), 
StatusCode::UNPROCESSABLE_ENTITY); let r: Response = ErrorLocked("err").into(); assert_eq!(r.status(), StatusCode::LOCKED); let r: Response = ErrorFailedDependency("err").into(); assert_eq!(r.status(), StatusCode::FAILED_DEPENDENCY); let r: Response = ErrorUpgradeRequired("err").into(); assert_eq!(r.status(), StatusCode::UPGRADE_REQUIRED); let r: Response = ErrorPreconditionRequired("err").into(); assert_eq!(r.status(), StatusCode::PRECONDITION_REQUIRED); let r: Response = ErrorTooManyRequests("err").into(); assert_eq!(r.status(), StatusCode::TOO_MANY_REQUESTS); let r: Response = ErrorRequestHeaderFieldsTooLarge("err").into(); assert_eq!(r.status(), StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE); let r: Response = ErrorUnavailableForLegalReasons("err").into(); assert_eq!(r.status(), StatusCode::UNAVAILABLE_FOR_LEGAL_REASONS); let r: Response = ErrorInternalServerError("err").into(); assert_eq!(r.status(), StatusCode::INTERNAL_SERVER_ERROR); let r: Response = ErrorNotImplemented("err").into(); assert_eq!(r.status(), StatusCode::NOT_IMPLEMENTED); let r: Response = ErrorBadGateway("err").into(); assert_eq!(r.status(), StatusCode::BAD_GATEWAY); let r: Response = ErrorServiceUnavailable("err").into(); assert_eq!(r.status(), StatusCode::SERVICE_UNAVAILABLE); let r: Response = ErrorGatewayTimeout("err").into(); assert_eq!(r.status(), StatusCode::GATEWAY_TIMEOUT); let r: Response = ErrorHttpVersionNotSupported("err").into(); assert_eq!(r.status(), StatusCode::HTTP_VERSION_NOT_SUPPORTED); let r: Response = ErrorVariantAlsoNegotiates("err").into(); assert_eq!(r.status(), StatusCode::VARIANT_ALSO_NEGOTIATES); let r: Response = ErrorInsufficientStorage("err").into(); assert_eq!(r.status(), StatusCode::INSUFFICIENT_STORAGE); let r: Response = ErrorLoopDetected("err").into(); assert_eq!(r.status(), StatusCode::LOOP_DETECTED); let r: Response = ErrorNotExtended("err").into(); assert_eq!(r.status(), StatusCode::NOT_EXTENDED); let r: Response = ErrorNetworkAuthenticationRequired("err").into(); assert_eq!(r.status(), StatusCode::NETWORK_AUTHENTICATION_REQUIRED); } }
30.748947
99
0.646703
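To illustrate the `ResponseError` trait defined at the top of the file above: a downstream crate plugs its own error type in by implementing the trait and overriding only what it needs. `TimeoutError` is a made-up example, and the import paths are assumptions that may need adjusting to the re-exports of the actix-http version in use.

```rust
use std::fmt;

use actix_http::error::{Error, ResponseError};
use http::StatusCode;

#[derive(Debug)]
struct TimeoutError;

impl fmt::Display for TimeoutError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "operation timed out")
    }
}

impl ResponseError for TimeoutError {
    // The default `error_response` renders the `Display` text as a
    // plain-text body, so only the status code is overridden here.
    fn status_code(&self) -> StatusCode {
        StatusCode::GATEWAY_TIMEOUT
    }
}

fn as_general_error() -> Error {
    // Relies on the blanket `impl<T: ResponseError + 'static> From<T> for Error`.
    TimeoutError.into()
}
```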
de31e814252b94c73aa7e1f3b44055a5bbd7f67f
5,669
use crate::leader_arrange::LeaderSchedule; use crate::staking_utils; use morgan_runtime::bank::Bank; use morgan_interface::pubkey::Pubkey; use morgan_interface::timing::NUM_CONSECUTIVE_LEADER_SLOTS; use proptest::{ strategy::{Strategy, ValueTree}, test_runner::{Config, TestRunner}, }; /// Return the leader schedule for the given epoch. pub fn leader_schedule(epoch_height: u64, bank: &Bank) -> Option<LeaderSchedule> { staking_utils::staked_nodes_at_epoch(bank, epoch_height).map(|stakes| { let mut seed = [0u8; 32]; seed[0..8].copy_from_slice(&epoch_height.to_le_bytes()); let mut stakes: Vec<_> = stakes.into_iter().collect(); sort_stakes(&mut stakes); LeaderSchedule::new( &stakes, seed, bank.get_slots_in_epoch(epoch_height), NUM_CONSECUTIVE_LEADER_SLOTS, ) }) } /// Return the leader for the given slot. pub fn slot_leader_at(slot: u64, bank: &Bank) -> Option<Pubkey> { let (epoch, slot_index) = bank.get_epoch_and_slot_index(slot); leader_schedule(epoch, bank).map(|leader_schedule| leader_schedule[slot_index]) } // Returns the number of ticks remaining from the specified tick_height to the end of the // slot implied by the tick_height pub fn num_ticks_left_in_slot(bank: &Bank, tick_height: u64) -> u64 { bank.ticks_per_slot() - tick_height % bank.ticks_per_slot() - 1 } pub fn tick_height_to_slot(ticks_per_slot: u64, tick_height: u64) -> u64 { tick_height / ticks_per_slot } /// Context for generating single values out of strategies. /// /// Proptest is designed to be built around "value trees", which represent a spectrum from complex /// values to simpler ones. But in some contexts, like benchmarking or generating corpuses, one just /// wants a single value. This is a convenience struct for that. pub struct ValueGenerator { runner: TestRunner, } impl ValueGenerator { /// Creates a new value generator with the default RNG. pub fn new() -> Self { Self { runner: TestRunner::new(Config::default()), } } /// Creates a new value generator with a deterministic RNG. /// /// This generator has a hardcoded seed, so its results are predictable across test runs. /// However, a new proptest version may change the seed. pub fn deterministic() -> Self { Self { runner: TestRunner::deterministic(), } } /// Generates a single value for this strategy. /// /// Panics if generating the new value fails. The only situation in which this can happen is if /// generating the value causes too many internal rejects. pub fn generate<S: Strategy>(&mut self, strategy: S) -> S::Value { strategy .new_tree(&mut self.runner) .expect("creating a new value should succeed") .current() } } fn sort_stakes(stakes: &mut Vec<(Pubkey, u64)>) { // Sort first by stake. If stakes are the same, sort by pubkey to ensure a // deterministic result. // Note: Use unstable sort, because we dedup right after to remove the equal elements. stakes.sort_unstable_by(|(l_pubkey, l_stake), (r_pubkey, r_stake)| { if r_stake == l_stake { r_pubkey.cmp(&l_pubkey) } else { r_stake.cmp(&l_stake) } }); // Now that it's sorted, we can do an O(n) dedup. 
stakes.dedup(); } #[cfg(test)] mod tests { use super::*; use crate::staking_utils; use morgan_runtime::genesis_utils::{ create_genesis_block_with_leader, BOOTSTRAP_LEADER_DIFS, }; #[test] fn test_leader_schedule_via_bank() { let pubkey = Pubkey::new_rand(); let genesis_block = create_genesis_block_with_leader(0, &pubkey, BOOTSTRAP_LEADER_DIFS).genesis_block; let bank = Bank::new(&genesis_block); let pubkeys_and_stakes: Vec<_> = staking_utils::staked_nodes(&bank).into_iter().collect(); let seed = [0u8; 32]; let leader_schedule = LeaderSchedule::new( &pubkeys_and_stakes, seed, genesis_block.slots_per_epoch, NUM_CONSECUTIVE_LEADER_SLOTS, ); assert_eq!(leader_schedule[0], pubkey); assert_eq!(leader_schedule[1], pubkey); assert_eq!(leader_schedule[2], pubkey); } #[test] fn test_leader_scheduler1_basic() { let pubkey = Pubkey::new_rand(); let genesis_block = create_genesis_block_with_leader( BOOTSTRAP_LEADER_DIFS, &pubkey, BOOTSTRAP_LEADER_DIFS, ) .genesis_block; let bank = Bank::new(&genesis_block); assert_eq!(slot_leader_at(bank.slot(), &bank).unwrap(), pubkey); } #[test] fn test_sort_stakes_basic() { let pubkey0 = Pubkey::new_rand(); let pubkey1 = Pubkey::new_rand(); let mut stakes = vec![(pubkey0, 1), (pubkey1, 2)]; sort_stakes(&mut stakes); assert_eq!(stakes, vec![(pubkey1, 2), (pubkey0, 1)]); } #[test] fn test_sort_stakes_with_dup() { let pubkey0 = Pubkey::new_rand(); let pubkey1 = Pubkey::new_rand(); let mut stakes = vec![(pubkey0, 1), (pubkey1, 2), (pubkey0, 1)]; sort_stakes(&mut stakes); assert_eq!(stakes, vec![(pubkey1, 2), (pubkey0, 1)]); } #[test] fn test_sort_stakes_with_equal_stakes() { let pubkey0 = Pubkey::default(); let pubkey1 = Pubkey::new_rand(); let mut stakes = vec![(pubkey0, 1), (pubkey1, 1)]; sort_stakes(&mut stakes); assert_eq!(stakes, vec![(pubkey1, 1), (pubkey0, 1)]); } }
33.744048
100
0.637502
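The tick/slot arithmetic in the leader-schedule utilities above is easy to sanity-check by hand; this sketch assumes `tick_height_to_slot` is imported from the module shown, and the numbers are arbitrary.

```rust
fn tick_slot_examples() {
    // With 8 ticks per slot, ticks 0..=7 belong to slot 0 and ticks 8..=15 to slot 1.
    assert_eq!(tick_height_to_slot(8, 7), 0);
    assert_eq!(tick_height_to_slot(8, 8), 1);
    assert_eq!(tick_height_to_slot(8, 17), 2);
}
```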
f4536f9c9e2189f97aaf1e9b8f652218b87e647e
149
// compile-flags: -W bad-style
// check-pass

fn main() {
    let _InappropriateCamelCasing = true;
    //~^ WARNING should have a snake case name
}
18.625
46
0.66443
23f88097de9b4177fb59d0d7b5df7ef6864449e4
1,847
//! Gameboard controller. use piston::Event; use piston::input::GenericEvent; use crate::gameboard::{Direction, Gameboard}; /// Handles events for Sudoku game. pub struct GameboardController { /// Stores the gameboard state. pub gameboard: Gameboard, } impl GameboardController { /// Creates a new gameboard controller. pub fn new(gameboard: Gameboard) -> Self { Self { gameboard } } /// Handles events. pub fn event<E: GenericEvent>(&mut self, e: &E) { use piston::input::{Button, ButtonState, Key, UpdateArgs}; if let Some(button_args) = e.button_args() { let pressed = button_args.state == ButtonState::Press; if let Button::Keyboard(key) = button_args.button { match key { Key::Left => { let dir = &mut self.gameboard.player.direction; if pressed { *dir = Direction::Left; } else { if *dir == Direction::Left { *dir = Direction::Idle; } } } Key::Right => { let dir = &mut self.gameboard.player.direction; if pressed { *dir = Direction::Right; } else { if *dir == Direction::Right { *dir = Direction::Idle; } } } _ => {} } } } if let Some(UpdateArgs { dt }) = e.update_args() { // println!("{}", dt); self.gameboard.update(dt); } } }
31.844828
71
0.421765
0a1ead005e89c9002c1e376d66f5150f17d0cc21
2,377
use hamcrest2::core::Matcher; use support::sandbox::sandbox; use test_support::matchers::execs; use notion_fail::ExitCode; const BASIC_PACKAGE_JSON: &'static str = r#"{ "name": "test-package" }"#; fn package_json_with_pinned_node(node: &str, npm: &str) -> String { format!( r#"{{ "name": "test-package", "toolchain": {{ "node": "{}", "npm": "{}" }} }}"#, node, npm ) } #[test] fn pinned_project() { let s = sandbox() .package_json(&package_json_with_pinned_node("4.1.0", "2.14.3")) .build(); assert_that!( s.notion("current"), execs() .with_status(0) .with_stdout_contains("project: v4.1.0 (active)") ); } #[test] fn pinned_project_with_user_node_default() { let s = sandbox() .package_json(&package_json_with_pinned_node("4.1.0", "2.14.3")) .platform(r#"{"node":{"runtime":"9.11.2","npm":"5.6.0"}}"#) .build(); assert_that!( s.notion("current"), execs() .with_status(0) .with_stdout_contains("project: v4.1.0 (active)") .with_stdout_contains("user: v9.11.2") ); } #[test] fn unpinned_project() { let s = sandbox().package_json(BASIC_PACKAGE_JSON).build(); assert_that!( s.notion("current"), execs() .with_status(ExitCode::NoVersionMatch as i32) .with_stderr("error: no versions found") ); } #[test] fn unpinned_project_with_user_node_default() { let s = sandbox() .package_json(BASIC_PACKAGE_JSON) .platform(r#"{"node":{"runtime":"9.11.2","npm":"5.6.0"}}"#) .build(); assert_that!( s.notion("current"), execs() .with_status(0) .with_stdout_contains("user: v9.11.2 (active)") ); } #[test] fn no_project() { let s = sandbox().build(); assert_that!( s.notion("current"), execs() .with_status(ExitCode::NoVersionMatch as i32) .with_stderr("error: no versions found") ); } #[test] fn no_project_with_user_node_default() { let s = sandbox() .platform(r#"{"node":{"runtime":"9.11.2","npm":"5.6.0"}}"#) .build(); assert_that!( s.notion("current"), execs() .with_status(0) .with_stdout_contains("user: v9.11.2 (active)") ); }
22.424528
72
0.549432
d6ba3063676342141c8d55deb83ac95368983874
4,488
//! Everything needed to handle and create commands use super::command::Command; use crate::{ context::BotContext, database::User, handler::{Handler, SimpleHandler, Twitch}, message::{Message, MessageConsumer, MessageResult}, }; use anyhow::{Context, Result}; use async_trait::async_trait; use std::{collections::HashMap, sync::Arc}; use thiserror::Error; use twitchchat::messages::Privmsg; #[derive(Error, Debug)] pub enum CommandHandlerError { #[error("Could not execute command (name: {0})")] ExecuteCommand(String), #[error("Could not send privmsg")] SendPrivmsg, } pub struct CommandHandler { commands: HashMap<String, Arc<Command>>, // translate aliases to command names aliases: HashMap<String, String>, /// The prefix to use when checking for commands in a message. prefix: char, #[allow(dead_code)] context: Arc<BotContext>, } impl CommandHandler { /// Create a new CommandHandler pub fn new(context: Arc<BotContext>, commands: Vec<Arc<Command>>) -> Self { let mut aliases: HashMap<String, String> = HashMap::new(); let mut command_map: HashMap<String, Arc<Command>> = HashMap::new(); for command in commands { for alias in command.aliases() { aliases.insert(alias.to_owned(), command.name().to_string()); } command_map.insert(command.name().to_owned(), command); } Self { context, commands: command_map, aliases, prefix: '~', } } } impl SimpleHandler for CommandHandler { fn name(&self) -> &str { "command" } } impl Handler<Command> for CommandHandler { /// Get a command by `name`. This can either be the command name or any of it's aliases. fn get(&self, name: String) -> Option<Arc<Command>> { let name = self.aliases.get(&name).unwrap_or_else(|| &name); self.commands.get(name).cloned() } } #[async_trait] impl Twitch for CommandHandler { async fn handle(&self, msg: Arc<Privmsg<'_>>, user: &User) -> Result<()> { let message = msg.data.trim().replace("\u{e0000}", ""); // remove chatterino chars let words: Vec<String> = message.split_whitespace().map(|s| s.to_string()).collect(); let mut command_name = words[0].clone(); let prefix = command_name.remove(0); if prefix != self.prefix { trace!("Prefix not found"); return Ok(()); } let args = &words[1..]; trace!("Command: {} Args: {:?}", command_name, args); let cmd = match self.get(command_name) { Some(c) => c, None => { trace!("No matching command found"); return Ok(()); } }; debug!("Found matching command {}", Command::name(&cmd)); if cmd.whitelisted() { debug!( "Command is not whitelisted in this channel (name: {}, channel: {})", cmd.name(), msg.channel ); return Ok(()); } // or the command is enabled in this channel let mut writer = self.context.twitchbot().writer(); trace!("Executing command"); match cmd .consume( self.context.clone(), args.to_vec(), Message::TwitchPrivmsg(msg.clone()), user, ) .context(CommandHandlerError::ExecuteCommand(cmd.name().to_owned()))? { MessageResult::None => Ok(()), MessageResult::Reply(m) => writer .privmsg( &msg.channel, format!("{}, {}", user.display_name_or_name(), m).as_str(), ) .await .context(CommandHandlerError::SendPrivmsg), MessageResult::Message(m) => writer .privmsg(&msg.channel, &m) .await .context(CommandHandlerError::SendPrivmsg), MessageResult::Error(m) => writer .privmsg(&msg.channel, format!("Error: {}", m)) .await .context(CommandHandlerError::SendPrivmsg), MessageResult::MissingArgument(a) => writer .privmsg(&msg.channel, format!("Missing argument `{}`", a)) .await .context(CommandHandlerError::SendPrivmsg), } } }
30.951724
93
0.547906
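The prefix/argument handling inside the `Twitch::handle` implementation above is plain string work and can be exercised on its own; the message text and the `~` prefix below are illustrative, and nothing outside `std` is used.

```rust
fn parse_like_the_handler() {
    let msg = "~echo hello world";

    // Same steps as `handle`: trim, strip the invisible chatterino character, split.
    let message = msg.trim().replace("\u{e0000}", "");
    let words: Vec<String> = message.split_whitespace().map(|s| s.to_string()).collect();

    let mut command_name = words[0].clone();
    let prefix = command_name.remove(0);

    assert_eq!(prefix, '~');
    assert_eq!(command_name, "echo");
    assert_eq!(words[1], "hello");
    assert_eq!(words[2], "world");
}
```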
4abe5b1c0507199f2f2d95879bf73bc10eef0076
4,413
use core::fmt; use std::{num::NonZeroUsize, time::Duration}; use criterion::{ criterion_group, measurement::WallTime, BatchSize, BenchmarkGroup, BenchmarkId, Criterion, SamplingMode, Throughput, }; use vector::transforms::dedupe::{CacheConfig, Dedupe, DedupeConfig, FieldMatchConfig}; use vector_core::transform::Transform; use crate::common::{consume, FixedLogStream}; #[derive(Debug)] struct Param { slug: &'static str, input: FixedLogStream, dedupe_config: DedupeConfig, } impl fmt::Display for Param { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.slug) } } fn dedupe(c: &mut Criterion) { let mut group: BenchmarkGroup<WallTime> = c.benchmark_group("vector::transforms::dedupe::Dedupe"); group.sampling_mode(SamplingMode::Auto); let fixed_stream = FixedLogStream::new( NonZeroUsize::new(128).unwrap(), NonZeroUsize::new(2).unwrap(), ); for param in &[ // Measurement where field "message" is ignored. This field is // automatically added by the LogEvent construction mechanism. Param { slug: "field_ignore_message", input: fixed_stream.clone(), dedupe_config: DedupeConfig { fields: Some(FieldMatchConfig::IgnoreFields(vec![String::from( "message", )])), cache: CacheConfig { num_events: 4 }, }, }, // Modification of previous where field "message" is matched. Param { slug: "field_match_message", input: fixed_stream.clone(), dedupe_config: DedupeConfig { fields: Some(FieldMatchConfig::MatchFields(vec![String::from("message")])), cache: CacheConfig { num_events: 4 }, }, }, // Measurement where ignore fields do not exist in the event. Param { slug: "field_ignore_done", input: fixed_stream.clone(), dedupe_config: DedupeConfig { cache: CacheConfig { num_events: 4 }, fields: Some(FieldMatchConfig::IgnoreFields(vec![ String::from("abcde"), String::from("eabcd"), String::from("deabc"), String::from("cdeab"), String::from("bcdea"), ])), }, }, // Modification of previous where match fields do not exist in the // event. Param { slug: "field_match_done", input: fixed_stream.clone(), dedupe_config: DedupeConfig { cache: CacheConfig { num_events: 4 }, fields: Some(FieldMatchConfig::MatchFields(vec![ String::from("abcde"), String::from("eabcd"), String::from("deabc"), String::from("cdeab"), String::from("bcdea"), ])), }, }, ] { group.throughput(Throughput::Elements(param.input.len() as u64)); group.bench_with_input(BenchmarkId::new("transform", param), &param, |b, param| { b.iter_batched( || { let dedupe = Transform::event_task(Dedupe::new(param.dedupe_config.clone())).into_task(); (Box::new(dedupe), Box::pin(param.input.clone())) }, |(dedupe, input)| { let output = dedupe.transform_events(input); consume(output) }, BatchSize::SmallInput, ) }); } } criterion_group!( name = benches; config = Criterion::default() .warm_up_time(Duration::from_secs(5)) .measurement_time(Duration::from_secs(120)) // degree of noise to ignore in measurements, here 1% .noise_threshold(0.01) // likelihood of noise registering as difference, here 5% .significance_level(0.05) // likelihood of capturing the true runtime, here 95% .confidence_level(0.95) // total number of bootstrap resamples, higher is less noisy but slower .nresamples(100_000) // total samples to collect within the set measurement time .sample_size(150); targets = dedupe );
35.58871
100
0.5477
75864f0cceeaa4b51f7f707539d5725410f4224b
9,702
//! Minkowski sum. use std::ops::{Index, IndexMut, Add, Sub, Mul, Div, Neg}; use na::{Dim, ApproxEq, Orig, PntAsVec, Axpy, Translate, NumPnt, NumVec, POrd, POrdering, ScalarSub, ScalarAdd, ScalarMul, ScalarDiv, FloatPnt, Bounded}; use na; use shape::Reflection; use math::{Scalar, Point, Vect}; /// Type of an implicit representation of the Configuration Space Obstacle /// formed by two geometric objects. pub type CSO<'a, M, G1, G2> = MinkowskiSum<'a, M, G1, Reflection<'a, G2>>; pub type AnnotatedCSO<'a, M, G1, G2> = AnnotatedMinkowskiSum<'a, M, G1, Reflection<'a, G2>>; /** * SupportMap representation of the Minkowski sum of two shapes. * * The only way to obtain the sum points is to use its support mapping * function. * * - `G1`: type of the first object involved on the sum. * - `G2`: type of the second object involved on the sum. */ #[derive(Debug)] pub struct MinkowskiSum<'a, M: 'a, G1: ?Sized + 'a, G2: ?Sized + 'a> { m1: &'a M, g1: &'a G1, m2: &'a M, g2: &'a G2 } impl<'a, M, G1: ?Sized, G2: ?Sized> MinkowskiSum<'a, M, G1, G2> { /** * Builds the Minkowski sum of two shapes. Since the representation is * implicit, this is done in constant time. */ #[inline] pub fn new(m1: &'a M, g1: &'a G1, m2: &'a M, g2: &'a G2) -> MinkowskiSum<'a, M, G1, G2> { MinkowskiSum { m1: m1, g1: g1, m2: m2, g2: g2 } } /// The transformation matrix of the first shape of this Minkowski Sum. #[inline] pub fn m1(&self) -> &'a M { self.m1 } /// The transformation matrix of the second shape of this Minkowski Sum. #[inline] pub fn m2(&self) -> &'a M { self.m2 } /// The first shape of this Minkowski Sum. #[inline] pub fn g1(&self) -> &'a G1 { self.g1 } /// The second shape of this Minkowski Sum. #[inline] pub fn g2(&self) -> &'a G2 { self.g2 } } /** * Same as the MinkowskiSum but with a support mapping which keeps track of the * original supports points from the two wrapped shapes. * * * `G1`: type of the first object involved on the sum. * * `G2`: type of the second object involved on the sum. */ #[derive(Debug)] pub struct AnnotatedMinkowskiSum<'a, M: 'a, G1: ?Sized + 'a, G2: ?Sized + 'a> { m1: &'a M, g1: &'a G1, m2: &'a M, g2: &'a G2 } impl<'a, M, G1: ?Sized, G2: ?Sized> AnnotatedMinkowskiSum<'a, M, G1, G2> { /** * Builds the Minkowski sum of two shapes. Since the representation is * implicit, this is done in constant time. */ #[inline] pub fn new(m1: &'a M, g1: &'a G1, m2: &'a M, g2: &'a G2) -> AnnotatedMinkowskiSum<'a, M, G1, G2> { AnnotatedMinkowskiSum { m1: m1, g1: g1, m2: m2, g2: g2 } } /// The transformation matrix of the first shape of this Minkowski Sum. #[inline] pub fn m1(&self) -> &'a M { self.m1 } /// The transformation matrix of the second shape of this Minkowski Sum. #[inline] pub fn m2(&self) -> &'a M { self.m2 } /// The first shape of this Minkowski Sum. #[inline] pub fn g1(&self) -> &'a G1 { self.g1 } /// The second shape of this Minkowski Sum. #[inline] pub fn g2(&self) -> &'a G2 { self.g2 } } // FIXME: AnnotatedPoint is not a good name. // XXX: do not hide the documentation! 
#[doc(hidden)] #[derive(Clone, Copy, Debug, RustcEncodable, RustcDecodable)] pub struct AnnotatedPoint<P> { orig1: P, orig2: P, point: P } impl<P> AnnotatedPoint<P> { #[doc(hidden)] #[inline] pub fn new(orig1: P, orig2: P, point: P) -> AnnotatedPoint<P> { AnnotatedPoint { orig1: orig1, orig2: orig2, point: point } } #[doc(hidden)] #[inline] pub fn point<'r>(&'r self) -> &'r P { &self.point } #[doc(hidden)] #[inline] pub fn orig1(&self) -> &P { &self.orig1 } #[doc(hidden)] #[inline] pub fn orig2(&self) -> &P { &self.orig2 } #[doc(hidden)] #[inline] pub fn translate_1<V: Translate<P>>(&mut self, t: &V) { self.orig1 = na::translate(t, &self.orig1); self.point = na::translate(t, &self.point); } #[doc(hidden)] #[inline] pub fn translate_2<V: Translate<P>>(&mut self, t: &V) { self.orig2 = na::translate(t, &self.orig2); self.point = na::translate(t, &self.point); } } impl<P: PntAsVec<V>, V> PntAsVec<V> for AnnotatedPoint<P> { #[inline] fn to_vec(self) -> V { self.point.to_vec() } #[inline] fn as_vec<'a>(&'a self) -> &'a V { self.point.as_vec() } #[inline] fn set_coords(&mut self, _: V) { panic!(".set_coords is not implemented for annotated points.") } } impl<P: Index<usize>> Index<usize> for AnnotatedPoint<P> { type Output = P::Output; #[inline] fn index(&self, i: &usize) -> &P::Output { &self.point[*i] } } impl<P: IndexMut<usize>> IndexMut<usize> for AnnotatedPoint<P> { #[inline] fn index_mut(&mut self, _: &usize) -> &mut P::Output { unimplemented!() } } impl<P> POrd for AnnotatedPoint<P> { fn inf(&self, _: &AnnotatedPoint<P>) -> AnnotatedPoint<P> { unimplemented!() } fn sup(&self, _: &AnnotatedPoint<P>) -> AnnotatedPoint<P> { unimplemented!() } fn partial_cmp(&self, _: &AnnotatedPoint<P>) -> POrdering { unimplemented!() } } impl<P: Orig> Orig for AnnotatedPoint<P> { #[inline] fn orig() -> AnnotatedPoint<P> { AnnotatedPoint::new(na::orig(), na::orig(), na::orig()) } #[inline] fn is_orig(&self) -> bool { self.point.is_orig() } } #[old_impl_check] impl<N, P, V> Add<V> for AnnotatedPoint<P> where N: Scalar, P: Add<V, Output = P>, V: Copy + Mul<N, Output = V> { type Output = AnnotatedPoint<P>; #[inline] fn add(self, other: V) -> AnnotatedPoint<P> { let _0_5: N = na::cast(0.5f64); AnnotatedPoint::new( self.orig1 + other * _0_5, self.orig2 + other * _0_5, self.point + other ) } } impl<N, P: Axpy<N>> Axpy<N> for AnnotatedPoint<P> { #[inline] fn axpy(&mut self, a: &N, x: &AnnotatedPoint<P>) { self.orig1.axpy(a, &x.orig1); self.orig2.axpy(a, &x.orig2); self.point.axpy(a, &x.point); } } impl<P: Sub<P>> Sub<AnnotatedPoint<P>> for AnnotatedPoint<P> { type Output = P::Output; #[inline] fn sub(self, other: AnnotatedPoint<P>) -> P::Output { self.point - other.point } } #[old_impl_check] impl<N, P, V> ScalarSub<N> for AnnotatedPoint<P> where P: Point<N, V> { fn sub_s(&self, _: &N) -> AnnotatedPoint<P> { unimplemented!() } } #[old_impl_check] impl<N, P, V> ScalarAdd<N> for AnnotatedPoint<P> where P: Point<N, V> { fn add_s(&self, _: &N) -> AnnotatedPoint<P> { unimplemented!() } } #[old_impl_check] impl<N, P, V> ScalarMul<N> for AnnotatedPoint<P> where P: Point<N, V> { fn mul_s(&self, _: &N) -> AnnotatedPoint<P> { unimplemented!() } } #[old_impl_check] impl<N, P, V> ScalarDiv<N> for AnnotatedPoint<P> where P: Point<N, V> { fn div_s(&self, _: &N) -> AnnotatedPoint<P> { unimplemented!() } } impl<P: Neg<Output = P>> Neg for AnnotatedPoint<P> { type Output = AnnotatedPoint<P>; #[inline] fn neg(self) -> AnnotatedPoint<P> { AnnotatedPoint::new(-self.orig1, -self.orig2, -self.point) } } impl<P: Dim> Dim for 
AnnotatedPoint<P> { #[inline] fn dim(_: Option<AnnotatedPoint<P>>) -> usize { na::dim::<P>() } } impl<N: Copy, P: Div<N, Output = P>> Div<N> for AnnotatedPoint<P> { type Output = AnnotatedPoint<P>; #[inline] fn div(self, n: N) -> AnnotatedPoint<P> { AnnotatedPoint::new(self.orig1 / n, self.orig2 / n, self.point / n) } } impl<N: Copy, P: Mul<N, Output = P>> Mul<N> for AnnotatedPoint<P> { type Output = AnnotatedPoint<P>; #[inline] fn mul(self, n: N) -> AnnotatedPoint<P> { AnnotatedPoint::new(self.orig1 * n, self.orig2 * n, self.point * n) } } impl<P: PartialEq> PartialEq for AnnotatedPoint<P> { #[inline] fn eq(&self, other: &AnnotatedPoint<P>) -> bool { self.point == other.point } #[inline] fn ne(&self, other: &AnnotatedPoint<P>) -> bool { self.point != other.point } } impl<N, P> ApproxEq<N> for AnnotatedPoint<P> where N: Scalar, P: ApproxEq<N> { #[inline] fn approx_epsilon(_: Option<AnnotatedPoint<P>>) -> N { ApproxEq::approx_epsilon(None::<N>) } #[inline] fn approx_eq_eps(&self, other: &AnnotatedPoint<P>, eps: &N) -> bool { self.point.approx_eq_eps(&other.point, eps) } #[inline] fn approx_ulps(_: Option<AnnotatedPoint<P>>) -> u32 { ApproxEq::approx_ulps(None::<N>) } #[inline] fn approx_eq_ulps(&self, other: &AnnotatedPoint<P>, ulps: u32) -> bool { self.point.approx_eq_ulps(&other.point, ulps) } } impl<P> Bounded for AnnotatedPoint<P> { fn min_value() -> AnnotatedPoint<P> { unimplemented!() } fn max_value() -> AnnotatedPoint<P> { unimplemented!() } } impl<N, P, V> NumPnt<N, V> for AnnotatedPoint<P> where N: Scalar, P: NumPnt<N, V>, V: Copy + NumVec<N> { } impl<N, P, V> FloatPnt<N, V> for AnnotatedPoint<P> where N: Scalar, P: Point<N, V>, V: Vect<N> { } impl<N, P, V> Point<N, V> for AnnotatedPoint<P> where N: Scalar, P: Point<N, V>, V: Vect<N> { }
24.5
102
0.564523
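The MinkowskiSum record above represents the sum implicitly through its support mapping: the support point of A + B in a given direction is the sum of the support points of A and B in that direction. The sketch below shows that identity in simplified form; the SupportMap trait and Vec2 type here are illustrative stand-ins, not the actual ncollide traits used in the record.

// Simplified support-mapping sketch: support_{A+B}(d) = support_A(d) + support_B(d).
#[derive(Clone, Copy, Debug)]
struct Vec2 { x: f64, y: f64 }

trait SupportMap {
    fn support_point(&self, dir: Vec2) -> Vec2;
}

// A single point: its support point is itself, regardless of direction.
impl SupportMap for Vec2 {
    fn support_point(&self, _dir: Vec2) -> Vec2 { *self }
}

// Implicit Minkowski sum of two support-mapped shapes, built in constant time.
struct Sum<'a, A: ?Sized, B: ?Sized>(&'a A, &'a B);

impl<'a, A: SupportMap + ?Sized, B: SupportMap + ?Sized> SupportMap for Sum<'a, A, B> {
    fn support_point(&self, dir: Vec2) -> Vec2 {
        let a = self.0.support_point(dir);
        let b = self.1.support_point(dir);
        Vec2 { x: a.x + b.x, y: a.y + b.y }
    }
}

fn main() {
    let a = Vec2 { x: 1.0, y: 2.0 };
    let b = Vec2 { x: 3.0, y: -1.0 };
    let s = Sum(&a, &b).support_point(Vec2 { x: 1.0, y: 0.0 });
    assert_eq!((s.x, s.y), (4.0, 1.0));
}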
ffc26738dd7b2d7ca23d73eb9f98d08e833751c8
62,272
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Support code for rustc's built in unit-test and micro-benchmarking //! framework. //! //! Almost all user code will only be interested in `Bencher` and //! `black_box`. All other interactions (such as writing tests and //! benchmarks themselves) should be done via the `#[test]` and //! `#[bench]` attributes. //! //! See the [Testing Guide](../guide-testing.html) for more details. // Currently, not much of this is meant for users. It is intended to // support the simplest interface possible for representing and // running tests while providing a base that other test frameworks may // build off of. #![crate_name = "test"] #![experimental] #![crate_type = "rlib"] #![crate_type = "dylib"] #![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "http://www.rust-lang.org/favicon.ico", html_root_url = "http://doc.rust-lang.org/nightly/")] #![feature(asm, macro_rules, phase, globs, slicing_syntax)] extern crate getopts; extern crate regex; extern crate serialize; extern crate term; pub use self::TestFn::*; pub use self::MetricChange::*; pub use self::ColorConfig::*; pub use self::TestResult::*; pub use self::TestName::*; use self::TestEvent::*; use self::NamePadding::*; use self::OutputLocation::*; use std::any::{Any, AnyRefExt}; use std::collections::TreeMap; use stats::Stats; use getopts::{OptGroup, optflag, optopt}; use regex::Regex; use serialize::{json, Decodable, Encodable}; use term::Terminal; use term::color::{Color, RED, YELLOW, GREEN, CYAN}; use std::cmp; use std::f64; use std::fmt::Show; use std::fmt; use std::io::fs::PathExtensions; use std::io::stdio::StdWriter; use std::io::{File, ChanReader, ChanWriter}; use std::io; use std::num::{Float, FloatMath, Int}; use std::os; use std::str::FromStr; use std::string::String; use std::task::TaskBuilder; use std::time::Duration; // to be used by rustc to compile tests in libtest pub mod test { pub use {Bencher, TestName, TestResult, TestDesc, TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk, Metric, MetricMap, MetricAdded, MetricRemoved, MetricChange, Improvement, Regression, LikelyNoise, StaticTestFn, StaticTestName, DynTestName, DynTestFn, run_test, test_main, test_main_static, filter_tests, parse_opts, StaticBenchFn, ShouldFail}; } pub mod stats; // The name of a test. By convention this follows the rules for rust // paths; i.e. it should be a series of identifiers separated by double // colons. This way if some test runner wants to arrange the tests // hierarchically it may. 
#[deriving(Clone, PartialEq, Eq, Hash)] pub enum TestName { StaticTestName(&'static str), DynTestName(String) } impl TestName { fn as_slice<'a>(&'a self) -> &'a str { match *self { StaticTestName(s) => s, DynTestName(ref s) => s.as_slice() } } } impl Show for TestName { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.as_slice().fmt(f) } } #[deriving(Clone)] enum NamePadding { PadNone, PadOnLeft, PadOnRight, } impl Copy for NamePadding {} impl TestDesc { fn padded_name(&self, column_count: uint, align: NamePadding) -> String { let mut name = String::from_str(self.name.as_slice()); let fill = column_count.saturating_sub(name.len()); let mut pad = " ".repeat(fill); match align { PadNone => name, PadOnLeft => { pad.push_str(name.as_slice()); pad } PadOnRight => { name.push_str(pad.as_slice()); name } } } } /// Represents a benchmark function. pub trait TDynBenchFn { fn run(&self, harness: &mut Bencher); } // A function that runs a test. If the function returns successfully, // the test succeeds; if the function panics then the test fails. We // may need to come up with a more clever definition of test in order // to support isolation of tests into tasks. pub enum TestFn { StaticTestFn(fn()), StaticBenchFn(fn(&mut Bencher)), StaticMetricFn(proc(&mut MetricMap):'static), DynTestFn(proc():Send), DynMetricFn(proc(&mut MetricMap):'static), DynBenchFn(Box<TDynBenchFn+'static>) } impl TestFn { fn padding(&self) -> NamePadding { match self { &StaticTestFn(..) => PadNone, &StaticBenchFn(..) => PadOnRight, &StaticMetricFn(..) => PadOnRight, &DynTestFn(..) => PadNone, &DynMetricFn(..) => PadOnRight, &DynBenchFn(..) => PadOnRight, } } } impl fmt::Show for TestFn { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write(match *self { StaticTestFn(..) => "StaticTestFn(..)", StaticBenchFn(..) => "StaticBenchFn(..)", StaticMetricFn(..) => "StaticMetricFn(..)", DynTestFn(..) => "DynTestFn(..)", DynMetricFn(..) => "DynMetricFn(..)", DynBenchFn(..) => "DynBenchFn(..)" }.as_bytes()) } } /// Manager of the benchmarking runs. /// /// This is feed into functions marked with `#[bench]` to allow for /// set-up & tear-down before running a piece of code repeatedly via a /// call to `iter`. #[deriving(Copy)] pub struct Bencher { iterations: u64, dur: Duration, pub bytes: u64, } #[deriving(Copy, Clone, Show, PartialEq, Eq, Hash)] pub enum ShouldFail { No, Yes(Option<&'static str>) } // The definition of a single test. A test runner will run a list of // these. #[deriving(Clone, Show, PartialEq, Eq, Hash)] pub struct TestDesc { pub name: TestName, pub ignore: bool, pub should_fail: ShouldFail, } #[deriving(Show)] pub struct TestDescAndFn { pub desc: TestDesc, pub testfn: TestFn, } #[deriving(Clone, Encodable, Decodable, PartialEq, Show)] pub struct Metric { value: f64, noise: f64 } impl Copy for Metric {} impl Metric { pub fn new(value: f64, noise: f64) -> Metric { Metric {value: value, noise: noise} } } #[deriving(PartialEq)] pub struct MetricMap(TreeMap<String,Metric>); impl Clone for MetricMap { fn clone(&self) -> MetricMap { let MetricMap(ref map) = *self; MetricMap(map.clone()) } } /// Analysis of a single change in metric #[deriving(PartialEq, Show)] pub enum MetricChange { LikelyNoise, MetricAdded, MetricRemoved, Improvement(f64), Regression(f64) } impl Copy for MetricChange {} pub type MetricDiff = TreeMap<String,MetricChange>; // The default console test runner. It accepts the command line // arguments and a vector of test_descs. 
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) { let opts = match parse_opts(args) { Some(Ok(o)) => o, Some(Err(msg)) => panic!("{}", msg), None => return }; match run_tests_console(&opts, tests) { Ok(true) => {} Ok(false) => panic!("Some tests failed"), Err(e) => panic!("io error when running tests: {}", e), } } // A variant optimized for invocation with a static test vector. // This will panic (intentionally) when fed any dynamic tests, because // it is copying the static values out into a dynamic vector and cannot // copy dynamic values. It is doing this because from this point on // a ~[TestDescAndFn] is used in order to effect ownership-transfer // semantics into parallel test runners, which in turn requires a ~[] // rather than a &[]. pub fn test_main_static(args: &[String], tests: &[TestDescAndFn]) { let owned_tests = tests.iter().map(|t| { match t.testfn { StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() }, StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() }, _ => panic!("non-static tests passed to test::test_main_static") } }).collect(); test_main(args, owned_tests) } pub enum ColorConfig { AutoColor, AlwaysColor, NeverColor, } impl Copy for ColorConfig {} pub struct TestOpts { pub filter: Option<Regex>, pub run_ignored: bool, pub run_tests: bool, pub run_benchmarks: bool, pub ratchet_metrics: Option<Path>, pub ratchet_noise_percent: Option<f64>, pub save_metrics: Option<Path>, pub test_shard: Option<(uint,uint)>, pub logfile: Option<Path>, pub nocapture: bool, pub color: ColorConfig, pub show_boxplot: bool, pub boxplot_width: uint, pub show_all_stats: bool, } impl TestOpts { #[cfg(test)] fn new() -> TestOpts { TestOpts { filter: None, run_ignored: false, run_tests: false, run_benchmarks: false, ratchet_metrics: None, ratchet_noise_percent: None, save_metrics: None, test_shard: None, logfile: None, nocapture: false, color: AutoColor, show_boxplot: false, boxplot_width: 50, show_all_stats: false, } } } /// Result of parsing the options. pub type OptRes = Result<TestOpts, String>; fn optgroups() -> Vec<getopts::OptGroup> { vec!(getopts::optflag("", "ignored", "Run ignored tests"), getopts::optflag("", "test", "Run tests and not benchmarks"), getopts::optflag("", "bench", "Run benchmarks instead of tests"), getopts::optflag("h", "help", "Display this message (longer with --help)"), getopts::optopt("", "save-metrics", "Location to save bench metrics", "PATH"), getopts::optopt("", "ratchet-metrics", "Location to load and save metrics from. 
The metrics \ loaded are cause benchmarks to fail if they run too \ slowly", "PATH"), getopts::optopt("", "ratchet-noise-percent", "Tests within N% of the recorded metrics will be \ considered as passing", "PERCENTAGE"), getopts::optopt("", "logfile", "Write logs to the specified file instead \ of stdout", "PATH"), getopts::optopt("", "test-shard", "run shard A, of B shards, worth of the testsuite", "A.B"), getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \ task, allow printing directly"), getopts::optopt("", "color", "Configure coloring of output: auto = colorize if stdout is a tty and tests are run on serially (default); always = always colorize output; never = never colorize output;", "auto|always|never"), getopts::optflag("", "boxplot", "Display a boxplot of the benchmark statistics"), getopts::optopt("", "boxplot-width", "Set the boxplot width (default 50)", "WIDTH"), getopts::optflag("", "stats", "Display the benchmark min, max, and quartiles")) } fn usage(binary: &str) { let message = format!("Usage: {} [OPTIONS] [FILTER]", binary); println!(r#"{usage} The FILTER regex is tested against the name of all tests to run, and only those tests that match are run. By default, all tests are run in parallel. This can be altered with the RUST_TEST_TASKS environment variable when running tests (set it to 1). All tests have their standard output and standard error captured by default. This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1 environment variable. Logging is not captured by default. Test Attributes: #[test] - Indicates a function is a test to be run. This function takes no arguments. #[bench] - Indicates a function is a benchmark to be run. This function takes one argument (test::Bencher). #[should_fail] - This function (also labeled with #[test]) will only pass if the code causes a failure (an assertion failure or panic!) A message may be provided, which the failure string must contain: #[should_fail(expected = "foo")]. #[ignore] - When applied to a function which is already attributed as a test, then the test runner will ignore these tests during normal test runs. Running with --ignored will run these tests."#, usage = getopts::usage(message.as_slice(), optgroups().as_slice())); } // Parses command line arguments into test options pub fn parse_opts(args: &[String]) -> Option<OptRes> { let args_ = args.tail(); let matches = match getopts::getopts(args_.as_slice(), optgroups().as_slice()) { Ok(m) => m, Err(f) => return Some(Err(f.to_string())) }; if matches.opt_present("h") { usage(args[0].as_slice()); return None; } let filter = if matches.free.len() > 0 { let s = matches.free[0].as_slice(); match Regex::new(s) { Ok(re) => Some(re), Err(e) => return Some(Err(format!("could not parse /{}/: {}", s, e))) } } else { None }; let run_ignored = matches.opt_present("ignored"); let logfile = matches.opt_str("logfile"); let logfile = logfile.map(|s| Path::new(s)); let run_benchmarks = matches.opt_present("bench"); let run_tests = ! 
run_benchmarks || matches.opt_present("test"); let ratchet_metrics = matches.opt_str("ratchet-metrics"); let ratchet_metrics = ratchet_metrics.map(|s| Path::new(s)); let ratchet_noise_percent = matches.opt_str("ratchet-noise-percent"); let ratchet_noise_percent = ratchet_noise_percent.map(|s| from_str::<f64>(s.as_slice()).unwrap()); let save_metrics = matches.opt_str("save-metrics"); let save_metrics = save_metrics.map(|s| Path::new(s)); let test_shard = matches.opt_str("test-shard"); let test_shard = opt_shard(test_shard); let mut nocapture = matches.opt_present("nocapture"); if !nocapture { nocapture = os::getenv("RUST_TEST_NOCAPTURE").is_some(); } let color = match matches.opt_str("color").as_ref().map(|s| s.as_slice()) { Some("auto") | None => AutoColor, Some("always") => AlwaysColor, Some("never") => NeverColor, Some(v) => return Some(Err(format!("argument for --color must be \ auto, always, or never (was {})", v))), }; let show_boxplot = matches.opt_present("boxplot"); let boxplot_width = match matches.opt_str("boxplot-width") { Some(width) => { match FromStr::from_str(width.as_slice()) { Some(width) => width, None => { return Some(Err(format!("argument for --boxplot-width must be a uint"))); } } } None => 50, }; let show_all_stats = matches.opt_present("stats"); let test_opts = TestOpts { filter: filter, run_ignored: run_ignored, run_tests: run_tests, run_benchmarks: run_benchmarks, ratchet_metrics: ratchet_metrics, ratchet_noise_percent: ratchet_noise_percent, save_metrics: save_metrics, test_shard: test_shard, logfile: logfile, nocapture: nocapture, color: color, show_boxplot: show_boxplot, boxplot_width: boxplot_width, show_all_stats: show_all_stats, }; Some(Ok(test_opts)) } pub fn opt_shard(maybestr: Option<String>) -> Option<(uint,uint)> { match maybestr { None => None, Some(s) => { let mut it = s.split('.'); match (it.next().and_then(from_str::<uint>), it.next().and_then(from_str::<uint>), it.next()) { (Some(a), Some(b), None) => { if a <= 0 || a > b { panic!("tried to run shard {a}.{b}, but {a} is out of bounds \ (should be between 1 and {b}", a=a, b=b) } Some((a, b)) } _ => None, } } } } #[deriving(Clone, PartialEq)] pub struct BenchSamples { ns_iter_summ: stats::Summary<f64>, mb_s: uint, } #[deriving(Clone, PartialEq)] pub enum TestResult { TrOk, TrFailed, TrIgnored, TrMetrics(MetricMap), TrBench(BenchSamples), } enum OutputLocation<T> { Pretty(Box<term::Terminal<term::WriterWrapper> + Send>), Raw(T), } struct ConsoleTestState<T> { log_out: Option<File>, out: OutputLocation<T>, use_color: bool, show_boxplot: bool, boxplot_width: uint, show_all_stats: bool, total: uint, passed: uint, failed: uint, ignored: uint, measured: uint, metrics: MetricMap, failures: Vec<(TestDesc, Vec<u8> )> , max_name_len: uint, // number of columns to fill when aligning names } impl<T: Writer> ConsoleTestState<T> { pub fn new(opts: &TestOpts, _: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> { let log_out = match opts.logfile { Some(ref path) => Some(try!(File::create(path))), None => None }; let out = match term::stdout() { None => Raw(io::stdio::stdout_raw()), Some(t) => Pretty(t) }; Ok(ConsoleTestState { out: out, log_out: log_out, use_color: use_color(opts), show_boxplot: opts.show_boxplot, boxplot_width: opts.boxplot_width, show_all_stats: opts.show_all_stats, total: 0u, passed: 0u, failed: 0u, ignored: 0u, measured: 0u, metrics: MetricMap::new(), failures: Vec::new(), max_name_len: 0u, }) } pub fn write_ok(&mut self) -> io::IoResult<()> { self.write_pretty("ok", term::color::GREEN) } 
pub fn write_failed(&mut self) -> io::IoResult<()> { self.write_pretty("FAILED", term::color::RED) } pub fn write_ignored(&mut self) -> io::IoResult<()> { self.write_pretty("ignored", term::color::YELLOW) } pub fn write_metric(&mut self) -> io::IoResult<()> { self.write_pretty("metric", term::color::CYAN) } pub fn write_bench(&mut self) -> io::IoResult<()> { self.write_pretty("bench", term::color::CYAN) } pub fn write_added(&mut self) -> io::IoResult<()> { self.write_pretty("added", term::color::GREEN) } pub fn write_improved(&mut self) -> io::IoResult<()> { self.write_pretty("improved", term::color::GREEN) } pub fn write_removed(&mut self) -> io::IoResult<()> { self.write_pretty("removed", term::color::YELLOW) } pub fn write_regressed(&mut self) -> io::IoResult<()> { self.write_pretty("regressed", term::color::RED) } pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::IoResult<()> { match self.out { Pretty(ref mut term) => { if self.use_color { try!(term.fg(color)); } try!(term.write(word.as_bytes())); if self.use_color { try!(term.reset()); } Ok(()) } Raw(ref mut stdout) => stdout.write(word.as_bytes()) } } pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> { match self.out { Pretty(ref mut term) => term.write(s.as_bytes()), Raw(ref mut stdout) => stdout.write(s.as_bytes()) } } pub fn write_run_start(&mut self, len: uint) -> io::IoResult<()> { self.total = len; let noun = if len != 1 { "tests" } else { "test" }; self.write_plain(format!("\nrunning {} {}\n", len, noun).as_slice()) } pub fn write_test_start(&mut self, test: &TestDesc, align: NamePadding) -> io::IoResult<()> { let name = test.padded_name(self.max_name_len, align); self.write_plain(format!("test {} ... ", name).as_slice()) } pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> { try!(match *result { TrOk => self.write_ok(), TrFailed => self.write_failed(), TrIgnored => self.write_ignored(), TrMetrics(ref mm) => { try!(self.write_metric()); self.write_plain(format!(": {}", fmt_metrics(mm)).as_slice()) } TrBench(ref bs) => { try!(self.write_bench()); if self.show_boxplot { let mut wr = Vec::new(); try!(stats::write_boxplot(&mut wr, &bs.ns_iter_summ, self.boxplot_width)); let s = String::from_utf8(wr).unwrap(); try!(self.write_plain(format!(": {}", s).as_slice())); } if self.show_all_stats { let mut wr = Vec::new(); try!(stats::write_5_number_summary(&mut wr, &bs.ns_iter_summ)); let s = String::from_utf8(wr).unwrap(); try!(self.write_plain(format!(": {}", s).as_slice())); } else { try!(self.write_plain(format!(": {}", fmt_bench_samples(bs)).as_slice())); } Ok(()) } }); self.write_plain("\n") } pub fn write_log(&mut self, test: &TestDesc, result: &TestResult) -> io::IoResult<()> { match self.log_out { None => Ok(()), Some(ref mut o) => { let s = format!("{} {}\n", match *result { TrOk => "ok".to_string(), TrFailed => "failed".to_string(), TrIgnored => "ignored".to_string(), TrMetrics(ref mm) => fmt_metrics(mm), TrBench(ref bs) => fmt_bench_samples(bs) }, test.name.as_slice()); o.write(s.as_bytes()) } } } pub fn write_failures(&mut self) -> io::IoResult<()> { try!(self.write_plain("\nfailures:\n")); let mut failures = Vec::new(); let mut fail_out = String::new(); for &(ref f, ref stdout) in self.failures.iter() { failures.push(f.name.to_string()); if stdout.len() > 0 { fail_out.push_str(format!("---- {} stdout ----\n\t", f.name.as_slice()).as_slice()); let output = String::from_utf8_lossy(stdout.as_slice()); fail_out.push_str(output.as_slice()); fail_out.push_str("\n"); } } if 
fail_out.len() > 0 { try!(self.write_plain("\n")); try!(self.write_plain(fail_out.as_slice())); } try!(self.write_plain("\nfailures:\n")); failures.sort(); for name in failures.iter() { try!(self.write_plain(format!(" {}\n", name.as_slice()).as_slice())); } Ok(()) } pub fn write_metric_diff(&mut self, diff: &MetricDiff) -> io::IoResult<()> { let mut noise = 0u; let mut improved = 0u; let mut regressed = 0u; let mut added = 0u; let mut removed = 0u; for (k, v) in diff.iter() { match *v { LikelyNoise => noise += 1, MetricAdded => { added += 1; try!(self.write_added()); try!(self.write_plain(format!(": {}\n", *k).as_slice())); } MetricRemoved => { removed += 1; try!(self.write_removed()); try!(self.write_plain(format!(": {}\n", *k).as_slice())); } Improvement(pct) => { improved += 1; try!(self.write_plain(format!(": {} ", *k).as_slice())); try!(self.write_improved()); try!(self.write_plain(format!(" by {:.2}%\n", pct as f64).as_slice())); } Regression(pct) => { regressed += 1; try!(self.write_plain(format!(": {} ", *k).as_slice())); try!(self.write_regressed()); try!(self.write_plain(format!(" by {:.2}%\n", pct as f64).as_slice())); } } } try!(self.write_plain(format!("result of ratchet: {} metrics added, \ {} removed, {} improved, {} regressed, \ {} noise\n", added, removed, improved, regressed, noise).as_slice())); if regressed == 0 { try!(self.write_plain("updated ratchet file\n")); } else { try!(self.write_plain("left ratchet file untouched\n")); } Ok(()) } pub fn write_run_finish(&mut self, ratchet_metrics: &Option<Path>, ratchet_pct: Option<f64>) -> io::IoResult<bool> { assert!(self.passed + self.failed + self.ignored + self.measured == self.total); let ratchet_success = match *ratchet_metrics { None => true, Some(ref pth) => { try!(self.write_plain(format!("\nusing metrics ratchet: {}\n", pth.display()).as_slice())); match ratchet_pct { None => (), Some(pct) => try!(self.write_plain(format!("with noise-tolerance \ forced to: {}%\n", pct).as_slice())) } let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct); try!(self.write_metric_diff(&diff)); ok } }; let test_success = self.failed == 0u; if !test_success { try!(self.write_failures()); } let success = ratchet_success && test_success; try!(self.write_plain("\ntest result: ")); if success { // There's no parallelism at this point so it's safe to use color try!(self.write_ok()); } else { try!(self.write_failed()); } let s = format!(". 
{} passed; {} failed; {} ignored; {} measured\n\n", self.passed, self.failed, self.ignored, self.measured); try!(self.write_plain(s.as_slice())); return Ok(success); } } pub fn fmt_metrics(mm: &MetricMap) -> String { let MetricMap(ref mm) = *mm; let v : Vec<String> = mm.iter() .map(|(k,v)| format!("{}: {} (+/- {})", *k, v.value as f64, v.noise as f64)) .collect(); v.connect(", ") } pub fn fmt_bench_samples(bs: &BenchSamples) -> String { if bs.mb_s != 0 { format!("{:>9} ns/iter (+/- {}) = {} MB/s", bs.ns_iter_summ.median as uint, (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint, bs.mb_s) } else { format!("{:>9} ns/iter (+/- {})", bs.ns_iter_summ.median as uint, (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint) } } // A simple console test runner pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::IoResult<bool> { fn callback<T: Writer>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::IoResult<()> { match (*event).clone() { TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()), TeWait(ref test, padding) => st.write_test_start(test, padding), TeResult(test, result, stdout) => { try!(st.write_log(&test, &result)); try!(st.write_result(&result)); match result { TrOk => st.passed += 1, TrIgnored => st.ignored += 1, TrMetrics(mm) => { let tname = test.name.as_slice(); let MetricMap(mm) = mm; for (k,v) in mm.iter() { st.metrics .insert_metric(format!("{}.{}", tname, k).as_slice(), v.value, v.noise); } st.measured += 1 } TrBench(bs) => { st.metrics.insert_metric(test.name.as_slice(), bs.ns_iter_summ.median, bs.ns_iter_summ.max - bs.ns_iter_summ.min); st.measured += 1 } TrFailed => { st.failed += 1; st.failures.push((test, stdout)); } } Ok(()) } } } let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>)); fn len_if_padded(t: &TestDescAndFn) -> uint { match t.testfn.padding() { PadNone => 0u, PadOnLeft | PadOnRight => t.desc.name.as_slice().len(), } } match tests.iter().max_by(|t|len_if_padded(*t)) { Some(t) => { let n = t.desc.name.as_slice(); st.max_name_len = n.len(); }, None => {} } try!(run_tests(opts, tests, |x| callback(&x, &mut st))); match opts.save_metrics { None => (), Some(ref pth) => { try!(st.metrics.save(pth)); try!(st.write_plain(format!("\nmetrics saved to: {}", pth.display()).as_slice())); } } return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent); } #[test] fn should_sort_failures_before_printing_them() { let test_a = TestDesc { name: StaticTestName("a"), ignore: false, should_fail: ShouldFail::No }; let test_b = TestDesc { name: StaticTestName("b"), ignore: false, should_fail: ShouldFail::No }; let mut st = ConsoleTestState { log_out: None, out: Raw(Vec::new()), use_color: false, show_boxplot: false, boxplot_width: 0, show_all_stats: false, total: 0u, passed: 0u, failed: 0u, ignored: 0u, measured: 0u, max_name_len: 10u, metrics: MetricMap::new(), failures: vec!((test_b, Vec::new()), (test_a, Vec::new())) }; st.write_failures().unwrap(); let s = match st.out { Raw(ref m) => String::from_utf8_lossy(m[]), Pretty(_) => unreachable!() }; let apos = s.find_str("a").unwrap(); let bpos = s.find_str("b").unwrap(); assert!(apos < bpos); } fn use_color(opts: &TestOpts) -> bool { match opts.color { AutoColor => get_concurrency() == 1 && io::stdout().get_ref().isatty(), AlwaysColor => true, NeverColor => false, } } #[deriving(Clone)] enum TestEvent { TeFiltered(Vec<TestDesc> ), TeWait(TestDesc, NamePadding), TeResult(TestDesc, TestResult, Vec<u8> ), } pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> 
); fn run_tests(opts: &TestOpts, tests: Vec<TestDescAndFn> , callback: |e: TestEvent| -> io::IoResult<()>) -> io::IoResult<()> { let filtered_tests = filter_tests(opts, tests); let filtered_descs = filtered_tests.iter() .map(|t| t.desc.clone()) .collect(); try!(callback(TeFiltered(filtered_descs))); let (filtered_tests, filtered_benchs_and_metrics) = filtered_tests.partition(|e| { match e.testfn { StaticTestFn(_) | DynTestFn(_) => true, _ => false } }); // It's tempting to just spawn all the tests at once, but since we have // many tests that run in other processes we would be making a big mess. let concurrency = get_concurrency(); let mut remaining = filtered_tests; remaining.reverse(); let mut pending = 0; let (tx, rx) = channel::<MonitorMsg>(); while pending > 0 || !remaining.is_empty() { while pending < concurrency && !remaining.is_empty() { let test = remaining.pop().unwrap(); if concurrency == 1 { // We are doing one test at a time so we can print the name // of the test before we run it. Useful for debugging tests // that hang forever. try!(callback(TeWait(test.desc.clone(), test.testfn.padding()))); } run_test(opts, !opts.run_tests, test, tx.clone()); pending += 1; } let (desc, result, stdout) = rx.recv(); if concurrency != 1 { try!(callback(TeWait(desc.clone(), PadNone))); } try!(callback(TeResult(desc, result, stdout))); pending -= 1; } // All benchmarks run at the end, in serial. // (this includes metric fns) for b in filtered_benchs_and_metrics.into_iter() { try!(callback(TeWait(b.desc.clone(), b.testfn.padding()))); run_test(opts, !opts.run_benchmarks, b, tx.clone()); let (test, result, stdout) = rx.recv(); try!(callback(TeResult(test, result, stdout))); } Ok(()) } fn get_concurrency() -> uint { use std::rt; match os::getenv("RUST_TEST_TASKS") { Some(s) => { let opt_n: Option<uint> = FromStr::from_str(s.as_slice()); match opt_n { Some(n) if n > 0 => n, _ => panic!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s) } } None => { rt::default_sched_threads() } } } pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> { let mut filtered = tests; // Remove tests that don't match the test filter filtered = match opts.filter { None => filtered, Some(ref re) => { filtered.into_iter() .filter(|test| re.is_match(test.desc.name.as_slice())).collect() } }; // Maybe pull out the ignored test and unignore them filtered = if !opts.run_ignored { filtered } else { fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> { if test.desc.ignore { let TestDescAndFn {desc, testfn} = test; Some(TestDescAndFn { desc: TestDesc {ignore: false, ..desc}, testfn: testfn }) } else { None } }; filtered.into_iter().filter_map(|x| filter(x)).collect() }; // Sort the tests alphabetically filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice())); // Shard the remaining tests, if sharding requested. 
match opts.test_shard { None => filtered, Some((a,b)) => { filtered.into_iter().enumerate() // note: using a - 1 so that the valid shards, for example, are // 1.2 and 2.2 instead of 0.2 and 1.2 .filter(|&(i,_)| i % b == (a - 1)) .map(|(_,t)| t) .collect() } } } pub fn run_test(opts: &TestOpts, force_ignore: bool, test: TestDescAndFn, monitor_ch: Sender<MonitorMsg>) { let TestDescAndFn {desc, testfn} = test; if force_ignore || desc.ignore { monitor_ch.send((desc, TrIgnored, Vec::new())); return; } fn run_test_inner(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, testfn: proc():Send) { spawn(proc() { let (tx, rx) = channel(); let mut reader = ChanReader::new(rx); let stdout = ChanWriter::new(tx.clone()); let stderr = ChanWriter::new(tx); let mut task = TaskBuilder::new().named(match desc.name { DynTestName(ref name) => name.clone().to_string(), StaticTestName(name) => name.to_string(), }); if nocapture { drop((stdout, stderr)); } else { task = task.stdout(box stdout as Box<Writer + Send>); task = task.stderr(box stderr as Box<Writer + Send>); } let result_future = task.try_future(testfn); let stdout = reader.read_to_end().unwrap().into_iter().collect(); let task_result = result_future.into_inner(); let test_result = calc_result(&desc, task_result); monitor_ch.send((desc.clone(), test_result, stdout)); }) } match testfn { DynBenchFn(bencher) => { let bs = ::bench::benchmark(|harness| bencher.run(harness)); monitor_ch.send((desc, TrBench(bs), Vec::new())); return; } StaticBenchFn(benchfn) => { let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness)); monitor_ch.send((desc, TrBench(bs), Vec::new())); return; } DynMetricFn(f) => { let mut mm = MetricMap::new(); f(&mut mm); monitor_ch.send((desc, TrMetrics(mm), Vec::new())); return; } StaticMetricFn(f) => { let mut mm = MetricMap::new(); f(&mut mm); monitor_ch.send((desc, TrMetrics(mm), Vec::new())); return; } DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f), StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, proc() f()) } } fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult { match (&desc.should_fail, task_result) { (&ShouldFail::No, Ok(())) | (&ShouldFail::Yes(None), Err(_)) => TrOk, (&ShouldFail::Yes(Some(msg)), Err(ref err)) if err.downcast_ref::<String>() .map(|e| &**e) .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e)) .map(|e| e.contains(msg)) .unwrap_or(false) => TrOk, _ => TrFailed, } } impl MetricMap { pub fn new() -> MetricMap { MetricMap(TreeMap::new()) } /// Load MetricDiff from a file. /// /// # Panics /// /// This function will panic if the path does not exist or the path does not /// contain a valid metric map. pub fn load(p: &Path) -> MetricMap { assert!(p.exists()); let mut f = File::open(p).unwrap(); let value = json::from_reader(&mut f as &mut io::Reader).unwrap(); let mut decoder = json::Decoder::new(value); MetricMap(match Decodable::decode(&mut decoder) { Ok(t) => t, Err(e) => panic!("failure decoding JSON: {}", e) }) } /// Write MetricDiff to a file. pub fn save(&self, p: &Path) -> io::IoResult<()> { let mut file = try!(File::create(p)); let MetricMap(ref map) = *self; let mut enc = json::PrettyEncoder::new(&mut file); map.encode(&mut enc) } /// Compare against another MetricMap. Optionally compare all /// measurements in the maps using the provided `noise_pct` as a /// percentage of each value to consider noise. 
If `None`, each /// measurement's noise threshold is independently chosen as the /// maximum of that measurement's recorded noise quantity in either /// map. pub fn compare_to_old(&self, old: &MetricMap, noise_pct: Option<f64>) -> MetricDiff { let mut diff : MetricDiff = TreeMap::new(); let MetricMap(ref selfmap) = *self; let MetricMap(ref old) = *old; for (k, vold) in old.iter() { let r = match selfmap.get(k) { None => MetricRemoved, Some(v) => { let delta = v.value - vold.value; let noise = match noise_pct { None => vold.noise.abs().max(v.noise.abs()), Some(pct) => vold.value * pct / 100.0 }; if delta.abs() <= noise { LikelyNoise } else { let pct = delta.abs() / vold.value.max(f64::EPSILON) * 100.0; if vold.noise < 0.0 { // When 'noise' is negative, it means we want // to see deltas that go up over time, and can // only tolerate slight negative movement. if delta < 0.0 { Regression(pct) } else { Improvement(pct) } } else { // When 'noise' is positive, it means we want // to see deltas that go down over time, and // can only tolerate slight positive movements. if delta < 0.0 { Improvement(pct) } else { Regression(pct) } } } } }; diff.insert((*k).clone(), r); } let MetricMap(ref map) = *self; for (k, _) in map.iter() { if !diff.contains_key(k) { diff.insert((*k).clone(), MetricAdded); } } diff } /// Insert a named `value` (+/- `noise`) metric into the map. The value /// must be non-negative. The `noise` indicates the uncertainty of the /// metric, which doubles as the "noise range" of acceptable /// pairwise-regressions on this named value, when comparing from one /// metric to the next using `compare_to_old`. /// /// If `noise` is positive, then it means this metric is of a value /// you want to see grow smaller, so a change larger than `noise` in the /// positive direction represents a regression. /// /// If `noise` is negative, then it means this metric is of a value /// you want to see grow larger, so a change larger than `noise` in the /// negative direction represents a regression. pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) { let m = Metric { value: value, noise: noise }; let MetricMap(ref mut map) = *self; map.insert(name.to_string(), m); } /// Attempt to "ratchet" an external metric file. This involves loading /// metrics from a metric file (if it exists), comparing against /// the metrics in `self` using `compare_to_old`, and rewriting the /// file to contain the metrics in `self` if none of the /// `MetricChange`s are `Regression`. Returns the diff as well /// as a boolean indicating whether the ratchet succeeded. pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) { let old = if p.exists() { MetricMap::load(p) } else { MetricMap::new() }; let diff : MetricDiff = self.compare_to_old(&old, pct); let ok = diff.iter().all(|(_, v)| { match *v { Regression(_) => false, _ => true } }); if ok { self.save(p).unwrap(); } return (diff, ok) } } // Benchmarking /// A function that is opaque to the optimizer, to allow benchmarks to /// pretend to use outputs to assist in avoiding dead-code /// elimination. /// /// This function is a no-op, and does not even read from `dummy`. pub fn black_box<T>(dummy: T) { // we need to "use" the argument in some way LLVM can't // introspect. unsafe {asm!("" : : "r"(&dummy))} } impl Bencher { /// Callback for benchmark functions to run in their body. 
pub fn iter<T>(&mut self, inner: || -> T) { self.dur = Duration::span(|| { let k = self.iterations; for _ in range(0u64, k) { black_box(inner()); } }); } pub fn ns_elapsed(&mut self) -> u64 { self.dur.num_nanoseconds().unwrap() as u64 } pub fn ns_per_iter(&mut self) -> u64 { if self.iterations == 0 { 0 } else { self.ns_elapsed() / cmp::max(self.iterations, 1) } } pub fn bench_n(&mut self, n: u64, f: |&mut Bencher|) { self.iterations = n; f(self); } // This is a more statistics-driven benchmark algorithm pub fn auto_bench(&mut self, f: |&mut Bencher|) -> stats::Summary<f64> { // Initial bench run to get ballpark figure. let mut n = 1_u64; self.bench_n(n, |x| f(x)); // Try to estimate iter count for 1ms falling back to 1m // iterations if first run took < 1ns. if self.ns_per_iter() == 0 { n = 1_000_000; } else { n = 1_000_000 / cmp::max(self.ns_per_iter(), 1); } // if the first run took more than 1ms we don't want to just // be left doing 0 iterations on every loop. The unfortunate // side effect of not being able to do as many runs is // automatically handled by the statistical analysis below // (i.e. larger error bars). if n == 0 { n = 1; } let mut total_run = Duration::nanoseconds(0); let samples : &mut [f64] = &mut [0.0_f64, ..50]; loop { let mut summ = None; let mut summ5 = None; let loop_run = Duration::span(|| { for p in samples.iter_mut() { self.bench_n(n, |x| f(x)); *p = self.ns_per_iter() as f64; }; stats::winsorize(samples, 5.0); summ = Some(stats::Summary::new(samples)); for p in samples.iter_mut() { self.bench_n(5 * n, |x| f(x)); *p = self.ns_per_iter() as f64; }; stats::winsorize(samples, 5.0); summ5 = Some(stats::Summary::new(samples)); }); let summ = summ.unwrap(); let summ5 = summ5.unwrap(); // If we've run for 100ms and seem to have converged to a // stable median. if loop_run.num_milliseconds() > 100 && summ.median_abs_dev_pct < 1.0 && summ.median - summ5.median < summ5.median_abs_dev { return summ5; } total_run = total_run + loop_run; // Longest we ever run for is 3s. 
if total_run.num_seconds() > 3 { return summ5; } n *= 2; } } } pub mod bench { use std::cmp; use std::time::Duration; use super::{Bencher, BenchSamples}; pub fn benchmark(f: |&mut Bencher|) -> BenchSamples { let mut bs = Bencher { iterations: 0, dur: Duration::nanoseconds(0), bytes: 0 }; let ns_iter_summ = bs.auto_bench(f); let ns_iter = cmp::max(ns_iter_summ.median as u64, 1); let iter_s = 1_000_000_000 / ns_iter; let mb_s = (bs.bytes * iter_s) / 1_000_000; BenchSamples { ns_iter_summ: ns_iter_summ, mb_s: mb_s as uint } } } #[cfg(test)] mod tests { use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts, TestDesc, TestDescAndFn, TestOpts, run_test, Metric, MetricMap, MetricAdded, MetricRemoved, Improvement, Regression, LikelyNoise, StaticTestName, DynTestName, DynTestFn, ShouldFail}; use std::io::TempDir; #[test] pub fn do_not_run_ignored_tests() { fn f() { panic!(); } let desc = TestDescAndFn { desc: TestDesc { name: StaticTestName("whatever"), ignore: true, should_fail: ShouldFail::No, }, testfn: DynTestFn(proc() f()), }; let (tx, rx) = channel(); run_test(&TestOpts::new(), false, desc, tx); let (_, res, _) = rx.recv(); assert!(res != TrOk); } #[test] pub fn ignored_tests_result_in_ignored() { fn f() { } let desc = TestDescAndFn { desc: TestDesc { name: StaticTestName("whatever"), ignore: true, should_fail: ShouldFail::No, }, testfn: DynTestFn(proc() f()), }; let (tx, rx) = channel(); run_test(&TestOpts::new(), false, desc, tx); let (_, res, _) = rx.recv(); assert!(res == TrIgnored); } #[test] fn test_should_fail() { fn f() { panic!(); } let desc = TestDescAndFn { desc: TestDesc { name: StaticTestName("whatever"), ignore: false, should_fail: ShouldFail::Yes(None) }, testfn: DynTestFn(proc() f()), }; let (tx, rx) = channel(); run_test(&TestOpts::new(), false, desc, tx); let (_, res, _) = rx.recv(); assert!(res == TrOk); } #[test] fn test_should_fail_good_message() { fn f() { panic!("an error message"); } let desc = TestDescAndFn { desc: TestDesc { name: StaticTestName("whatever"), ignore: false, should_fail: ShouldFail::Yes(Some("error message")) }, testfn: DynTestFn(proc() f()), }; let (tx, rx) = channel(); run_test(&TestOpts::new(), false, desc, tx); let (_, res, _) = rx.recv(); assert!(res == TrOk); } #[test] fn test_should_fail_bad_message() { fn f() { panic!("an error message"); } let desc = TestDescAndFn { desc: TestDesc { name: StaticTestName("whatever"), ignore: false, should_fail: ShouldFail::Yes(Some("foobar")) }, testfn: DynTestFn(proc() f()), }; let (tx, rx) = channel(); run_test(&TestOpts::new(), false, desc, tx); let (_, res, _) = rx.recv(); assert!(res == TrFailed); } #[test] fn test_should_fail_but_succeeds() { fn f() { } let desc = TestDescAndFn { desc: TestDesc { name: StaticTestName("whatever"), ignore: false, should_fail: ShouldFail::Yes(None) }, testfn: DynTestFn(proc() f()), }; let (tx, rx) = channel(); run_test(&TestOpts::new(), false, desc, tx); let (_, res, _) = rx.recv(); assert!(res == TrFailed); } #[test] fn first_free_arg_should_be_a_filter() { let args = vec!("progname".to_string(), "some_regex_filter".to_string()); let opts = match parse_opts(args.as_slice()) { Some(Ok(o)) => o, _ => panic!("Malformed arg in first_free_arg_should_be_a_filter") }; assert!(opts.filter.expect("should've found filter").is_match("some_regex_filter")) } #[test] fn parse_ignored_flag() { let args = vec!("progname".to_string(), "filter".to_string(), "--ignored".to_string()); let opts = match parse_opts(args.as_slice()) { Some(Ok(o)) => o, _ => panic!("Malformed arg in 
parse_ignored_flag") }; assert!((opts.run_ignored)); } #[test] pub fn filter_for_ignored_option() { // When we run ignored tests the test filter should filter out all the // unignored tests and flip the ignore flag on the rest to false let mut opts = TestOpts::new(); opts.run_tests = true; opts.run_ignored = true; let tests = vec!( TestDescAndFn { desc: TestDesc { name: StaticTestName("1"), ignore: true, should_fail: ShouldFail::No, }, testfn: DynTestFn(proc() {}), }, TestDescAndFn { desc: TestDesc { name: StaticTestName("2"), ignore: false, should_fail: ShouldFail::No, }, testfn: DynTestFn(proc() {}), }); let filtered = filter_tests(&opts, tests); assert_eq!(filtered.len(), 1); assert_eq!(filtered[0].desc.name.to_string(), "1"); assert!(filtered[0].desc.ignore == false); } #[test] pub fn sort_tests() { let mut opts = TestOpts::new(); opts.run_tests = true; let names = vec!("sha1::test".to_string(), "int::test_to_str".to_string(), "int::test_pow".to_string(), "test::do_not_run_ignored_tests".to_string(), "test::ignored_tests_result_in_ignored".to_string(), "test::first_free_arg_should_be_a_filter".to_string(), "test::parse_ignored_flag".to_string(), "test::filter_for_ignored_option".to_string(), "test::sort_tests".to_string()); let tests = { fn testfn() { } let mut tests = Vec::new(); for name in names.iter() { let test = TestDescAndFn { desc: TestDesc { name: DynTestName((*name).clone()), ignore: false, should_fail: ShouldFail::No, }, testfn: DynTestFn(testfn), }; tests.push(test); } tests }; let filtered = filter_tests(&opts, tests); let expected = vec!("int::test_pow".to_string(), "int::test_to_str".to_string(), "sha1::test".to_string(), "test::do_not_run_ignored_tests".to_string(), "test::filter_for_ignored_option".to_string(), "test::first_free_arg_should_be_a_filter".to_string(), "test::ignored_tests_result_in_ignored".to_string(), "test::parse_ignored_flag".to_string(), "test::sort_tests".to_string()); for (a, b) in expected.iter().zip(filtered.iter()) { assert!(*a == b.desc.name.to_string()); } } #[test] pub fn filter_tests_regex() { let mut opts = TestOpts::new(); opts.filter = Some(::regex::Regex::new("a.*b.+c").unwrap()); let mut names = ["yes::abXc", "yes::aXXXbXXXXc", "no::XYZ", "no::abc"]; names.sort(); fn test_fn() {} let tests = names.iter().map(|name| { TestDescAndFn { desc: TestDesc { name: DynTestName(name.to_string()), ignore: false, should_fail: ShouldFail::No, }, testfn: DynTestFn(test_fn) } }).collect(); let filtered = filter_tests(&opts, tests); let expected: Vec<&str> = names.iter().map(|&s| s).filter(|name| name.starts_with("yes")).collect(); assert_eq!(filtered.len(), expected.len()); for (test, expected_name) in filtered.iter().zip(expected.iter()) { assert_eq!(test.desc.name.as_slice(), *expected_name); } } #[test] pub fn test_metricmap_compare() { let mut m1 = MetricMap::new(); let mut m2 = MetricMap::new(); m1.insert_metric("in-both-noise", 1000.0, 200.0); m2.insert_metric("in-both-noise", 1100.0, 200.0); m1.insert_metric("in-first-noise", 1000.0, 2.0); m2.insert_metric("in-second-noise", 1000.0, 2.0); m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0); m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0); m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0); m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0); m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0); m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0); 
m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0); m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0); let diff1 = m2.compare_to_old(&m1, None); assert_eq!(*(diff1.get(&"in-both-noise".to_string()).unwrap()), LikelyNoise); assert_eq!(*(diff1.get(&"in-first-noise".to_string()).unwrap()), MetricRemoved); assert_eq!(*(diff1.get(&"in-second-noise".to_string()).unwrap()), MetricAdded); assert_eq!(*(diff1.get(&"in-both-want-downwards-but-regressed".to_string()).unwrap()), Regression(100.0)); assert_eq!(*(diff1.get(&"in-both-want-downwards-and-improved".to_string()).unwrap()), Improvement(50.0)); assert_eq!(*(diff1.get(&"in-both-want-upwards-but-regressed".to_string()).unwrap()), Regression(50.0)); assert_eq!(*(diff1.get(&"in-both-want-upwards-and-improved".to_string()).unwrap()), Improvement(100.0)); assert_eq!(diff1.len(), 7); let diff2 = m2.compare_to_old(&m1, Some(200.0)); assert_eq!(*(diff2.get(&"in-both-noise".to_string()).unwrap()), LikelyNoise); assert_eq!(*(diff2.get(&"in-first-noise".to_string()).unwrap()), MetricRemoved); assert_eq!(*(diff2.get(&"in-second-noise".to_string()).unwrap()), MetricAdded); assert_eq!(*(diff2.get(&"in-both-want-downwards-but-regressed".to_string()).unwrap()), LikelyNoise); assert_eq!(*(diff2.get(&"in-both-want-downwards-and-improved".to_string()).unwrap()), LikelyNoise); assert_eq!(*(diff2.get(&"in-both-want-upwards-but-regressed".to_string()).unwrap()), LikelyNoise); assert_eq!(*(diff2.get(&"in-both-want-upwards-and-improved".to_string()).unwrap()), LikelyNoise); assert_eq!(diff2.len(), 7); } #[test] pub fn ratchet_test() { let dpth = TempDir::new("test-ratchet").ok().expect("missing test for ratchet"); let pth = dpth.path().join("ratchet.json"); let mut m1 = MetricMap::new(); m1.insert_metric("runtime", 1000.0, 2.0); m1.insert_metric("throughput", 50.0, 2.0); let mut m2 = MetricMap::new(); m2.insert_metric("runtime", 1100.0, 2.0); m2.insert_metric("throughput", 50.0, 2.0); m1.save(&pth).unwrap(); // Ask for a ratchet that should fail to advance. let (diff1, ok1) = m2.ratchet(&pth, None); assert_eq!(ok1, false); assert_eq!(diff1.len(), 2); assert_eq!(*(diff1.get(&"runtime".to_string()).unwrap()), Regression(10.0)); assert_eq!(*(diff1.get(&"throughput".to_string()).unwrap()), LikelyNoise); // Check that it was not rewritten. let m3 = MetricMap::load(&pth); let MetricMap(m3) = m3; assert_eq!(m3.len(), 2); assert_eq!(*(m3.get(&"runtime".to_string()).unwrap()), Metric::new(1000.0, 2.0)); assert_eq!(*(m3.get(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0)); // Ask for a ratchet with an explicit noise-percentage override, // that should advance. let (diff2, ok2) = m2.ratchet(&pth, Some(10.0)); assert_eq!(ok2, true); assert_eq!(diff2.len(), 2); assert_eq!(*(diff2.get(&"runtime".to_string()).unwrap()), LikelyNoise); assert_eq!(*(diff2.get(&"throughput".to_string()).unwrap()), LikelyNoise); // Check that it was rewritten. let m4 = MetricMap::load(&pth); let MetricMap(m4) = m4; assert_eq!(m4.len(), 2); assert_eq!(*(m4.get(&"runtime".to_string()).unwrap()), Metric::new(1100.0, 2.0)); assert_eq!(*(m4.get(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0)); } }
34.461538
97
0.52579
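The libtest record above documents Bencher::iter and black_box as the core of its micro-benchmarking support. As a hedged sketch of how that harness is typically driven from user code (using the nightly-only descendant of the API shown in the record), the benchmark below is illustrative; bench_sum and the data it sums are invented for the example.

#![feature(test)]
extern crate test;

use test::{black_box, Bencher};

#[bench]
fn bench_sum(b: &mut Bencher) {
    let data: Vec<u64> = (0..1024).collect();
    // Reporting bytes makes the harness print MB/s alongside ns/iter.
    b.bytes = (data.len() * std::mem::size_of::<u64>()) as u64;
    b.iter(|| {
        // black_box keeps the optimizer from removing the summation.
        black_box(data.iter().copied().sum::<u64>())
    });
}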
f8668ddbd0709e08706c30e120735ac96042ff88
16,852
use super::{conv, HResult as _}; use std::{mem, sync::Arc, thread}; use winapi::{ shared::{dxgi, dxgi1_2, dxgi1_5, minwindef, windef, winerror}, um::{d3d12, d3d12sdklayers, winuser}, }; impl Drop for super::Adapter { fn drop(&mut self) { // Debug tracking alive objects if !thread::panicking() && self .private_caps .instance_flags .contains(crate::InstanceFlags::VALIDATION) { unsafe { self.report_live_objects(); } } unsafe { self.raw.destroy(); } } } impl super::Adapter { pub unsafe fn report_live_objects(&self) { if let Ok(debug_device) = self .raw .cast::<d3d12sdklayers::ID3D12DebugDevice>() .into_result() { debug_device.ReportLiveDeviceObjects( d3d12sdklayers::D3D12_RLDO_SUMMARY | d3d12sdklayers::D3D12_RLDO_IGNORE_INTERNAL, ); debug_device.destroy(); } } #[allow(trivial_casts)] pub(super) fn expose( adapter: native::WeakPtr<dxgi1_2::IDXGIAdapter2>, library: &Arc<native::D3D12Lib>, instance_flags: crate::InstanceFlags, ) -> Option<crate::ExposedAdapter<super::Api>> { // Create the device so that we can get the capabilities. let device = match library.create_device(adapter, native::FeatureLevel::L11_0) { Ok(pair) => match pair.into_result() { Ok(device) => device, Err(err) => { log::warn!("Device creation failed: {}", err); return None; } }, Err(err) => { log::warn!("Device creation function is not found: {:?}", err); return None; } }; // We have found a possible adapter. // Acquire the device information. let mut desc: dxgi1_2::DXGI_ADAPTER_DESC2 = unsafe { mem::zeroed() }; unsafe { adapter.GetDesc2(&mut desc); } let device_name = { use std::{ffi::OsString, os::windows::ffi::OsStringExt}; let len = desc.Description.iter().take_while(|&&c| c != 0).count(); let name = OsString::from_wide(&desc.Description[..len]); name.to_string_lossy().into_owned() }; let mut features_architecture: d3d12::D3D12_FEATURE_DATA_ARCHITECTURE = unsafe { mem::zeroed() }; assert_eq!(0, unsafe { device.CheckFeatureSupport( d3d12::D3D12_FEATURE_ARCHITECTURE, &mut features_architecture as *mut _ as *mut _, mem::size_of::<d3d12::D3D12_FEATURE_DATA_ARCHITECTURE>() as _, ) }); let mut workarounds = super::Workarounds::default(); let info = wgt::AdapterInfo { backend: wgt::Backend::Dx12, name: device_name, vendor: desc.VendorId as usize, device: desc.DeviceId as usize, device_type: if (desc.Flags & dxgi::DXGI_ADAPTER_FLAG_SOFTWARE) != 0 { workarounds.avoid_cpu_descriptor_overwrites = true; wgt::DeviceType::VirtualGpu } else if features_architecture.CacheCoherentUMA != 0 { wgt::DeviceType::IntegratedGpu } else { wgt::DeviceType::DiscreteGpu }, }; let mut options: d3d12::D3D12_FEATURE_DATA_D3D12_OPTIONS = unsafe { mem::zeroed() }; assert_eq!(0, unsafe { device.CheckFeatureSupport( d3d12::D3D12_FEATURE_D3D12_OPTIONS, &mut options as *mut _ as *mut _, mem::size_of::<d3d12::D3D12_FEATURE_DATA_D3D12_OPTIONS>() as _, ) }); let _depth_bounds_test_supported = { let mut features2: d3d12::D3D12_FEATURE_DATA_D3D12_OPTIONS2 = unsafe { mem::zeroed() }; let hr = unsafe { device.CheckFeatureSupport( d3d12::D3D12_FEATURE_D3D12_OPTIONS2, &mut features2 as *mut _ as *mut _, mem::size_of::<d3d12::D3D12_FEATURE_DATA_D3D12_OPTIONS2>() as _, ) }; hr == 0 && features2.DepthBoundsTestSupported != 0 }; //Note: `D3D12_FEATURE_D3D12_OPTIONS3::CastingFullyTypedFormatSupported` can be checked // to know if we can skip "typeless" formats entirely. 
let private_caps = super::PrivateCapabilities { instance_flags, heterogeneous_resource_heaps: options.ResourceHeapTier != d3d12::D3D12_RESOURCE_HEAP_TIER_1, memory_architecture: if features_architecture.UMA != 0 { super::MemoryArchitecture::Unified { cache_coherent: features_architecture.CacheCoherentUMA != 0, } } else { super::MemoryArchitecture::NonUnified }, heap_create_not_zeroed: false, //TODO: winapi support for Options7 }; // Theoretically vram limited, but in practice 2^20 is the limit let tier3_practical_descriptor_limit = 1 << 20; let (full_heap_count, _uav_count) = match options.ResourceBindingTier { d3d12::D3D12_RESOURCE_BINDING_TIER_1 => ( d3d12::D3D12_MAX_SHADER_VISIBLE_DESCRIPTOR_HEAP_SIZE_TIER_1, 8, // conservative, is 64 on feature level 11.1 ), d3d12::D3D12_RESOURCE_BINDING_TIER_2 => ( d3d12::D3D12_MAX_SHADER_VISIBLE_DESCRIPTOR_HEAP_SIZE_TIER_2, 64, ), d3d12::D3D12_RESOURCE_BINDING_TIER_3 => ( tier3_practical_descriptor_limit, tier3_practical_descriptor_limit, ), other => { log::warn!("Unknown resource binding tier {}", other); ( d3d12::D3D12_MAX_SHADER_VISIBLE_DESCRIPTOR_HEAP_SIZE_TIER_1, 8, ) } }; let mut features = wgt::Features::empty() | wgt::Features::DEPTH_CLAMPING | wgt::Features::MAPPABLE_PRIMARY_BUFFERS //TODO: Naga part //| wgt::Features::TEXTURE_BINDING_ARRAY //| wgt::Features::BUFFER_BINDING_ARRAY //| wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY //| wgt::Features::UNSIZED_BINDING_ARRAY | wgt::Features::MULTI_DRAW_INDIRECT | wgt::Features::MULTI_DRAW_INDIRECT_COUNT | wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER | wgt::Features::POLYGON_MODE_LINE | wgt::Features::POLYGON_MODE_POINT | wgt::Features::VERTEX_WRITABLE_STORAGE | wgt::Features::TIMESTAMP_QUERY | wgt::Features::TEXTURE_COMPRESSION_BC | wgt::Features::CLEAR_COMMANDS; //TODO: in order to expose this, we need to run a compute shader // that extract the necessary statistics out of the D3D12 result. // Alternatively, we could allocate a buffer for the query set, // write the results there, and issue a bunch of copy commands. 
//| wgt::Features::PIPELINE_STATISTICS_QUERY features.set( wgt::Features::CONSERVATIVE_RASTERIZATION, options.ConservativeRasterizationTier != d3d12::D3D12_CONSERVATIVE_RASTERIZATION_TIER_NOT_SUPPORTED, ); let base = wgt::Limits::default(); Some(crate::ExposedAdapter { adapter: super::Adapter { raw: adapter, device, library: Arc::clone(library), private_caps, workarounds, }, info, features, capabilities: crate::Capabilities { limits: wgt::Limits { max_texture_dimension_1d: d3d12::D3D12_REQ_TEXTURE1D_U_DIMENSION, max_texture_dimension_2d: d3d12::D3D12_REQ_TEXTURE2D_U_OR_V_DIMENSION .min(d3d12::D3D12_REQ_TEXTURECUBE_DIMENSION), max_texture_dimension_3d: d3d12::D3D12_REQ_TEXTURE3D_U_V_OR_W_DIMENSION, max_texture_array_layers: d3d12::D3D12_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION, max_bind_groups: crate::MAX_BIND_GROUPS as u32, // dynamic offsets take a root constant, so we expose the minimum here max_dynamic_uniform_buffers_per_pipeline_layout: base .max_dynamic_uniform_buffers_per_pipeline_layout, max_dynamic_storage_buffers_per_pipeline_layout: base .max_dynamic_storage_buffers_per_pipeline_layout, max_sampled_textures_per_shader_stage: match options.ResourceBindingTier { d3d12::D3D12_RESOURCE_BINDING_TIER_1 => 128, _ => full_heap_count, }, max_samplers_per_shader_stage: match options.ResourceBindingTier { d3d12::D3D12_RESOURCE_BINDING_TIER_1 => 16, _ => d3d12::D3D12_MAX_SHADER_VISIBLE_SAMPLER_HEAP_SIZE, }, // these both account towards `uav_count`, but we can't express the limit as as sum max_storage_buffers_per_shader_stage: base.max_storage_buffers_per_shader_stage, max_storage_textures_per_shader_stage: base .max_storage_textures_per_shader_stage, max_uniform_buffers_per_shader_stage: full_heap_count, max_uniform_buffer_binding_size: d3d12::D3D12_REQ_CONSTANT_BUFFER_ELEMENT_COUNT * 16, max_storage_buffer_binding_size: !0, max_vertex_buffers: d3d12::D3D12_VS_INPUT_REGISTER_COUNT .min(crate::MAX_VERTEX_BUFFERS as u32), max_vertex_attributes: d3d12::D3D12_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT, max_vertex_buffer_array_stride: d3d12::D3D12_SO_BUFFER_MAX_STRIDE_IN_BYTES, max_push_constant_size: 0, }, alignments: crate::Alignments { buffer_copy_offset: wgt::BufferSize::new( d3d12::D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT as u64, ) .unwrap(), buffer_copy_pitch: wgt::BufferSize::new( d3d12::D3D12_TEXTURE_DATA_PITCH_ALIGNMENT as u64, ) .unwrap(), uniform_buffer_offset: wgt::BufferSize::new( d3d12::D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT as u64, ) .unwrap(), storage_buffer_offset: wgt::BufferSize::new(4).unwrap(), //TODO? 
}, downlevel: wgt::DownlevelCapabilities::default(), }, }) } } impl crate::Adapter<super::Api> for super::Adapter { unsafe fn open( &self, features: wgt::Features, ) -> Result<crate::OpenDevice<super::Api>, crate::DeviceError> { let queue = self .device .create_command_queue( native::CmdListType::Direct, native::Priority::Normal, native::CommandQueueFlags::empty(), 0, ) .into_device_result("Queue creation")?; let device = super::Device::new( self.device, queue, features, self.private_caps, &self.library, )?; Ok(crate::OpenDevice { device, queue: super::Queue { raw: queue, temp_lists: Vec::new(), }, }) } #[allow(trivial_casts)] unsafe fn texture_format_capabilities( &self, format: wgt::TextureFormat, ) -> crate::TextureFormatCapabilities { use crate::TextureFormatCapabilities as Tfc; let raw_format = conv::map_texture_format(format); let mut data = d3d12::D3D12_FEATURE_DATA_FORMAT_SUPPORT { Format: raw_format, Support1: mem::zeroed(), Support2: mem::zeroed(), }; assert_eq!( winerror::S_OK, self.device.CheckFeatureSupport( d3d12::D3D12_FEATURE_FORMAT_SUPPORT, &mut data as *mut _ as *mut _, mem::size_of::<d3d12::D3D12_FEATURE_DATA_FORMAT_SUPPORT>() as _, ) ); let mut caps = Tfc::COPY_SRC | Tfc::COPY_DST; let can_image = 0 != data.Support1 & (d3d12::D3D12_FORMAT_SUPPORT1_TEXTURE1D | d3d12::D3D12_FORMAT_SUPPORT1_TEXTURE2D | d3d12::D3D12_FORMAT_SUPPORT1_TEXTURE3D | d3d12::D3D12_FORMAT_SUPPORT1_TEXTURECUBE); caps.set(Tfc::SAMPLED, can_image); caps.set( Tfc::SAMPLED_LINEAR, data.Support1 & d3d12::D3D12_FORMAT_SUPPORT1_SHADER_SAMPLE != 0, ); caps.set( Tfc::COLOR_ATTACHMENT, data.Support1 & d3d12::D3D12_FORMAT_SUPPORT1_RENDER_TARGET != 0, ); caps.set( Tfc::COLOR_ATTACHMENT_BLEND, data.Support1 & d3d12::D3D12_FORMAT_SUPPORT1_BLENDABLE != 0, ); caps.set( Tfc::DEPTH_STENCIL_ATTACHMENT, data.Support1 & d3d12::D3D12_FORMAT_SUPPORT1_DEPTH_STENCIL != 0, ); caps.set( Tfc::STORAGE, data.Support1 & d3d12::D3D12_FORMAT_SUPPORT1_TYPED_UNORDERED_ACCESS_VIEW != 0, ); caps.set( Tfc::STORAGE_READ_WRITE, data.Support2 & d3d12::D3D12_FORMAT_SUPPORT2_UAV_TYPED_LOAD != 0, ); caps } unsafe fn surface_capabilities( &self, surface: &super::Surface, ) -> Option<crate::SurfaceCapabilities> { let current_extent = { let mut rect: windef::RECT = mem::zeroed(); if winuser::GetClientRect(surface.wnd_handle, &mut rect) != 0 { Some(wgt::Extent3d { width: (rect.right - rect.left) as u32, height: (rect.bottom - rect.top) as u32, depth_or_array_layers: 1, }) } else { log::warn!("Unable to get the window client rect"); None } }; let mut present_modes = vec![wgt::PresentMode::Fifo]; #[allow(trivial_casts)] if let Ok(factory5) = surface .factory .cast::<dxgi1_5::IDXGIFactory5>() .into_result() { let mut allow_tearing: minwindef::BOOL = minwindef::FALSE; let hr = factory5.CheckFeatureSupport( dxgi1_5::DXGI_FEATURE_PRESENT_ALLOW_TEARING, &mut allow_tearing as *mut _ as *mut _, mem::size_of::<minwindef::BOOL>() as _, ); factory5.destroy(); match hr.into_result() { Err(err) => log::warn!("Unable to check for tearing support: {}", err), Ok(()) => present_modes.push(wgt::PresentMode::Immediate), } } Some(crate::SurfaceCapabilities { formats: vec![ wgt::TextureFormat::Bgra8UnormSrgb, wgt::TextureFormat::Bgra8Unorm, wgt::TextureFormat::Rgba8UnormSrgb, wgt::TextureFormat::Rgba8Unorm, wgt::TextureFormat::Rgb10a2Unorm, wgt::TextureFormat::Rgba16Float, ], // we currently use a flip effect which supports 2..=16 buffers swap_chain_sizes: 2..=16, current_extent, // TODO: figure out the exact bounds extents: wgt::Extent3d { width: 16, height: 16, 
depth_or_array_layers: 1, }..=wgt::Extent3d { width: 4096, height: 4096, depth_or_array_layers: 1, }, usage: crate::TextureUses::COLOR_TARGET | crate::TextureUses::COPY_SRC | crate::TextureUses::COPY_DST, present_modes, composite_alpha_modes: vec![ crate::CompositeAlphaMode::Opaque, crate::CompositeAlphaMode::PreMultiplied, crate::CompositeAlphaMode::PostMultiplied, ], }) } }
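// Hedged, standalone sketch (not part of wgpu-hal): the device-name handling in
// `expose` above trims the UTF-16 `Description` buffer at the first NUL and then
// converts it lossily. The same idea, kept portable by skipping the `OsString`
// detour used above; the sample buffer below is purely illustrative.
fn wide_name_to_string(desc: &[u16]) -> String {
    // Take everything before the first NUL terminator, then decode lossily.
    let len = desc.iter().take_while(|&&c| c != 0).count();
    String::from_utf16_lossy(&desc[..len])
}

fn main() {
    // "GPU" followed by NUL padding, as a fixed-size adapter description might hold it.
    let raw = [b'G' as u16, b'P' as u16, b'U' as u16, 0, 0, 0];
    assert_eq!(wide_name_to_string(&raw), "GPU");
}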
39.651765
103
0.549905
d6b3e674f8f804878c727db063d25736b3f47ae2
22,819
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use rustc::ty::{self, TyCtxt}; use rustc::session::config::BorrowckMode; use rustc_errors::{DiagnosticBuilder, DiagnosticId}; use syntax_pos::{MultiSpan, Span}; use std::fmt; #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum Origin { Ast, Mir } impl fmt::Display for Origin { fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result { // If the user passed `-Z borrowck=compare`, then include // origin info as part of the error report, // otherwise let display_origin = ty::tls::with_opt(|opt_tcx| { if let Some(tcx) = opt_tcx { tcx.sess.opts.borrowck_mode == BorrowckMode::Compare } else { false } }); if display_origin { match *self { Origin::Mir => write!(w, " (Mir)"), Origin::Ast => write!(w, " (Ast)"), } } else { // Print no origin info Ok(()) } } } impl Origin { /// Whether we should emit errors for the origin in the given mode pub fn should_emit_errors(self, mode: BorrowckMode) -> bool { match self { Origin::Ast => mode.use_ast(), Origin::Mir => mode.use_mir(), } } } pub trait BorrowckErrors<'cx>: Sized + Copy { fn struct_span_err_with_code<S: Into<MultiSpan>>(self, sp: S, msg: &str, code: DiagnosticId) -> DiagnosticBuilder<'cx>; fn struct_span_err<S: Into<MultiSpan>>(self, sp: S, msg: &str) -> DiagnosticBuilder<'cx>; /// Cancels the given error if we shouldn't emit errors for a given /// origin in the current mode. /// /// Always make sure that the error gets passed through this function /// before you return it. 
fn cancel_if_wrong_origin(self, diag: DiagnosticBuilder<'cx>, o: Origin) -> DiagnosticBuilder<'cx>; fn cannot_move_when_borrowed(self, span: Span, desc: &str, o: Origin) -> DiagnosticBuilder<'cx> { let err = struct_span_err!(self, span, E0505, "cannot move out of `{}` because it is borrowed{OGN}", desc, OGN=o); self.cancel_if_wrong_origin(err, o) } fn cannot_use_when_mutably_borrowed(self, span: Span, desc: &str, borrow_span: Span, borrow_desc: &str, o: Origin) -> DiagnosticBuilder<'cx> { let mut err = struct_span_err!(self, span, E0503, "cannot use `{}` because it was mutably borrowed{OGN}", desc, OGN=o); err.span_label(borrow_span, format!("borrow of `{}` occurs here", borrow_desc)); err.span_label(span, format!("use of borrowed `{}`", borrow_desc)); self.cancel_if_wrong_origin(err, o) } fn cannot_act_on_uninitialized_variable(self, span: Span, verb: &str, desc: &str, o: Origin) -> DiagnosticBuilder<'cx> { let err = struct_span_err!(self, span, E0381, "{} of possibly uninitialized variable: `{}`{OGN}", verb, desc, OGN=o); self.cancel_if_wrong_origin(err, o) } fn cannot_mutably_borrow_multiply(self, new_loan_span: Span, desc: &str, opt_via: &str, old_loan_span: Span, old_opt_via: &str, old_load_end_span: Option<Span>, o: Origin) -> DiagnosticBuilder<'cx> { let mut err = struct_span_err!(self, new_loan_span, E0499, "cannot borrow `{}`{} as mutable more than once at a time{OGN}", desc, opt_via, OGN=o); if old_loan_span == new_loan_span { // Both borrows are happening in the same place // Meaning the borrow is occurring in a loop err.span_label(new_loan_span, format!("mutable borrow starts here in previous \ iteration of loop{}", opt_via)); if let Some(old_load_end_span) = old_load_end_span { err.span_label(old_load_end_span, "mutable borrow ends here"); } } else { err.span_label(old_loan_span, format!("first mutable borrow occurs here{}", old_opt_via)); err.span_label(new_loan_span, format!("second mutable borrow occurs here{}", opt_via)); if let Some(old_load_end_span) = old_load_end_span { err.span_label(old_load_end_span, "first borrow ends here"); } } self.cancel_if_wrong_origin(err, o) } fn cannot_uniquely_borrow_by_two_closures(self, new_loan_span: Span, desc: &str, old_loan_span: Span, old_load_end_span: Option<Span>, o: Origin) -> DiagnosticBuilder<'cx> { let mut err = struct_span_err!(self, new_loan_span, E0524, "two closures require unique access to `{}` at the same time{OGN}", desc, OGN=o); err.span_label( old_loan_span, "first closure is constructed here"); err.span_label( new_loan_span, "second closure is constructed here"); if let Some(old_load_end_span) = old_load_end_span { err.span_label( old_load_end_span, "borrow from first closure ends here"); } self.cancel_if_wrong_origin(err, o) } fn cannot_uniquely_borrow_by_one_closure(self, new_loan_span: Span, desc_new: &str, opt_via: &str, old_loan_span: Span, noun_old: &str, old_opt_via: &str, previous_end_span: Option<Span>, o: Origin) -> DiagnosticBuilder<'cx> { let mut err = struct_span_err!(self, new_loan_span, E0500, "closure requires unique access to `{}` but {} is already borrowed{}{OGN}", desc_new, noun_old, old_opt_via, OGN=o); err.span_label(new_loan_span, format!("closure construction occurs here{}", opt_via)); err.span_label(old_loan_span, format!("borrow occurs here{}", old_opt_via)); if let Some(previous_end_span) = previous_end_span { err.span_label(previous_end_span, "borrow ends here"); } self.cancel_if_wrong_origin(err, o) } fn cannot_reborrow_already_uniquely_borrowed(self, new_loan_span: Span, desc_new: &str, 
opt_via: &str, kind_new: &str, old_loan_span: Span, old_opt_via: &str, previous_end_span: Option<Span>, o: Origin) -> DiagnosticBuilder<'cx> { let mut err = struct_span_err!(self, new_loan_span, E0501, "cannot borrow `{}`{} as {} because previous closure \ requires unique access{OGN}", desc_new, opt_via, kind_new, OGN=o); err.span_label(new_loan_span, format!("borrow occurs here{}", opt_via)); err.span_label(old_loan_span, format!("closure construction occurs here{}", old_opt_via)); if let Some(previous_end_span) = previous_end_span { err.span_label(previous_end_span, "borrow from closure ends here"); } self.cancel_if_wrong_origin(err, o) } fn cannot_reborrow_already_borrowed(self, span: Span, desc_new: &str, msg_new: &str, kind_new: &str, old_span: Span, noun_old: &str, kind_old: &str, msg_old: &str, old_load_end_span: Option<Span>, o: Origin) -> DiagnosticBuilder<'cx> { let mut err = struct_span_err!(self, span, E0502, "cannot borrow `{}`{} as {} because {} is also borrowed as {}{}{OGN}", desc_new, msg_new, kind_new, noun_old, kind_old, msg_old, OGN=o); err.span_label(span, format!("{} borrow occurs here{}", kind_new, msg_new)); err.span_label(old_span, format!("{} borrow occurs here{}", kind_old, msg_old)); if let Some(old_load_end_span) = old_load_end_span { err.span_label(old_load_end_span, format!("{} borrow ends here", kind_old)); } self.cancel_if_wrong_origin(err, o) } fn cannot_assign_to_borrowed(self, span: Span, borrow_span: Span, desc: &str, o: Origin) -> DiagnosticBuilder<'cx> { let mut err = struct_span_err!(self, span, E0506, "cannot assign to `{}` because it is borrowed{OGN}", desc, OGN=o); err.span_label(borrow_span, format!("borrow of `{}` occurs here", desc)); err.span_label(span, format!("assignment to borrowed `{}` occurs here", desc)); self.cancel_if_wrong_origin(err, o) } fn cannot_move_into_closure(self, span: Span, desc: &str, o: Origin) -> DiagnosticBuilder<'cx> { let err = struct_span_err!(self, span, E0504, "cannot move `{}` into closure because it is borrowed{OGN}", desc, OGN=o); self.cancel_if_wrong_origin(err, o) } fn cannot_reassign_immutable(self, span: Span, desc: &str, is_arg: bool, o: Origin) -> DiagnosticBuilder<'cx> { let msg = if is_arg { "to immutable argument" } else { "twice to immutable variable" }; let err = struct_span_err!(self, span, E0384, "cannot assign {} `{}`{OGN}", msg, desc, OGN=o); self.cancel_if_wrong_origin(err, o) } fn cannot_assign(self, span: Span, desc: &str, o: Origin) -> DiagnosticBuilder<'cx> { let err = struct_span_err!(self, span, E0594, "cannot assign to {}{OGN}", desc, OGN=o); self.cancel_if_wrong_origin(err, o) } fn cannot_assign_static(self, span: Span, desc: &str, o: Origin) -> DiagnosticBuilder<'cx> { self.cannot_assign(span, &format!("immutable static item `{}`", desc), o) } fn cannot_move_out_of(self, move_from_span: Span, move_from_desc: &str, o: Origin) -> DiagnosticBuilder<'cx> { let mut err = struct_span_err!(self, move_from_span, E0507, "cannot move out of {}{OGN}", move_from_desc, OGN=o); err.span_label( move_from_span, format!("cannot move out of {}", move_from_desc)); self.cancel_if_wrong_origin(err, o) } fn cannot_move_out_of_interior_noncopy(self, move_from_span: Span, ty: ty::Ty, is_index: bool, o: Origin) -> DiagnosticBuilder<'cx> { let type_name = match (&ty.sty, is_index) { (&ty::TyArray(_, _), true) => "array", (&ty::TySlice(_), _) => "slice", _ => span_bug!(move_from_span, "this path should not cause illegal move"), }; let mut err = struct_span_err!(self, move_from_span, E0508, "cannot move out of type 
`{}`, \ a non-copy {}{OGN}", ty, type_name, OGN=o); err.span_label(move_from_span, "cannot move out of here"); self.cancel_if_wrong_origin(err, o) } fn cannot_move_out_of_interior_of_drop(self, move_from_span: Span, container_ty: ty::Ty, o: Origin) -> DiagnosticBuilder<'cx> { let mut err = struct_span_err!(self, move_from_span, E0509, "cannot move out of type `{}`, \ which implements the `Drop` trait{OGN}", container_ty, OGN=o); err.span_label(move_from_span, "cannot move out of here"); self.cancel_if_wrong_origin(err, o) } fn cannot_act_on_moved_value(self, use_span: Span, verb: &str, optional_adverb_for_moved: &str, moved_path: &str, o: Origin) -> DiagnosticBuilder<'cx> { let err = struct_span_err!(self, use_span, E0382, "{} of {}moved value: `{}`{OGN}", verb, optional_adverb_for_moved, moved_path, OGN=o); self.cancel_if_wrong_origin(err, o) } fn cannot_partially_reinit_an_uninit_struct(self, span: Span, uninit_path: &str, o: Origin) -> DiagnosticBuilder<'cx> { let err = struct_span_err!(self, span, E0383, "partial reinitialization of uninitialized structure `{}`{OGN}", uninit_path, OGN=o); self.cancel_if_wrong_origin(err, o) } fn closure_cannot_assign_to_borrowed(self, span: Span, descr: &str, o: Origin) -> DiagnosticBuilder<'cx> { let err = struct_span_err!(self, span, E0595, "closure cannot assign to {}{OGN}", descr, OGN=o); self.cancel_if_wrong_origin(err, o) } fn cannot_borrow_path_as_mutable(self, span: Span, path: &str, o: Origin) -> DiagnosticBuilder<'cx> { let err = struct_span_err!(self, span, E0596, "cannot borrow {} as mutable{OGN}", path, OGN=o); self.cancel_if_wrong_origin(err, o) } fn cannot_borrow_across_generator_yield(self, span: Span, yield_span: Span, o: Origin) -> DiagnosticBuilder<'cx> { let mut err = struct_span_err!(self, span, E0626, "borrow may still be in use when generator yields{OGN}", OGN=o); err.span_label(yield_span, "possible yield occurs here"); self.cancel_if_wrong_origin(err, o) } fn path_does_not_live_long_enough(self, span: Span, path: &str, o: Origin) -> DiagnosticBuilder<'cx> { let err = struct_span_err!(self, span, E0597, "{} does not live long enough{OGN}", path, OGN=o); self.cancel_if_wrong_origin(err, o) } fn lifetime_too_short_for_reborrow(self, span: Span, path: &str, o: Origin) -> DiagnosticBuilder<'cx> { let err = struct_span_err!(self, span, E0598, "lifetime of {} is too short to guarantee \ its contents can be safely reborrowed{OGN}", path, OGN=o); self.cancel_if_wrong_origin(err, o) } fn cannot_act_on_capture_in_sharable_fn(self, span: Span, bad_thing: &str, help: (Span, &str), o: Origin) -> DiagnosticBuilder<'cx> { let (help_span, help_msg) = help; let mut err = struct_span_err!(self, span, E0387, "{} in a captured outer variable in an `Fn` closure{OGN}", bad_thing, OGN=o); err.span_help(help_span, help_msg); self.cancel_if_wrong_origin(err, o) } fn cannot_assign_into_immutable_reference(self, span: Span, bad_thing: &str, o: Origin) -> DiagnosticBuilder<'cx> { let mut err = struct_span_err!(self, span, E0389, "{} in a `&` reference{OGN}", bad_thing, OGN=o); err.span_label(span, "assignment into an immutable reference"); self.cancel_if_wrong_origin(err, o) } fn cannot_capture_in_long_lived_closure(self, closure_span: Span, borrowed_path: &str, capture_span: Span, o: Origin) -> DiagnosticBuilder<'cx> { let mut err = struct_span_err!(self, closure_span, E0373, "closure may outlive the current function, \ but it borrows {}, \ which is owned by the current function{OGN}", borrowed_path, OGN=o); err.span_label(capture_span, format!("{} is 
borrowed here", borrowed_path)) .span_label(closure_span, format!("may outlive borrowed value {}", borrowed_path)); self.cancel_if_wrong_origin(err, o) } } impl<'cx, 'gcx, 'tcx> BorrowckErrors<'cx> for TyCtxt<'cx, 'gcx, 'tcx> { fn struct_span_err_with_code<S: Into<MultiSpan>>(self, sp: S, msg: &str, code: DiagnosticId) -> DiagnosticBuilder<'cx> { self.sess.struct_span_err_with_code(sp, msg, code) } fn struct_span_err<S: Into<MultiSpan>>(self, sp: S, msg: &str) -> DiagnosticBuilder<'cx> { self.sess.struct_span_err(sp, msg) } fn cancel_if_wrong_origin(self, mut diag: DiagnosticBuilder<'cx>, o: Origin) -> DiagnosticBuilder<'cx> { if !o.should_emit_errors(self.borrowck_mode()) { self.sess.diagnostic().cancel(&mut diag); } diag } }
43.54771
100
0.430212
d76cd02380c797865b8be24f38dfa925e81395b5
26,959
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct StartSnapshotOutput { /// <p>The description of the snapshot.</p> pub description: std::option::Option<std::string::String>, /// <p>The ID of the snapshot.</p> pub snapshot_id: std::option::Option<std::string::String>, /// <p>The AWS account ID of the snapshot owner.</p> pub owner_id: std::option::Option<std::string::String>, /// <p>The status of the snapshot.</p> pub status: std::option::Option<crate::model::Status>, /// <p>The timestamp when the snapshot was created.</p> pub start_time: std::option::Option<smithy_types::Instant>, /// <p>The size of the volume, in GiB.</p> pub volume_size: std::option::Option<i64>, /// <p>The size of the blocks in the snapshot, in bytes.</p> pub block_size: std::option::Option<i32>, /// <p>The tags applied to the snapshot. You can specify up to 50 tags per snapshot. For more /// information, see <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html"> Tagging your Amazon EC2 /// resources</a> in the <i>Amazon Elastic Compute Cloud User Guide</i>.</p> pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>, /// <p>The ID of the parent snapshot.</p> pub parent_snapshot_id: std::option::Option<std::string::String>, /// <p>The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) customer /// master key (CMK) used to encrypt the snapshot.</p> pub kms_key_arn: std::option::Option<std::string::String>, } impl std::fmt::Debug for StartSnapshotOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("StartSnapshotOutput"); formatter.field("description", &self.description); formatter.field("snapshot_id", &self.snapshot_id); formatter.field("owner_id", &self.owner_id); formatter.field("status", &self.status); formatter.field("start_time", &self.start_time); formatter.field("volume_size", &self.volume_size); formatter.field("block_size", &self.block_size); formatter.field("tags", &self.tags); formatter.field("parent_snapshot_id", &self.parent_snapshot_id); formatter.field("kms_key_arn", &"*** Sensitive Data Redacted ***"); formatter.finish() } } /// See [`StartSnapshotOutput`](crate::output::StartSnapshotOutput) pub mod start_snapshot_output { /// A builder for [`StartSnapshotOutput`](crate::output::StartSnapshotOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) description: std::option::Option<std::string::String>, pub(crate) snapshot_id: std::option::Option<std::string::String>, pub(crate) owner_id: std::option::Option<std::string::String>, pub(crate) status: std::option::Option<crate::model::Status>, pub(crate) start_time: std::option::Option<smithy_types::Instant>, pub(crate) volume_size: std::option::Option<i64>, pub(crate) block_size: std::option::Option<i32>, pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>, pub(crate) parent_snapshot_id: std::option::Option<std::string::String>, pub(crate) kms_key_arn: std::option::Option<std::string::String>, } impl Builder { /// <p>The description of the snapshot.</p> pub fn description(mut self, input: impl Into<std::string::String>) -> Self { self.description = Some(input.into()); self } pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self { self.description = input; self } /// <p>The ID of the snapshot.</p> 
pub fn snapshot_id(mut self, input: impl Into<std::string::String>) -> Self { self.snapshot_id = Some(input.into()); self } pub fn set_snapshot_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.snapshot_id = input; self } /// <p>The AWS account ID of the snapshot owner.</p> pub fn owner_id(mut self, input: impl Into<std::string::String>) -> Self { self.owner_id = Some(input.into()); self } pub fn set_owner_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.owner_id = input; self } /// <p>The status of the snapshot.</p> pub fn status(mut self, input: crate::model::Status) -> Self { self.status = Some(input); self } pub fn set_status(mut self, input: std::option::Option<crate::model::Status>) -> Self { self.status = input; self } /// <p>The timestamp when the snapshot was created.</p> pub fn start_time(mut self, input: smithy_types::Instant) -> Self { self.start_time = Some(input); self } pub fn set_start_time(mut self, input: std::option::Option<smithy_types::Instant>) -> Self { self.start_time = input; self } /// <p>The size of the volume, in GiB.</p> pub fn volume_size(mut self, input: i64) -> Self { self.volume_size = Some(input); self } pub fn set_volume_size(mut self, input: std::option::Option<i64>) -> Self { self.volume_size = input; self } /// <p>The size of the blocks in the snapshot, in bytes.</p> pub fn block_size(mut self, input: i32) -> Self { self.block_size = Some(input); self } pub fn set_block_size(mut self, input: std::option::Option<i32>) -> Self { self.block_size = input; self } pub fn tags(mut self, input: impl Into<crate::model::Tag>) -> Self { let mut v = self.tags.unwrap_or_default(); v.push(input.into()); self.tags = Some(v); self } pub fn set_tags( mut self, input: std::option::Option<std::vec::Vec<crate::model::Tag>>, ) -> Self { self.tags = input; self } /// <p>The ID of the parent snapshot.</p> pub fn parent_snapshot_id(mut self, input: impl Into<std::string::String>) -> Self { self.parent_snapshot_id = Some(input.into()); self } pub fn set_parent_snapshot_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.parent_snapshot_id = input; self } /// <p>The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) customer /// master key (CMK) used to encrypt the snapshot.</p> pub fn kms_key_arn(mut self, input: impl Into<std::string::String>) -> Self { self.kms_key_arn = Some(input.into()); self } pub fn set_kms_key_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.kms_key_arn = input; self } /// Consumes the builder and constructs a [`StartSnapshotOutput`](crate::output::StartSnapshotOutput) pub fn build(self) -> crate::output::StartSnapshotOutput { crate::output::StartSnapshotOutput { description: self.description, snapshot_id: self.snapshot_id, owner_id: self.owner_id, status: self.status, start_time: self.start_time, volume_size: self.volume_size, block_size: self.block_size, tags: self.tags, parent_snapshot_id: self.parent_snapshot_id, kms_key_arn: self.kms_key_arn, } } } } impl StartSnapshotOutput { /// Creates a new builder-style object to manufacture [`StartSnapshotOutput`](crate::output::StartSnapshotOutput) pub fn builder() -> crate::output::start_snapshot_output::Builder { crate::output::start_snapshot_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct PutSnapshotBlockOutput { /// <p>The SHA256 checksum generated for the block data by Amazon EBS.</p> pub checksum: 
std::option::Option<std::string::String>, /// <p>The algorithm used by Amazon EBS to generate the checksum.</p> pub checksum_algorithm: std::option::Option<crate::model::ChecksumAlgorithm>, } impl std::fmt::Debug for PutSnapshotBlockOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("PutSnapshotBlockOutput"); formatter.field("checksum", &self.checksum); formatter.field("checksum_algorithm", &self.checksum_algorithm); formatter.finish() } } /// See [`PutSnapshotBlockOutput`](crate::output::PutSnapshotBlockOutput) pub mod put_snapshot_block_output { /// A builder for [`PutSnapshotBlockOutput`](crate::output::PutSnapshotBlockOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) checksum: std::option::Option<std::string::String>, pub(crate) checksum_algorithm: std::option::Option<crate::model::ChecksumAlgorithm>, } impl Builder { /// <p>The SHA256 checksum generated for the block data by Amazon EBS.</p> pub fn checksum(mut self, input: impl Into<std::string::String>) -> Self { self.checksum = Some(input.into()); self } pub fn set_checksum(mut self, input: std::option::Option<std::string::String>) -> Self { self.checksum = input; self } /// <p>The algorithm used by Amazon EBS to generate the checksum.</p> pub fn checksum_algorithm(mut self, input: crate::model::ChecksumAlgorithm) -> Self { self.checksum_algorithm = Some(input); self } pub fn set_checksum_algorithm( mut self, input: std::option::Option<crate::model::ChecksumAlgorithm>, ) -> Self { self.checksum_algorithm = input; self } /// Consumes the builder and constructs a [`PutSnapshotBlockOutput`](crate::output::PutSnapshotBlockOutput) pub fn build(self) -> crate::output::PutSnapshotBlockOutput { crate::output::PutSnapshotBlockOutput { checksum: self.checksum, checksum_algorithm: self.checksum_algorithm, } } } } impl PutSnapshotBlockOutput { /// Creates a new builder-style object to manufacture [`PutSnapshotBlockOutput`](crate::output::PutSnapshotBlockOutput) pub fn builder() -> crate::output::put_snapshot_block_output::Builder { crate::output::put_snapshot_block_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListSnapshotBlocksOutput { /// <p>An array of objects containing information about the blocks.</p> pub blocks: std::option::Option<std::vec::Vec<crate::model::Block>>, /// <p>The time when the <code>BlockToken</code> expires.</p> pub expiry_time: std::option::Option<smithy_types::Instant>, /// <p>The size of the volume in GB.</p> pub volume_size: std::option::Option<i64>, /// <p>The size of the block.</p> pub block_size: std::option::Option<i32>, /// <p>The token to use to retrieve the next page of results. 
This value is null when there /// are no more results to return.</p> pub next_token: std::option::Option<std::string::String>, } impl std::fmt::Debug for ListSnapshotBlocksOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListSnapshotBlocksOutput"); formatter.field("blocks", &"*** Sensitive Data Redacted ***"); formatter.field("expiry_time", &self.expiry_time); formatter.field("volume_size", &self.volume_size); formatter.field("block_size", &self.block_size); formatter.field("next_token", &self.next_token); formatter.finish() } } /// See [`ListSnapshotBlocksOutput`](crate::output::ListSnapshotBlocksOutput) pub mod list_snapshot_blocks_output { /// A builder for [`ListSnapshotBlocksOutput`](crate::output::ListSnapshotBlocksOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) blocks: std::option::Option<std::vec::Vec<crate::model::Block>>, pub(crate) expiry_time: std::option::Option<smithy_types::Instant>, pub(crate) volume_size: std::option::Option<i64>, pub(crate) block_size: std::option::Option<i32>, pub(crate) next_token: std::option::Option<std::string::String>, } impl Builder { pub fn blocks(mut self, input: impl Into<crate::model::Block>) -> Self { let mut v = self.blocks.unwrap_or_default(); v.push(input.into()); self.blocks = Some(v); self } pub fn set_blocks( mut self, input: std::option::Option<std::vec::Vec<crate::model::Block>>, ) -> Self { self.blocks = input; self } /// <p>The time when the <code>BlockToken</code> expires.</p> pub fn expiry_time(mut self, input: smithy_types::Instant) -> Self { self.expiry_time = Some(input); self } pub fn set_expiry_time( mut self, input: std::option::Option<smithy_types::Instant>, ) -> Self { self.expiry_time = input; self } /// <p>The size of the volume in GB.</p> pub fn volume_size(mut self, input: i64) -> Self { self.volume_size = Some(input); self } pub fn set_volume_size(mut self, input: std::option::Option<i64>) -> Self { self.volume_size = input; self } /// <p>The size of the block.</p> pub fn block_size(mut self, input: i32) -> Self { self.block_size = Some(input); self } pub fn set_block_size(mut self, input: std::option::Option<i32>) -> Self { self.block_size = input; self } /// <p>The token to use to retrieve the next page of results. 
This value is null when there /// are no more results to return.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Consumes the builder and constructs a [`ListSnapshotBlocksOutput`](crate::output::ListSnapshotBlocksOutput) pub fn build(self) -> crate::output::ListSnapshotBlocksOutput { crate::output::ListSnapshotBlocksOutput { blocks: self.blocks, expiry_time: self.expiry_time, volume_size: self.volume_size, block_size: self.block_size, next_token: self.next_token, } } } } impl ListSnapshotBlocksOutput { /// Creates a new builder-style object to manufacture [`ListSnapshotBlocksOutput`](crate::output::ListSnapshotBlocksOutput) pub fn builder() -> crate::output::list_snapshot_blocks_output::Builder { crate::output::list_snapshot_blocks_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListChangedBlocksOutput { /// <p>An array of objects containing information about the changed blocks.</p> pub changed_blocks: std::option::Option<std::vec::Vec<crate::model::ChangedBlock>>, /// <p>The time when the <code>BlockToken</code> expires.</p> pub expiry_time: std::option::Option<smithy_types::Instant>, /// <p>The size of the volume in GB.</p> pub volume_size: std::option::Option<i64>, /// <p>The size of the block.</p> pub block_size: std::option::Option<i32>, /// <p>The token to use to retrieve the next page of results. This value is null when there /// are no more results to return.</p> pub next_token: std::option::Option<std::string::String>, } impl std::fmt::Debug for ListChangedBlocksOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListChangedBlocksOutput"); formatter.field("changed_blocks", &self.changed_blocks); formatter.field("expiry_time", &self.expiry_time); formatter.field("volume_size", &self.volume_size); formatter.field("block_size", &self.block_size); formatter.field("next_token", &self.next_token); formatter.finish() } } /// See [`ListChangedBlocksOutput`](crate::output::ListChangedBlocksOutput) pub mod list_changed_blocks_output { /// A builder for [`ListChangedBlocksOutput`](crate::output::ListChangedBlocksOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) changed_blocks: std::option::Option<std::vec::Vec<crate::model::ChangedBlock>>, pub(crate) expiry_time: std::option::Option<smithy_types::Instant>, pub(crate) volume_size: std::option::Option<i64>, pub(crate) block_size: std::option::Option<i32>, pub(crate) next_token: std::option::Option<std::string::String>, } impl Builder { pub fn changed_blocks(mut self, input: impl Into<crate::model::ChangedBlock>) -> Self { let mut v = self.changed_blocks.unwrap_or_default(); v.push(input.into()); self.changed_blocks = Some(v); self } pub fn set_changed_blocks( mut self, input: std::option::Option<std::vec::Vec<crate::model::ChangedBlock>>, ) -> Self { self.changed_blocks = input; self } /// <p>The time when the <code>BlockToken</code> expires.</p> pub fn expiry_time(mut self, input: smithy_types::Instant) -> Self { self.expiry_time = Some(input); self } pub fn set_expiry_time( mut self, input: std::option::Option<smithy_types::Instant>, ) -> Self { self.expiry_time = input; self } /// <p>The size of the volume in GB.</p> pub 
fn volume_size(mut self, input: i64) -> Self { self.volume_size = Some(input); self } pub fn set_volume_size(mut self, input: std::option::Option<i64>) -> Self { self.volume_size = input; self } /// <p>The size of the block.</p> pub fn block_size(mut self, input: i32) -> Self { self.block_size = Some(input); self } pub fn set_block_size(mut self, input: std::option::Option<i32>) -> Self { self.block_size = input; self } /// <p>The token to use to retrieve the next page of results. This value is null when there /// are no more results to return.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Consumes the builder and constructs a [`ListChangedBlocksOutput`](crate::output::ListChangedBlocksOutput) pub fn build(self) -> crate::output::ListChangedBlocksOutput { crate::output::ListChangedBlocksOutput { changed_blocks: self.changed_blocks, expiry_time: self.expiry_time, volume_size: self.volume_size, block_size: self.block_size, next_token: self.next_token, } } } } impl ListChangedBlocksOutput { /// Creates a new builder-style object to manufacture [`ListChangedBlocksOutput`](crate::output::ListChangedBlocksOutput) pub fn builder() -> crate::output::list_changed_blocks_output::Builder { crate::output::list_changed_blocks_output::Builder::default() } } #[non_exhaustive] pub struct GetSnapshotBlockOutput { /// <p>The size of the data in the block.</p> pub data_length: std::option::Option<i32>, /// <p>The data content of the block.</p> pub block_data: smithy_http::byte_stream::ByteStream, /// <p>The checksum generated for the block, which is Base64 encoded.</p> pub checksum: std::option::Option<std::string::String>, /// <p>The algorithm used to generate the checksum for the block, such as SHA256.</p> pub checksum_algorithm: std::option::Option<crate::model::ChecksumAlgorithm>, } impl std::fmt::Debug for GetSnapshotBlockOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("GetSnapshotBlockOutput"); formatter.field("data_length", &self.data_length); formatter.field("block_data", &"*** Sensitive Data Redacted ***"); formatter.field("checksum", &self.checksum); formatter.field("checksum_algorithm", &self.checksum_algorithm); formatter.finish() } } /// See [`GetSnapshotBlockOutput`](crate::output::GetSnapshotBlockOutput) pub mod get_snapshot_block_output { /// A builder for [`GetSnapshotBlockOutput`](crate::output::GetSnapshotBlockOutput) #[non_exhaustive] #[derive(std::default::Default, std::fmt::Debug)] pub struct Builder { pub(crate) data_length: std::option::Option<i32>, pub(crate) block_data: std::option::Option<smithy_http::byte_stream::ByteStream>, pub(crate) checksum: std::option::Option<std::string::String>, pub(crate) checksum_algorithm: std::option::Option<crate::model::ChecksumAlgorithm>, } impl Builder { /// <p>The size of the data in the block.</p> pub fn data_length(mut self, input: i32) -> Self { self.data_length = Some(input); self } pub fn set_data_length(mut self, input: std::option::Option<i32>) -> Self { self.data_length = input; self } /// <p>The data content of the block.</p> pub fn block_data(mut self, input: smithy_http::byte_stream::ByteStream) -> Self { self.block_data = Some(input); self } pub fn set_block_data( mut self, input: std::option::Option<smithy_http::byte_stream::ByteStream>, ) -> Self { self.block_data = 
input; self } /// <p>The checksum generated for the block, which is Base64 encoded.</p> pub fn checksum(mut self, input: impl Into<std::string::String>) -> Self { self.checksum = Some(input.into()); self } pub fn set_checksum(mut self, input: std::option::Option<std::string::String>) -> Self { self.checksum = input; self } /// <p>The algorithm used to generate the checksum for the block, such as SHA256.</p> pub fn checksum_algorithm(mut self, input: crate::model::ChecksumAlgorithm) -> Self { self.checksum_algorithm = Some(input); self } pub fn set_checksum_algorithm( mut self, input: std::option::Option<crate::model::ChecksumAlgorithm>, ) -> Self { self.checksum_algorithm = input; self } /// Consumes the builder and constructs a [`GetSnapshotBlockOutput`](crate::output::GetSnapshotBlockOutput) pub fn build(self) -> crate::output::GetSnapshotBlockOutput { crate::output::GetSnapshotBlockOutput { data_length: self.data_length, block_data: self.block_data.unwrap_or_default(), checksum: self.checksum, checksum_algorithm: self.checksum_algorithm, } } } } impl GetSnapshotBlockOutput { /// Creates a new builder-style object to manufacture [`GetSnapshotBlockOutput`](crate::output::GetSnapshotBlockOutput) pub fn builder() -> crate::output::get_snapshot_block_output::Builder { crate::output::get_snapshot_block_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CompleteSnapshotOutput { /// <p>The status of the snapshot.</p> pub status: std::option::Option<crate::model::Status>, } impl std::fmt::Debug for CompleteSnapshotOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CompleteSnapshotOutput"); formatter.field("status", &self.status); formatter.finish() } } /// See [`CompleteSnapshotOutput`](crate::output::CompleteSnapshotOutput) pub mod complete_snapshot_output { /// A builder for [`CompleteSnapshotOutput`](crate::output::CompleteSnapshotOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) status: std::option::Option<crate::model::Status>, } impl Builder { /// <p>The status of the snapshot.</p> pub fn status(mut self, input: crate::model::Status) -> Self { self.status = Some(input); self } pub fn set_status(mut self, input: std::option::Option<crate::model::Status>) -> Self { self.status = input; self } /// Consumes the builder and constructs a [`CompleteSnapshotOutput`](crate::output::CompleteSnapshotOutput) pub fn build(self) -> crate::output::CompleteSnapshotOutput { crate::output::CompleteSnapshotOutput { status: self.status, } } } } impl CompleteSnapshotOutput { /// Creates a new builder-style object to manufacture [`CompleteSnapshotOutput`](crate::output::CompleteSnapshotOutput) pub fn builder() -> crate::output::complete_snapshot_output::Builder { crate::output::complete_snapshot_output::Builder::default() } }
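// Hedged usage sketch: these are crate-internal generated types, so the helper
// below only compiles inside the generated SDK crate; the snapshot ID and volume
// size are illustrative values, not real resources.
fn _builder_demo() -> crate::output::StartSnapshotOutput {
    crate::output::StartSnapshotOutput::builder()
        .snapshot_id("snap-1111aaaa2222bbbb3") // hypothetical ID for illustration
        .volume_size(8)
        .build()
}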
44.340461
127
0.610816
6431b7a20bd5936cf5d07f8aac40165d237ffb2f
2,972
use std::{convert::Infallible, marker::PhantomData}; use bytes::{Buf, Bytes, BytesMut}; use futures::{ channel::{self, mpsc::UnboundedReceiver}, stream, Stream, StreamExt, }; use hyper::{body, Body, Request, Response, StatusCode}; use serde::{Deserialize, Serialize}; use super::{Handler, HandlerFn}; use crate::{error::Result, rowbinary, sealed::Sealed}; // raw struct RawHandler<F>(Option<F>); impl<F> Handler for RawHandler<F> where F: FnOnce(Request<Body>) -> Response<Body> + Send + 'static, { type Control = (); fn make(&mut self) -> (HandlerFn, Self::Control) { let h = Box::new(self.0.take().expect("raw handler must be called only once")); (h, ()) } } impl<F> Sealed for RawHandler<F> {} fn raw(f: impl FnOnce(Request<Body>) -> Response<Body> + Send + 'static) -> impl Handler { RawHandler(Some(f)) } // failure pub fn failure(status: StatusCode) -> impl Handler { let reason = status.canonical_reason().unwrap_or("<unknown status code>"); raw(move |_req| { Response::builder() .status(status) .body(Body::from(reason)) .expect("invalid builder") }) } // provide pub fn provide<T>(rows: impl Stream<Item = T> + Send + 'static) -> impl Handler where T: Serialize, { let s = rows.map(|row| -> Result<Bytes> { let mut buffer = BytesMut::with_capacity(128); rowbinary::serialize_into(&mut buffer, &row)?; Ok(buffer.freeze()) }); raw(move |_req| Response::new(Body::wrap_stream(s))) } // record struct RecordHandler<T>(PhantomData<T>); impl<T> Handler for RecordHandler<T> where T: for<'a> Deserialize<'a> + Send + 'static, { type Control = RecordControl<T>; #[doc(hidden)] fn make(&mut self) -> (HandlerFn, Self::Control) { let (tx, rx) = channel::mpsc::unbounded(); let control = RecordControl(rx); let h = Box::new(move |req: Request<Body>| -> Response<Body> { let fut = async move { let body = req.into_body(); let mut buf = body::aggregate(body).await.expect("invalid request"); while buf.has_remaining() { let row = rowbinary::deserialize_from(&mut buf, &mut []) .expect("failed to deserialize"); tx.unbounded_send(row).expect("failed to send, test ended?"); } Ok::<_, Infallible>("") }; Response::new(Body::wrap_stream(stream::once(fut))) }); (h, control) } } impl<T> Sealed for RecordHandler<T> {} pub struct RecordControl<T>(UnboundedReceiver<T>); impl<T> RecordControl<T> { pub async fn collect<C>(self) -> C where C: Default + Extend<T>, { self.0.collect().await } } pub fn record<T>() -> impl Handler<Control = RecordControl<T>> where T: for<'a> Deserialize<'a> + Send + 'static, { RecordHandler(PhantomData) }
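// Hedged, standalone sketch (not part of this crate): `RecordControl::collect`
// above simply drains an unbounded futures channel into any `Default + Extend`
// collection once every sender is gone. The same mechanism in isolation, driven
// by `futures::executor::block_on` (available with futures' default features):
fn _collect_demo() {
    use futures::{channel::mpsc, executor::block_on, StreamExt};

    let (tx, rx) = mpsc::unbounded::<u32>();
    for i in 0..3 {
        tx.unbounded_send(i).expect("receiver is alive");
    }
    drop(tx); // closing the sender lets the stream terminate

    let rows: Vec<u32> = block_on(rx.collect());
    assert_eq!(rows, vec![0, 1, 2]);
}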
25.401709
90
0.586474
db8c6a6b8acfc76388ce24fffaff09973deacdfd
638
extern crate zbx_sender; use std::env; use zbx_sender::{Response, Result, Sender}; fn send_one_value(command: &str) -> Result<Response> { let sender = Sender::new(command.to_owned(), 10051); sender.send(("host1", "key1", "value")) } fn main() { let command = match env::args().nth(1) { Some(cmd) => cmd, None => { let name = env::args().nth(0).unwrap(); panic!("Usage: {} [command]", name) } }; match send_one_value(&command) { Ok(response) => println!("{:?} is success {} ", response, response.success()), Err(e) => println!("Error {}", e), } }
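// Hedged usage note: the single CLI argument is used as the Zabbix server/proxy
// address handed to `Sender::new(...)`, with the trapper port fixed at 10051
// above, so a run looks roughly like
// `cargo run --example <example-name> -- zabbix.example.org`
// (the example's file name is not shown here, hence the placeholder).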
25.52
86
0.545455
efceff1afcf308a93b71d4c19842e1a1b23045ee
1,550
pub struct IconIceSkating { props: crate::Props, } impl yew::Component for IconIceSkating { type Properties = crate::Props; type Message = (); fn create(props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self { Self { props } } fn update(&mut self, _: Self::Message) -> yew::prelude::ShouldRender { true } fn change(&mut self, _: Self::Properties) -> yew::prelude::ShouldRender { false } fn view(&self) -> yew::prelude::Html { yew::prelude::html! { <svg class=self.props.class.unwrap_or("") width=self.props.size.unwrap_or(24).to_string() height=self.props.size.unwrap_or(24).to_string() viewBox="0 0 24 24" fill=self.props.fill.unwrap_or("none") stroke=self.props.color.unwrap_or("currentColor") stroke-width=self.props.stroke_width.unwrap_or(2).to_string() stroke-linecap=self.props.stroke_linecap.unwrap_or("round") stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round") > <svg xmlns="http://www.w3.org/2000/svg" enable-background="new 0 0 24 24" height="24" viewBox="0 0 24 24" width="24"><rect fill="none" height="24" width="24"/><g><path d="M21,17c0,1.66-1.34,3-3,3h-2v-2h3l-0.01-6l-5.71-1.43C12.4,10.35,11.7,9.76,11.32,9H8V8h3.02L11,7H8V6h3V3H3v15h3v2H2v2h16 c2.76,0,5-2.24,5-5H21z M14,20H8v-2h6V20z"/></g></svg> </svg> } } }
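// Hedged usage note: `crate::Props` is not shown in this file, but `view()` above
// only reads optional styling fields (`class`, `size`, `fill`, `color`,
// `stroke_width`, `stroke_linecap`, `stroke_linejoin`), each with a fallback via
// `unwrap_or`. Rendering the component with everything left at those fallbacks,
// e.g. `html! { <IconIceSkating /> }` inside a parent component's `view`, yields
// a 24x24 `currentColor` stroke icon; the exact prop-passing syntax depends on
// the yew version in use, so treat the snippet as illustrative only.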
33.695652
355
0.584516
f52a64d40b266df70e5e77eee24b895c02e658be
89
mod conf; pub mod handler; pub mod input; pub mod opt; pub mod output; pub use conf::*;
11.125
16
0.696629
382c4a3fa670fd49f96cbab2b7b2fb0346371af6
4,957
#![feature(test)] #![feature(specialization)] use std::alloc::System; #[global_allocator] static GLOBAL: System = System; #[macro_use] extern crate swc_common; extern crate swc_ecma_ast; extern crate swc_ecma_parser; extern crate swc_ecma_transforms; extern crate test; extern crate testing; use swc_common::{FileName, FoldWith}; use swc_ecma_parser::{Parser, Session, SourceFileInput, Syntax}; use swc_ecma_transforms::{compat, helpers}; use test::Bencher; static SOURCE: &'static str = r#" 'use strict'; /** * Extract red color out of a color integer: * * 0x00DEAD -> 0x00 * * @param {Number} color * @return {Number} */ function red( color ) { let foo = 3.14; return color >> 16; } /** * Extract green out of a color integer: * * 0x00DEAD -> 0xDE * * @param {Number} color * @return {Number} */ function green( color ) { return ( color >> 8 ) & 0xFF; } /** * Extract blue color out of a color integer: * * 0x00DEAD -> 0xAD * * @param {Number} color * @return {Number} */ function blue( color ) { return color & 0xFF; } /** * Converts an integer containing a color such as 0x00DEAD to a hex * string, such as '#00DEAD'; * * @param {Number} int * @return {String} */ function intToHex( int ) { const mask = '#000000'; const hex = int.toString( 16 ); return mask.substring( 0, 7 - hex.length ) + hex; } /** * Converts a hex string containing a color such as '#00DEAD' to * an integer, such as 0x00DEAD; * * @param {Number} num * @return {String} */ function hexToInt( hex ) { return parseInt( hex.substring( 1 ), 16 ); } module.exports = { red, green, blue, intToHex, hexToInt, };"#; /// Benchmark a folder macro_rules! tr { ($b:expr, $tr:expr) => { $b.bytes = SOURCE.len() as _; let _ = ::testing::run_test(false, |cm, handler| { let fm = cm.new_source_file(FileName::Anon, SOURCE.into()); let mut parser = Parser::new( Session { handler: &handler }, Syntax::default(), SourceFileInput::from(&*fm), None, ); let module = parser .parse_module() .map_err(|mut e| { e.emit(); () }) .unwrap(); helpers::HELPERS.set(&Default::default(), || { let mut tr = $tr(); $b.iter(|| { let module = module.clone(); test::black_box(module.fold_with(&mut tr)) }); Ok(()) }) }); }; } #[bench] fn all(b: &mut Bencher) { tr!(b, || chain!( compat::es2017(), compat::es2016(), compat::es2015(), compat::es3(), )); } #[bench] fn es2018(b: &mut Bencher) { tr!(b, || compat::es2018()); } #[bench] fn es2018_object_rest_spread(b: &mut Bencher) { tr!(b, || compat::es2018::object_rest_spread()); } #[bench] fn es2017(b: &mut Bencher) { tr!(b, || compat::es2017()); } #[bench] fn es2017_async_to_generator(b: &mut Bencher) { tr!(b, || compat::es2017::async_to_generator()); } #[bench] fn es2016(b: &mut Bencher) { tr!(b, || compat::es2016()); } #[bench] fn es2016_exponentation(b: &mut Bencher) { tr!(b, || compat::es2016::exponentation()); } #[bench] fn es2015(b: &mut Bencher) { tr!(b, || compat::es2015()); } #[bench] fn es2015_arrow(b: &mut Bencher) { tr!(b, || compat::es2015::arrow()); } #[bench] fn es2015_block_scoped_fn(b: &mut Bencher) { tr!(b, || compat::es2015::BlockScopedFns); } #[bench] fn es2015_block_scoping(b: &mut Bencher) { tr!(b, || compat::es2015::block_scoping()); } #[bench] fn es2015_classes(b: &mut Bencher) { tr!(b, || compat::es2015::Classes); } #[bench] fn es2015_computed_props(b: &mut Bencher) { tr!(b, compat::es2015::computed_properties); } #[bench] fn es2015_destructuring(b: &mut Bencher) { tr!(b, compat::es2015::destructuring); } #[bench] fn es2015_duplicate_keys(b: &mut Bencher) { tr!(b, || compat::es2015::duplicate_keys()); } #[bench] fn 
es2015_parameters(b: &mut Bencher) { tr!(b, || compat::es2015::parameters()); } #[bench] fn es2015_fn_name(b: &mut Bencher) { tr!(b, || compat::es2015::function_name()); } #[bench] fn es2015_for_of(b: &mut Bencher) { tr!(b, || compat::es2015::for_of()); } #[bench] fn es2015_instanceof(b: &mut Bencher) { tr!(b, || compat::es2015::InstanceOf); } #[bench] fn es2015_shorthand_property(b: &mut Bencher) { tr!(b, || compat::es2015::Shorthand); } #[bench] fn es2015_spread(b: &mut Bencher) { tr!(b, || compat::es2015::Spread); } #[bench] fn es2015_sticky_regex(b: &mut Bencher) { tr!(b, || compat::es2015::StickyRegex); } #[bench] fn es2015_typeof_symbol(b: &mut Bencher) { tr!(b, || compat::es2015::TypeOfSymbol); } #[bench] fn es3(b: &mut Bencher) { tr!(b, || compat::es3()); }
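// Hedged usage note: because of the `#![feature(test)]` and
// `#![feature(specialization)]` gates at the top of this file, these benchmarks
// only build on a nightly toolchain, e.g. `cargo +nightly bench` from the crate
// root; each bench re-parses the fixed `SOURCE` module and folds it with the
// transform named in the bench function.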
19.907631
71
0.575953
e61d63f96276fff3ca3c408c8d795125b611ebef
4,607
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/vhdirk/gir-files.git) // DO NOT EDIT use crate::Certificate; use glib::object::IsA; use glib::translate::*; use std::fmt; glib::wrapper! { #[doc(alias = "GMimeCertificateList")] pub struct CertificateList(Object<ffi::GMimeCertificateList, ffi::GMimeCertificateListClass>); match fn { type_ => || ffi::g_mime_certificate_list_get_type(), } } impl CertificateList { #[doc(alias = "g_mime_certificate_list_new")] pub fn new() -> CertificateList { assert_initialized_main_thread!(); unsafe { from_glib_full(ffi::g_mime_certificate_list_new()) } } } impl Default for CertificateList { fn default() -> Self { Self::new() } } pub const NONE_CERTIFICATE_LIST: Option<&CertificateList> = None; pub trait CertificateListExt: 'static { #[doc(alias = "g_mime_certificate_list_add")] fn add(&self, cert: &impl IsA<Certificate>) -> i32; #[doc(alias = "g_mime_certificate_list_clear")] fn clear(&self); #[doc(alias = "g_mime_certificate_list_contains")] fn contains(&self, cert: &impl IsA<Certificate>) -> bool; #[doc(alias = "g_mime_certificate_list_get_certificate")] #[doc(alias = "get_certificate")] fn certificate(&self, index: i32) -> Option<Certificate>; #[doc(alias = "g_mime_certificate_list_index_of")] fn index_of(&self, cert: &impl IsA<Certificate>) -> i32; #[doc(alias = "g_mime_certificate_list_insert")] fn insert(&self, index: i32, cert: &impl IsA<Certificate>); #[doc(alias = "g_mime_certificate_list_length")] fn length(&self) -> i32; #[doc(alias = "g_mime_certificate_list_remove")] fn remove(&self, cert: &impl IsA<Certificate>) -> bool; #[doc(alias = "g_mime_certificate_list_remove_at")] fn remove_at(&self, index: i32) -> bool; #[doc(alias = "g_mime_certificate_list_set_certificate")] fn set_certificate(&self, index: i32, cert: &impl IsA<Certificate>); } impl<O: IsA<CertificateList>> CertificateListExt for O { fn add(&self, cert: &impl IsA<Certificate>) -> i32 { unsafe { ffi::g_mime_certificate_list_add( self.as_ref().to_glib_none().0, cert.as_ref().to_glib_none().0, ) } } fn clear(&self) { unsafe { ffi::g_mime_certificate_list_clear(self.as_ref().to_glib_none().0); } } fn contains(&self, cert: &impl IsA<Certificate>) -> bool { unsafe { from_glib(ffi::g_mime_certificate_list_contains( self.as_ref().to_glib_none().0, cert.as_ref().to_glib_none().0, )) } } fn certificate(&self, index: i32) -> Option<Certificate> { unsafe { from_glib_full(ffi::g_mime_certificate_list_get_certificate( self.as_ref().to_glib_none().0, index, )) } } fn index_of(&self, cert: &impl IsA<Certificate>) -> i32 { unsafe { ffi::g_mime_certificate_list_index_of( self.as_ref().to_glib_none().0, cert.as_ref().to_glib_none().0, ) } } fn insert(&self, index: i32, cert: &impl IsA<Certificate>) { unsafe { ffi::g_mime_certificate_list_insert( self.as_ref().to_glib_none().0, index, cert.as_ref().to_glib_none().0, ); } } fn length(&self) -> i32 { unsafe { ffi::g_mime_certificate_list_length(self.as_ref().to_glib_none().0) } } fn remove(&self, cert: &impl IsA<Certificate>) -> bool { unsafe { from_glib(ffi::g_mime_certificate_list_remove( self.as_ref().to_glib_none().0, cert.as_ref().to_glib_none().0, )) } } fn remove_at(&self, index: i32) -> bool { unsafe { from_glib(ffi::g_mime_certificate_list_remove_at( self.as_ref().to_glib_none().0, index, )) } } fn set_certificate(&self, index: i32, cert: &impl IsA<Certificate>) { unsafe { ffi::g_mime_certificate_list_set_certificate( self.as_ref().to_glib_none().0, index, cert.as_ref().to_glib_none().0, ); } 
} } impl fmt::Display for CertificateList { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("CertificateList") } }
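// Hedged usage sketch (requires the gmime bindings crate and GMime to be
// initialized first, which `CertificateList::new()` checks via
// `assert_initialized_main_thread!`):
//
//   let list = CertificateList::new();
//   assert_eq!(list.length(), 0);
//   // `add`, `insert`, `contains`, `remove`, ... accept any `impl IsA<Certificate>`.
//
// Certificate values come from elsewhere in the crate, so none is constructed here.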
28.974843
98
0.585196
cc0f55af26987635cee7d65677c7eeb1e5dbc741
3,606
// Copyright 2017 The syscall.rs Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! This library was built for PowerPC64 Linux.

// See src/linux-powerpc/mod.rs for more information

#![allow(unused_assignments)]
#![allow(unused_variables)]

pub mod nr;

#[inline(always)]
pub unsafe fn syscall0(mut n: usize) -> usize {
    let ret: usize;
    llvm_asm!("sc
               bns+ 1f
               neg $1, $1
               1:"
              : "+{r0}"(n) "={r3}"(ret)
              :
              : "cr0" "memory" "r4" "r5" "r6" "r7" "r8" "r9" "r10" "r11" "r12"
              : "volatile");
    ret
}

#[inline(always)]
pub unsafe fn syscall1(mut n: usize, mut a1: usize) -> usize {
    llvm_asm!("sc
               bns+ 1f
               neg $1, $1
               1:"
              : "+{r0}"(n) "+{r3}"(a1)
              :
              : "cr0" "memory" "r4" "r5" "r6" "r7" "r8" "r9" "r10" "r11" "r12"
              : "volatile");
    a1
}

#[inline(always)]
pub unsafe fn syscall2(mut n: usize, mut a1: usize, mut a2: usize) -> usize {
    llvm_asm!("sc
               bns+ 1f
               neg $1, $1
               1:"
              : "+{r0}"(n) "+{r3}"(a1) "+{r4}"(a2)
              :
              : "cr0" "memory" "r5" "r6" "r7" "r8" "r9" "r10" "r11" "r12"
              : "volatile");
    a1
}

#[inline(always)]
pub unsafe fn syscall3(mut n: usize, mut a1: usize, mut a2: usize, mut a3: usize) -> usize {
    llvm_asm!("sc
               bns+ 1f
               neg $1, $1
               1:"
              : "+{r0}"(n) "+{r3}"(a1) "+{r4}"(a2) "+{r5}"(a3)
              :
              : "cr0" "memory" "r6" "r7" "r8" "r9" "r10" "r11" "r12"
              : "volatile");
    a1
}

#[inline(always)]
pub unsafe fn syscall4(mut n: usize, mut a1: usize, mut a2: usize, mut a3: usize,
                       mut a4: usize) -> usize {
    llvm_asm!("sc
               bns+ 1f
               neg $1, $1
               1:"
              : "+{r0}"(n) "+{r3}"(a1) "+{r4}"(a2) "+{r5}"(a3) "+{r6}"(a4)
              :
              : "cr0" "memory" "r7" "r8" "r9" "r10" "r11" "r12"
              : "volatile");
    a1
}

#[inline(always)]
pub unsafe fn syscall5(mut n: usize, mut a1: usize, mut a2: usize, mut a3: usize,
                       mut a4: usize, mut a5: usize) -> usize {
    llvm_asm!("sc
               bns+ 1f
               neg $1, $1
               1:"
              : "+{r0}"(n) "+{r3}"(a1) "+{r4}"(a2) "+{r5}"(a3) "+{r6}"(a4) "+{r7}"(a5)
              :
              : "cr0" "memory" "r8" "r9" "r10" "r11" "r12"
              : "volatile");
    a1
}

#[inline(always)]
pub unsafe fn syscall6(mut n: usize, mut a1: usize, mut a2: usize, mut a3: usize,
                       mut a4: usize, mut a5: usize, mut a6: usize) -> usize {
    llvm_asm!("sc
               bns+ 1f
               neg $1, $1
               1:"
              : "+{r0}"(n) "+{r3}"(a1) "+{r4}"(a2) "+{r5}"(a3) "+{r6}"(a4) "+{r7}"(a5) "+{r8}"(a6)
              :
              : "cr0" "memory" "r9" "r10" "r11" "r12"
              : "volatile");
    a1
}
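// Hedged usage sketch: the `nr` module is generated elsewhere in the crate, so
// the constant name and value below are assumptions (write(2) is believed to be
// syscall 4 on powerpc64 Linux); verify against `nr` before relying on it.
//
//   let msg = b"hello\n";
//   let ret = unsafe {
//       syscall3(nr::WRITE, 1 /* stdout */, msg.as_ptr() as usize, msg.len())
//   };
//   // On success `ret` is the byte count; on failure the `neg` in the asm above
//   // turns the kernel's errno into a negated value.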
26.910448
77
0.415141
5d1baae9f71c47bfffce12234fb77f4d73e9fb1c
722
// Copyright 2020 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // use crate::factory_reset::{facade::FactoryResetFacade, types::FactoryResetMethod}; use crate::server::Facade; use anyhow::Error; use async_trait::async_trait; use serde_json::{to_value, Value}; #[async_trait(?Send)] impl Facade for FactoryResetFacade { async fn handle_request(&self, method: String, _args: Value) -> Result<Value, Error> { match method.parse()? { FactoryResetMethod::FactoryReset => { let result = self.factory_reset().await?; Ok(to_value(result)?) } } } }
32.818182
90
0.663435
891780ca1981435638ae8c350dd7a8eaa5673af2
1,919
/* Copyright 2020 Takashi Ogura Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use nalgebra::RealField; /// Information for copying joint state of other joint /// /// For example, `Mimic` is used to calculate the position of the gripper(R) from /// gripper(L). In that case, the code like below will be used. /// /// ``` /// let mimic_for_gripper_r = k::joint::Mimic::new(-1.0, 0.0); /// ``` /// /// output position (mimic_position() is calculated by `joint positions = joint[name] * multiplier + origin` /// #[derive(Debug, Clone)] pub struct Mimic<T: RealField> { pub multiplier: T, pub origin: T, } impl<T> Mimic<T> where T: RealField, { /// Create new instance of Mimic /// /// # Examples /// /// ``` /// let m = k::joint::Mimic::<f64>::new(1.0, 0.5); /// ``` pub fn new(multiplier: T, origin: T) -> Self { Mimic { multiplier, origin } } /// Calculate the mimic joint position /// /// # Examples /// /// ``` /// let m = k::joint::Mimic::<f64>::new(1.0, 0.5); /// assert_eq!(m.mimic_position(0.2), 0.7); // 0.2 * 1.0 + 0.5 /// ``` /// /// ``` /// let m = k::joint::Mimic::<f64>::new(-2.0, -0.4); /// assert_eq!(m.mimic_position(0.2), -0.8); // 0.2 * -2.0 - 0.4 /// ``` pub fn mimic_position(&self, from_position: T) -> T { from_position * self.multiplier.clone() + self.origin.clone() } }
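// Illustrative test completing the gripper example from the doc comment above: mirroring
// gripper(L)'s position onto gripper(R) with a multiplier of -1.0 and no offset.
#[cfg(test)]
mod gripper_mimic_example {
    use super::Mimic;

    #[test]
    fn mirrored_gripper_position() {
        let mimic_for_gripper_r = Mimic::new(-1.0_f64, 0.0);
        assert_eq!(mimic_for_gripper_r.mimic_position(0.3), -0.3);
    }
}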
29.075758
108
0.60865
0903ef8a7df4a9d1e973b753c8635ab3dfe0e765
3,731
#[doc = "Register `PROC_IN_SYNC_BYPASS` reader"] pub struct R(crate::R<PROC_IN_SYNC_BYPASS_SPEC>); impl core::ops::Deref for R { type Target = crate::R<PROC_IN_SYNC_BYPASS_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<PROC_IN_SYNC_BYPASS_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<PROC_IN_SYNC_BYPASS_SPEC>) -> Self { R(reader) } } #[doc = "Register `PROC_IN_SYNC_BYPASS` writer"] pub struct W(crate::W<PROC_IN_SYNC_BYPASS_SPEC>); impl core::ops::Deref for W { type Target = crate::W<PROC_IN_SYNC_BYPASS_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<PROC_IN_SYNC_BYPASS_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<PROC_IN_SYNC_BYPASS_SPEC>) -> Self { W(writer) } } #[doc = "Field `PROC_IN_SYNC_BYPASS` reader - "] pub struct PROC_IN_SYNC_BYPASS_R(crate::FieldReader<u32, u32>); impl PROC_IN_SYNC_BYPASS_R { pub(crate) fn new(bits: u32) -> Self { PROC_IN_SYNC_BYPASS_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PROC_IN_SYNC_BYPASS_R { type Target = crate::FieldReader<u32, u32>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PROC_IN_SYNC_BYPASS` writer - "] pub struct PROC_IN_SYNC_BYPASS_W<'a> { w: &'a mut W, } impl<'a> PROC_IN_SYNC_BYPASS_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u32) -> &'a mut W { self.w.bits = (self.w.bits & !0x3fff_ffff) | (value as u32 & 0x3fff_ffff); self.w } } impl R { #[doc = "Bits 0:29"] #[inline(always)] pub fn proc_in_sync_bypass(&self) -> PROC_IN_SYNC_BYPASS_R { PROC_IN_SYNC_BYPASS_R::new((self.bits & 0x3fff_ffff) as u32) } } impl W { #[doc = "Bits 0:29"] #[inline(always)] pub fn proc_in_sync_bypass(&mut self) -> PROC_IN_SYNC_BYPASS_W { PROC_IN_SYNC_BYPASS_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "For each bit, if 1, bypass the input synchronizer between that GPIO and the GPIO input register in the SIO. The input synchronizers should generally be unbypassed, to avoid injecting metastabilities into processors. If you're feeling brave, you can bypass to save two cycles of input latency. This register applies to GPIO 0...29. This register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api). For information about available fields see [proc_in_sync_bypass](index.html) module"] pub struct PROC_IN_SYNC_BYPASS_SPEC; impl crate::RegisterSpec for PROC_IN_SYNC_BYPASS_SPEC { type Ux = u32; } #[doc = "`read()` method returns [proc_in_sync_bypass::R](R) reader structure"] impl crate::Readable for PROC_IN_SYNC_BYPASS_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [proc_in_sync_bypass::W](W) writer structure"] impl crate::Writable for PROC_IN_SYNC_BYPASS_SPEC { type Writer = W; } #[doc = "`reset()` method sets PROC_IN_SYNC_BYPASS to value 0"] impl crate::Resettable for PROC_IN_SYNC_BYPASS_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
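// Illustrative sketch: a typical svd2rust read-modify-write of this register. How the
// register instance is reached depends on the enclosing PAC; it is assumed here to be
// borrowable as a `crate::generic::Reg<PROC_IN_SYNC_BYPASS_SPEC>`, and only the field
// accessors defined in this module are used.
#[allow(dead_code)]
fn bypass_gpio0_synchronizer(reg: &crate::generic::Reg<PROC_IN_SYNC_BYPASS_SPEC>) {
    // Set bit 0 so GPIO 0 skips the input synchronizer while bits 1..=29 keep their value.
    reg.modify(|r, w| unsafe {
        w.proc_in_sync_bypass()
            .bits(r.proc_in_sync_bypass().bits() | 0x1)
    });
}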
33.612613
300
0.656928
79c7c6131a26a5a25d76ac45308c2d642c328485
7,418
//! Version 1 (RFC 1157) use rasn::{ types::{Integer, ObjectIdentifier, OctetString}, AsnType, Decode, Encode, }; use smi::v1::{NetworkAddress, ObjectName, ObjectSyntax, TimeTicks}; #[derive(AsnType, Debug, Clone, Decode, Encode)] pub struct Message<T> { pub version: Integer, pub community: OctetString, pub data: T, } impl<T> Message<T> { pub const VERSION_1: u64 = 0; } #[derive(AsnType, Debug, Clone, Decode, Encode)] #[rasn(choice)] pub enum Pdus { GetRequest(GetRequest), GetNextRequest(GetNextRequest), GetResponse(GetResponse), SetRequest(SetRequest), Trap(Trap), } #[derive(AsnType, Debug, Clone, Decode, Encode)] #[rasn(tag(0))] #[rasn(delegate)] pub struct GetRequest(pub Pdu); #[derive(AsnType, Debug, Clone, Decode, Encode)] #[rasn(tag(1))] #[rasn(delegate)] pub struct GetNextRequest(pub Pdu); #[derive(AsnType, Debug, Clone, Decode, Encode)] #[rasn(tag(2))] #[rasn(delegate)] pub struct GetResponse(pub Pdu); #[derive(AsnType, Debug, Clone, Decode, Encode)] #[rasn(tag(3))] #[rasn(delegate)] pub struct SetRequest(pub Pdu); pub type VarBindList = alloc::vec::Vec<VarBind>; #[derive(AsnType, Debug, Clone, Decode, Encode)] pub struct Pdu { pub request_id: Integer, pub error_status: Integer, pub error_index: Integer, pub variable_bindings: VarBindList, } impl Pdu { pub const ERROR_STATUS_NO_ERROR: u64 = 0; pub const ERROR_STATUS_TOO_BIG: u64 = 1; pub const ERROR_STATUS_NO_SUCH_NAME: u64 = 2; pub const ERROR_STATUS_BAD_VALUE: u64 = 3; pub const ERROR_STATUS_READ_ONLY: u64 = 4; pub const ERROR_STATUS_GEN_ERR: u64 = 5; } #[derive(AsnType, Debug, Clone, Decode, Encode)] #[rasn(tag(context, 4))] pub struct Trap { pub enterprise: ObjectIdentifier, pub agent_addr: NetworkAddress, pub generic_trap: Integer, pub specific_trap: Integer, pub time_stamp: TimeTicks, pub variable_bindings: VarBindList, } #[derive(AsnType, Debug, Clone, Decode, Encode)] pub struct VarBind { pub name: ObjectName, pub value: ObjectSyntax, } #[cfg(test)] mod tests { use super::{Message, Trap, VarBind}; use alloc::{string::String, string::ToString, vec, vec::Vec}; use rasn::types::ObjectIdentifier; use smi::v1::{Gauge, IpAddress, NetworkAddress, TimeTicks}; fn string_oid(oid: impl AsRef<[u32]>) -> String { oid.as_ref() .iter() .map(ToString::to_string) .collect::<Vec<_>>() .join(".") } #[test] fn trap() { #[cfg_attr(rustfmt, rustfmt_skip)] let decode_data = [ // SEQUENCE -> Message 0x30, 0x4f, // INTEGER -> Message::version 0x02, 0x01, 0x00, // OCTET STRING -> Message::community 0x04, 0x06, // "public" 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, // application constructed tag 4 -> Trap 0xa4, 0x42, // OID -> Trap::enterprise 0x06, 0x0c, // 1.3.6.1.4.1.11779.1.42.3.7.8 0x2b, 0x06, 0x01, 0x04, 0x01, 0xDC, 0x03, 0x01, 0x2a, 0x03, 0x07, 0x08, // OCTET STRING -> Trap::agent_addr 0x40, 0x04, // NetworkAddress:Internet(IpAddress(10.11.12.13)) 0x0a, 0x0b, 0x0c, 0x0d, // INTEGER -> Trap::generic_trap 0x02, 0x01, 0x06, // INTEGER -> Trap::specific_trap 0x02, 0x01, 0x02, // application tag 3 -> TimeTicks 0x43, 0x02, // 11_932 0x2e, 0x9c, // SEQUENCE -> VarBindList 0x30, 0x22, // SEQUENCE -> VarBind 0x30, 0x0d, // OID -> VarBind::name 0x06, 0x07, // 1.3.6.1.2.1.1.3 0x2b, 0x06, 0x01, 0x02, 0x01, 0x01, 0x03, // application tag 3 -> TimeTicks 0x43, 0x02, // 11_932 0x2e, 0x9c, // SEQUENCE -> VarBind 0x30, 0x11, // OID -> VarBind::name 0x06, 0x0c, // 1.3.6.1.4.1.11779.1.42.2.1.7 0x2b, 0x06, 0x01, 0x04, 0x01, 0xDC, 0x03, 0x01, 0x2a, 0x02, 0x01, 0x07, // application tag 2 -> Gauge 0x42, 0x01, 0x01, ]; let decode_msg: Message<Trap> = 
rasn::ber::decode(&decode_data).unwrap(); assert_eq!(decode_msg.version, 0.into()); assert_eq!(decode_msg.community, "public".as_bytes()); assert_eq!( string_oid(decode_msg.data.enterprise), "1.3.6.1.4.1.11779.1.42.3.7.8" ); assert_eq!( decode_msg.data.agent_addr, NetworkAddress::Internet(IpAddress([10, 11, 12, 13][..].into())) ); assert_eq!(decode_msg.data.generic_trap, 6.into()); assert_eq!(decode_msg.data.specific_trap, 2.into()); assert_eq!(decode_msg.data.time_stamp, TimeTicks(11_932)); // TODO: Currently this incorrectly decodes as an empty vector. assert_eq!(decode_msg.data.variable_bindings.len(), 2); let encode_msg = Message { version: 0.into(), community: "public".into(), data: Trap { enterprise: ObjectIdentifier::new_unchecked(vec![ 1, 3, 6, 1, 4, 1, 11779, 1, 42, 3, 7, 8, ]), agent_addr: NetworkAddress::Internet(IpAddress([10, 11, 12, 13][..].into())), generic_trap: 6.into(), specific_trap: 2.into(), time_stamp: TimeTicks(11_932), variable_bindings: vec![ VarBind { name: ObjectIdentifier::new_unchecked(vec![1, 3, 6, 1, 2, 1, 1, 3]), value: TimeTicks(11_932).into(), }, VarBind { name: ObjectIdentifier::new_unchecked(vec![ 1, 3, 6, 1, 4, 1, 11779, 1, 42, 2, 1, 7, ]), value: Gauge(1).into(), }, ], }, }; // TODO: Currently presence of any elements in `variable_bindings` throws a choice error. // Encoding succeeds and is correct with that field empty. There's a smoke-test for that // below for now. let encode_data = rasn::ber::encode(&encode_msg).unwrap(); assert_eq!(encode_data, decode_data); let encode_msg_no_bindings = Message { data: Trap { variable_bindings: vec![], ..encode_msg.data }, ..encode_msg }; assert!(rasn::ber::encode(&encode_msg_no_bindings).is_ok()); } }
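// Illustrative test: constructing a v1 GetRequest from the types above, reusing only
// conversions that already appear in this module's tests (literal `.into()` calls,
// `ObjectIdentifier::new_unchecked`, `TimeTicks(..).into()`, `alloc`'s `vec!`). As the TODO
// above notes, BER-encoding a message with non-empty `variable_bindings` is still
// problematic, so this sketch only builds and inspects the value.
#[cfg(test)]
mod get_request_sketch {
    use super::{GetRequest, Message, Pdu, Pdus, VarBind};
    use alloc::vec;
    use rasn::types::ObjectIdentifier;
    use smi::v1::TimeTicks;

    #[test]
    fn build_get_request() {
        let message = Message {
            version: 0.into(), // Message::VERSION_1
            community: "public".into(),
            data: Pdus::GetRequest(GetRequest(Pdu {
                request_id: 1.into(),
                error_status: 0.into(), // Pdu::ERROR_STATUS_NO_ERROR
                error_index: 0.into(),
                variable_bindings: vec![VarBind {
                    // sysUpTime: 1.3.6.1.2.1.1.3
                    name: ObjectIdentifier::new_unchecked(vec![1, 3, 6, 1, 2, 1, 1, 3]),
                    value: TimeTicks(0).into(),
                }],
            })),
        };
        assert!(matches!(message.data, Pdus::GetRequest(_)));
    }
}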
34.184332
97
0.501078
f9622dadd57e8f82f5591f0c5ea20ed6552149d6
2,989
#![allow(clippy::module_inception)] #![allow(clippy::too_many_arguments)] #![allow(clippy::ptr_arg)] #![allow(clippy::large_enum_variant)] #![doc = "generated by AutoRust 0.1.0"] #[cfg(feature = "package-preview-2021-09")] pub mod package_preview_2021_09; #[cfg(all(feature = "package-preview-2021-09", not(feature = "no-default-version")))] pub use package_preview_2021_09::{models, operations, operations::Error}; #[cfg(feature = "package-2020-03-01")] pub mod package_2020_03_01; #[cfg(all(feature = "package-2020-03-01", not(feature = "no-default-version")))] pub use package_2020_03_01::{models, operations, operations::Error}; #[cfg(feature = "package-2020-03-01-preview")] pub mod package_2020_03_01_preview; #[cfg(all(feature = "package-2020-03-01-preview", not(feature = "no-default-version")))] pub use package_2020_03_01_preview::{models, operations, operations::Error}; #[cfg(feature = "package-2021-03-01-preview")] pub mod package_2021_03_01_preview; use azure_core::setters; #[cfg(all(feature = "package-2021-03-01-preview", not(feature = "no-default-version")))] pub use package_2021_03_01_preview::{models, operations, operations::Error}; pub fn config( http_client: std::sync::Arc<dyn azure_core::HttpClient>, token_credential: Box<dyn azure_core::TokenCredential>, ) -> OperationConfigBuilder { OperationConfigBuilder { http_client, base_path: None, token_credential, token_credential_resource: None, } } pub struct OperationConfigBuilder { http_client: std::sync::Arc<dyn azure_core::HttpClient>, base_path: Option<String>, token_credential: Box<dyn azure_core::TokenCredential>, token_credential_resource: Option<String>, } impl OperationConfigBuilder { setters! { base_path : String => Some (base_path) , token_credential_resource : String => Some (token_credential_resource) , } pub fn build(self) -> OperationConfig { OperationConfig { http_client: self.http_client, base_path: self.base_path.unwrap_or_else(|| "https://management.azure.com".to_owned()), token_credential: Some(self.token_credential), token_credential_resource: self .token_credential_resource .unwrap_or_else(|| "https://management.azure.com/".to_owned()), } } } pub struct OperationConfig { http_client: std::sync::Arc<dyn azure_core::HttpClient>, base_path: String, token_credential: Option<Box<dyn azure_core::TokenCredential>>, token_credential_resource: String, } impl OperationConfig { pub fn http_client(&self) -> &dyn azure_core::HttpClient { self.http_client.as_ref() } pub fn base_path(&self) -> &str { self.base_path.as_str() } pub fn token_credential(&self) -> Option<&dyn azure_core::TokenCredential> { self.token_credential.as_deref() } pub fn token_credential_resource(&self) -> &str { self.token_credential_resource.as_str() } }
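// Illustrative sketch: assembling an `OperationConfig` with the builder above. Any
// `azure_core::HttpClient` / `azure_core::TokenCredential` implementations will do;
// obtaining them (for example via `azure_identity`) is left out because those helpers are
// assumptions about sibling crates.
#[allow(dead_code)]
fn operation_config_with_defaults(
    http_client: std::sync::Arc<dyn azure_core::HttpClient>,
    token_credential: Box<dyn azure_core::TokenCredential>,
) -> OperationConfig {
    // With no overrides the builder targets the public-cloud ARM endpoint for both the base
    // path and the token resource; call the generated `base_path(..)` /
    // `token_credential_resource(..)` setters before `build()` to point elsewhere.
    config(http_client, token_credential).build()
}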
40.945205
130
0.702911
acb06b5b86324d5eadab9c2992b6b5a1fcc0d43b
7,943
use async_trait::async_trait; use super::*; use crate::error::TaskError; use crate::task::{Request, Task, TaskOptions}; use chrono::{DateTime, SecondsFormat, Utc}; use std::time::SystemTime; #[derive(Clone, Serialize, Deserialize)] struct TestTaskParams { a: i32, } struct TestTask { request: Request<Self>, options: TaskOptions, } #[async_trait] impl Task for TestTask { const NAME: &'static str = "test"; const ARGS: &'static [&'static str] = &["a"]; type Params = TestTaskParams; type Returns = (); fn from_request(request: Request<Self>, options: TaskOptions) -> Self { Self { request, options } } fn request(&self) -> &Request<Self> { &self.request } fn options(&self) -> &TaskOptions { &self.options } async fn run(&self, _params: Self::Params) -> Result<(), TaskError> { Ok(()) } } const JSON: &str = "[[],{\"a\":4},{\"callbacks\":null,\"errbacks\":null,\"chain\":null,\"chord\":null}]"; #[test] fn test_serialize_body() { let body = MessageBody::<TestTask>::new(TestTaskParams { a: 4 }); let serialized = serde_json::to_string(&body).unwrap(); assert_eq!(serialized, JSON); } #[test] fn test_deserialize_body_with_args() { let message = Message { properties: MessageProperties { correlation_id: "aaa".into(), content_type: "application/json".into(), content_encoding: "utf-8".into(), reply_to: None, }, headers: MessageHeaders { id: "aaa".into(), task: "TestTask".into(), ..Default::default() }, raw_body: Vec::from(JSON), }; let body = message.body::<TestTask>().unwrap(); assert_eq!(body.1.a, 4); } const YAML: &str = "---\n- []\n- a: 4\n- callbacks: ~\n errbacks: ~\n chain: ~\n chord: ~\n"; #[test] fn test_yaml_serialize_body() { let body = MessageBody::<TestTask>::new(TestTaskParams { a: 4 }); let serialized = serde_yaml::to_string(&body).unwrap(); assert_eq!(serialized, YAML); } #[test] fn test_yaml_deserialize_body_with_args() { let message = Message { properties: MessageProperties { correlation_id: "aaa".into(), content_type: "application/x-yaml".into(), content_encoding: "utf-8".into(), reply_to: None, }, headers: MessageHeaders { id: "aaa".into(), task: "TestTask".into(), ..Default::default() }, raw_body: Vec::from(YAML), }; let body = message.body::<TestTask>().unwrap(); assert_eq!(body.1.a, 4); } const PICKLE: &[u8] = b"\x80\x03(]}(X\x01\x00\x00\x00aJ\x04\x00\x00\x00u}(X\x09\x00\x00\x00callbacksNX\x08\x00\x00\x00errbacksNX\x05\x00\x00\x00chainNX\x05\x00\x00\x00chordNut."; #[test] fn test_pickle_serialize_body() { let body = MessageBody::<TestTask>::new(TestTaskParams { a: 4 }); let serialized = serde_pickle::to_vec(&body, serde_pickle::SerOptions::new()).unwrap(); // println!("{}", String::from_utf8(serialized.split_off(1)).unwrap()); assert_eq!(serialized, PICKLE.to_vec()); } #[test] fn test_pickle_deserialize_body_with_args() { let message = Message { properties: MessageProperties { correlation_id: "aaa".into(), content_type: "application/x-python-serialize".into(), content_encoding: "utf-8".into(), reply_to: None, }, headers: MessageHeaders { id: "aaa".into(), task: "TestTask".into(), ..Default::default() }, raw_body: PICKLE.to_vec(), }; let body = message.body::<TestTask>().unwrap(); assert_eq!(body.1.a, 4); } const MSGPACK: &[u8] = &[147, 144, 145, 4, 148, 192, 192, 192, 192]; #[test] fn test_msgpack_serialize_body() { let body = MessageBody::<TestTask>::new(TestTaskParams { a: 4 }); let serialized = rmp_serde::to_vec(&body).unwrap(); // println!("{:?}", serialized); assert_eq!(serialized, MSGPACK); } #[test] fn test_msgpack_deserialize_body_with_args() { let message = Message { properties: 
MessageProperties { correlation_id: "aaa".into(), content_type: "application/x-msgpack".into(), content_encoding: "utf-8".into(), reply_to: None, }, headers: MessageHeaders { id: "aaa".into(), task: "TestTask".into(), ..Default::default() }, raw_body: MSGPACK.to_vec(), }; let body = message.body::<TestTask>().unwrap(); assert_eq!(body.1.a, 4); } #[test] /// Tests message serialization. fn test_serialization() { let now = DateTime::<Utc>::from(SystemTime::now()); // HACK: round this to milliseconds because that will happen during conversion // from message -> delivery. let now_str = now.to_rfc3339_opts(SecondsFormat::Millis, false); let now = DateTime::<Utc>::from(DateTime::parse_from_rfc3339(&now_str).unwrap()); let message = Message { properties: MessageProperties { correlation_id: "aaa".into(), content_type: "application/json".into(), content_encoding: "utf-8".into(), reply_to: Some("bbb".into()), }, headers: MessageHeaders { id: "aaa".into(), task: "add".into(), lang: Some("rust".into()), root_id: Some("aaa".into()), parent_id: Some("000".into()), group: Some("A".into()), meth: Some("method_name".into()), shadow: Some("add-these".into()), eta: Some(now), expires: Some(now), retries: Some(1), timelimit: (Some(30), Some(60)), argsrepr: Some("(1)".into()), kwargsrepr: Some("{'y': 2}".into()), origin: Some("gen123@piper".into()), }, raw_body: Vec::from(JSON), }; let ser_msg_result = message.json_serialized(); assert!(ser_msg_result.is_ok()); let ser_msg = ser_msg_result.unwrap(); let ser_msg_json: serde_json::Value = serde_json::from_slice(&ser_msg[..]).unwrap(); assert_eq!(ser_msg_json["content-encoding"], String::from("utf-8")); assert_eq!( ser_msg_json["content-type"], String::from("application/json") ); assert_eq!( ser_msg_json["properties"]["correlation_id"], String::from("aaa") ); assert_eq!(ser_msg_json["properties"]["reply_to"], String::from("bbb")); assert_ne!(ser_msg_json["properties"]["delivery_tag"], ""); assert_eq!( ser_msg_json["properties"]["body_encoding"], String::from("base64") ); assert_eq!(ser_msg_json["headers"]["id"], String::from("aaa")); assert_eq!(ser_msg_json["headers"]["task"], String::from("add")); assert_eq!(ser_msg_json["headers"]["lang"], String::from("rust")); assert_eq!(ser_msg_json["headers"]["root_id"], String::from("aaa")); assert_eq!(ser_msg_json["headers"]["parent_id"], String::from("000")); assert_eq!(ser_msg_json["headers"]["group"], String::from("A")); assert_eq!(ser_msg_json["headers"]["meth"], String::from("method_name")); assert_eq!(ser_msg_json["headers"]["shadow"], String::from("add-these")); assert_eq!(ser_msg_json["headers"]["retries"], 1); assert_eq!(ser_msg_json["headers"]["eta"], now_str); assert_eq!(ser_msg_json["headers"]["expires"], now_str); assert_eq!(ser_msg_json["headers"]["timelimit"][0], 30); assert_eq!(ser_msg_json["headers"]["timelimit"][1], 60); assert_eq!(ser_msg_json["headers"]["argsrepr"], "(1)"); assert_eq!(ser_msg_json["headers"]["kwargsrepr"], "{'y': 2}"); assert_eq!(ser_msg_json["headers"]["origin"], "gen123@piper"); let body = base64::decode(ser_msg_json["body"].as_str().unwrap()).unwrap(); assert_eq!(body.len(), 73); assert_eq!(&body, JSON.as_bytes()); }
33.944444
178
0.590079
21b8898a51293fda91078dfe36c6d5df874ee165
2,639
//! Rust implementation of Amazon's Ion data format //! //! There you go #![recursion_limit = "197"] #![feature(custom_attribute)] #![allow(unreachable_code)] #[macro_use] extern crate pest; extern crate num_bigint; extern crate num_bigdecimal; extern crate num_rational; use num_bigint::BigInt; use num_bigdecimal::BigDecimal; pub mod parser; /// Enum of all possible types of elements in an ion document. /// /// These are mapped to either rust literal types or other anion types. /// All values are Options, with a None value corresponding to the /// equivalent 'null' value of the ion document. /// #[derive(Debug, PartialEq, Clone)] pub enum AnionValue { /// Pure null type // Null, /// true, false, null.bool Boolean(Option<bool>), /// Bigint values (unlimited) Integer(Option<BigInt>), /// 64 bit floating point value Float(Option<f64>), /// Exact precision real number value Decimal(Option<BigDecimal>), /// string. String(Option<String>), } /// Variant of AnionValue enum that does not permit null values. /// This includes the 'pure NULL' null value, though this may /// be removed due to naming sillyness. This is much closer in type /// to true JSON values. /// #[derive(Debug, PartialEq, Clone)] pub enum NonNullAnionValue { /// The NULL value - in the NonNullValue! Null, /// true, false Boolean(bool), /// Bigint values (unlimited) Integer(i32), /// 64 bit floating point value Float(f64), /// Exact precision real number value Decimal(BigDecimal), /// String of utf8 characters String(String), } pub use parser::Rdp; macro_rules! impl_int_conversion { ($int_type:ident) => { impl From<$int_type> for AnionValue { #[inline] fn from(int: $int_type) -> Self { AnionValue::Integer(Some(BigInt::from(int))) } } } } impl_int_conversion!(i8); impl_int_conversion!(i16); impl_int_conversion!(i32); impl_int_conversion!(i64); impl_int_conversion!(u8); impl_int_conversion!(u16); impl_int_conversion!(u32); impl_int_conversion!(u64); impl_int_conversion!(BigInt); macro_rules! impl_float_conversion { ($float_type:ident) => { impl From<$float_type> for AnionValue { #[inline] fn from(float_val: $float_type) -> Self { AnionValue::Float(Some(float_val as f64)) } } } } impl_float_conversion!(f32); impl_float_conversion!(f64); impl From<bool> for AnionValue { fn from(boolean: bool) -> Self { AnionValue::Boolean(Some(boolean)) } } // impl From<i32> for AnionValue { // fn from(int: i32) -> AnionValue // { // AnionValue::Integer(Some(BigInt::from(int))) // } // }
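// Illustrative tests: the `From` conversions generated by the macros above, exercised for a
// few literal types; only impls defined in this file are used.
#[cfg(test)]
mod conversion_examples {
    use super::AnionValue;
    use num_bigint::BigInt;

    #[test]
    fn literal_conversions() {
        assert_eq!(
            AnionValue::from(42i32),
            AnionValue::Integer(Some(BigInt::from(42)))
        );
        assert_eq!(AnionValue::from(2.5f32), AnionValue::Float(Some(2.5)));
        assert_eq!(AnionValue::from(true), AnionValue::Boolean(Some(true)));
    }
}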
21.112
71
0.68094
7941a1f90e01621701fcccaa82d7148091a2d1ab
5,885
//! Activity argument parsing use crate::Command; use clap::{App, AppSettings, ArgMatches, SubCommand}; pub(super) const BASE: &'static str = "activity"; pub(super) const BASE_GOALS: &'static str = "goals"; pub(super) const BASE_LIFETIME_STATS: &'static str = "lifetime_stats"; pub(super) const BASE_SUMMARY: &'static str = "summary"; pub(super) const BASE_TS: &'static str = "time_series"; pub(super) const BASE_TS_CALORIES: &'static str = "calories"; pub(super) const BASE_TS_CALORIES_BMR: &'static str = "calories_bmr"; pub(super) const BASE_TS_STEPS: &'static str = "steps"; pub(super) const BASE_TS_DISTANCE: &'static str = "distance"; pub(super) const BASE_TS_FLOORS: &'static str = "floors"; pub(super) const BASE_TS_ELEVATION: &'static str = "elevation"; pub(super) const BASE_TS_SEDENTARY: &'static str = "sedentary"; pub(super) const BASE_TS_LIGHTLY_ACTIVE: &'static str = "lightly_active"; pub(super) const BASE_TS_FAIRLY_ACTIVE: &'static str = "fairly_active"; pub(super) const BASE_TS_VERY_ACTIVE: &'static str = "very_active"; pub(super) const BASE_TS_ACTIVITY_CALORIES: &'static str = "activity_calories"; pub(super) fn app() -> App<'static, 'static> { App::new(BASE) .about("User activity data commands") .setting(AppSettings::SubcommandRequiredElseHelp) .subcommand( SubCommand::with_name(BASE_GOALS).about("Print a summary of the user's activity goals"), ) .subcommand( SubCommand::with_name(BASE_LIFETIME_STATS) .about("Print a summary of the user's lifetime activity statistics"), ) .subcommand( SubCommand::with_name(BASE_SUMMARY) .about("Print a summary of the user's recent activities"), ) .subcommand( SubCommand::with_name(BASE_TS) .about("User activity time series data commands") .setting(AppSettings::SubcommandRequiredElseHelp) .subcommand( SubCommand::with_name(BASE_TS_CALORIES) .about("Print calories time series data"), ) .subcommand( SubCommand::with_name(BASE_TS_CALORIES_BMR) .about("Print calories BMR time series data"), ) .subcommand( SubCommand::with_name(BASE_TS_STEPS).about("Print steps time series data"), ) .subcommand( SubCommand::with_name(BASE_TS_DISTANCE) .about("Print distance time series data"), ) .subcommand( SubCommand::with_name(BASE_TS_FLOORS).about("Print floors time series data"), ) .subcommand( SubCommand::with_name(BASE_TS_ELEVATION) .about("Print elevation time series data"), ) .subcommand( SubCommand::with_name(BASE_TS_SEDENTARY) .about("Print minutes sedentary time series data"), ) .subcommand( SubCommand::with_name(BASE_TS_LIGHTLY_ACTIVE) .about("Print lightly active time series data"), ) .subcommand( SubCommand::with_name(BASE_TS_FAIRLY_ACTIVE) .about("Print fairly active time series data"), ) .subcommand( SubCommand::with_name(BASE_TS_VERY_ACTIVE) .about("Print very active time series data"), ) .subcommand( SubCommand::with_name(BASE_TS_ACTIVITY_CALORIES) .about("Print activity calories time series data"), ), ) } pub(super) fn get_command(matches: &ArgMatches) -> Command { match matches.subcommand() { (BASE_GOALS, Some(_)) => Command::GetActivityGoals, (BASE_LIFETIME_STATS, Some(_)) => Command::GetActivityLifetimeStats, (BASE_SUMMARY, Some(_)) => Command::GetActivitySummary, (BASE_TS, Some(activity_ts_matches)) => { use fitbit_web_api::activity::time_series::Resource; match activity_ts_matches.subcommand() { (BASE_TS_CALORIES, Some(_)) => Command::GetActivityTimeSeries(Resource::Calories), (BASE_TS_CALORIES_BMR, Some(_)) => { Command::GetActivityTimeSeries(Resource::CaloriesBMR) } (BASE_TS_STEPS, Some(_)) => Command::GetActivityTimeSeries(Resource::Steps), 
(BASE_TS_DISTANCE, Some(_)) => Command::GetActivityTimeSeries(Resource::Distance), (BASE_TS_FLOORS, Some(_)) => Command::GetActivityTimeSeries(Resource::Floors), (BASE_TS_ELEVATION, Some(_)) => Command::GetActivityTimeSeries(Resource::Elevation), (BASE_TS_SEDENTARY, Some(_)) => Command::GetActivityTimeSeries(Resource::Sedentary), (BASE_TS_LIGHTLY_ACTIVE, Some(_)) => { Command::GetActivityTimeSeries(Resource::LightlyActive) } (BASE_TS_FAIRLY_ACTIVE, Some(_)) => { Command::GetActivityTimeSeries(Resource::FairlyActive) } (BASE_TS_VERY_ACTIVE, Some(_)) => { Command::GetActivityTimeSeries(Resource::VeryActive) } (BASE_TS_ACTIVITY_CALORIES, Some(_)) => { Command::GetActivityTimeSeries(Resource::ActivityCalories) } ("", None) => super::invalid_command_exit(), _ => unreachable!(), } } ("", None) => super::invalid_command_exit(), _ => unreachable!(), } }
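// Illustrative test: driving the clap definition above end-to-end. The leading "activity"
// element stands in for the binary name that clap skips over; the structural `matches!`
// check avoids assuming a `PartialEq` impl on `Command`.
#[cfg(test)]
mod parsing_sketch {
    use super::{app, get_command, BASE_GOALS};
    use crate::Command;

    #[test]
    fn goals_maps_to_get_activity_goals() {
        let matches = app().get_matches_from(vec!["activity", BASE_GOALS]);
        assert!(matches!(get_command(&matches), Command::GetActivityGoals));
    }
}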
47.08
100
0.575021
90b03d569ba39b5a9235940e468ccc95f9494ca0
20,382
//! Create interactive, native cross-platform applications. mod state; pub use state::State; use crate::clipboard::{self, Clipboard}; use crate::conversion; use crate::mouse; use crate::{ Color, Command, Debug, Error, Executor, Mode, Proxy, Runtime, Settings, Size, Subscription, }; use iced_futures::futures; use iced_futures::futures::channel::mpsc; use iced_graphics::compositor; use iced_graphics::window; use iced_native::program::Program; use iced_native::user_interface::{self, UserInterface}; use std::mem::ManuallyDrop; /// An interactive, native cross-platform application. /// /// This trait is the main entrypoint of Iced. Once implemented, you can run /// your GUI application by simply calling [`run`]. It will run in /// its own window. /// /// An [`Application`] can execute asynchronous actions by returning a /// [`Command`] in some of its methods. /// /// When using an [`Application`] with the `debug` feature enabled, a debug view /// can be toggled by pressing `F12`. pub trait Application: Program { /// The data needed to initialize your [`Application`]. type Flags; /// Initializes the [`Application`] with the flags provided to /// [`run`] as part of the [`Settings`]. /// /// Here is where you should return the initial state of your app. /// /// Additionally, you can return a [`Command`] if you need to perform some /// async action in the background on startup. This is useful if you want to /// load state from a file, perform an initial HTTP request, etc. fn new(flags: Self::Flags) -> (Self, Command<Self::Message>); /// Returns the current title of the [`Application`]. /// /// This title can be dynamic! The runtime will automatically update the /// title of your application when necessary. fn title(&self) -> String; /// Returns the event `Subscription` for the current state of the /// application. /// /// The messages produced by the `Subscription` will be handled by /// [`update`](#tymethod.update). /// /// A `Subscription` will be kept alive as long as you keep returning it! /// /// By default, it returns an empty subscription. fn subscription(&self) -> Subscription<Self::Message> { Subscription::none() } /// Returns the current [`Application`] mode. /// /// The runtime will automatically transition your application if a new mode /// is returned. /// /// By default, an application will run in windowed mode. fn mode(&self) -> Mode { Mode::Windowed } /// Returns the background [`Color`] of the [`Application`]. /// /// By default, it returns [`Color::WHITE`]. fn background_color(&self) -> Color { Color::WHITE } /// Returns the scale factor of the [`Application`]. /// /// It can be used to dynamically control the size of the UI at runtime /// (i.e. zooming). /// /// For instance, a scale factor of `2.0` will make widgets twice as big, /// while a scale factor of `0.5` will shrink them to half their size. /// /// By default, it returns `1.0`. fn scale_factor(&self) -> f64 { 1.0 } /// Returns whether the [`Application`] should be terminated. /// /// By default, it returns `false`. fn should_exit(&self) -> bool { false } } /// Runs an [`Application`] with an executor, compositor, and the provided /// settings. 
pub fn run<A, E, C>( settings: Settings<A::Flags>, compositor_settings: C::Settings, ) -> Result<(), Error> where A: Application + 'static, E: Executor + 'static, C: window::Compositor<Renderer = A::Renderer> + 'static, { use futures::task; use futures::Future; use winit::event_loop::EventLoop; let mut debug = Debug::new(); debug.startup_started(); let event_loop = EventLoop::with_user_event(); let mut proxy = event_loop.create_proxy(); let mut runtime = { let proxy = Proxy::new(event_loop.create_proxy()); let executor = E::new().map_err(Error::ExecutorCreationFailed)?; Runtime::new(executor, proxy) }; let (application, init_command) = { let flags = settings.flags; runtime.enter(|| A::new(flags)) }; let subscription = application.subscription(); let builder = settings.window.into_builder( &application.title(), application.mode(), event_loop.primary_monitor(), settings.id, ); log::info!("Window builder: {:#?}", builder); let window = builder .build(&event_loop) .map_err(Error::WindowCreationFailed)?; #[cfg(target_arch = "wasm32")] { use winit::platform::web::WindowExtWebSys; let canvas = window.canvas(); let window = web_sys::window().unwrap(); let document = window.document().unwrap(); let body = document.body().unwrap(); let _ = body .append_child(&canvas) .expect("Append canvas to HTML body"); } let mut clipboard = Clipboard::connect(&window); let (compositor, renderer) = C::new(compositor_settings, Some(&window))?; run_command( init_command, &mut runtime, &mut clipboard, &mut proxy, &window, || compositor.fetch_information(), ); runtime.track(subscription); let (mut sender, receiver) = mpsc::unbounded(); let mut instance = Box::pin(run_instance::<A, E, C>( application, compositor, renderer, runtime, clipboard, proxy, debug, receiver, window, settings.exit_on_close_request, )); let mut context = task::Context::from_waker(task::noop_waker_ref()); platform::run(event_loop, move |event, _, control_flow| { use winit::event_loop::ControlFlow; if let ControlFlow::Exit = control_flow { return; } let event = match event { winit::event::Event::WindowEvent { event: winit::event::WindowEvent::ScaleFactorChanged { new_inner_size, .. 
}, window_id, } => Some(winit::event::Event::WindowEvent { event: winit::event::WindowEvent::Resized(*new_inner_size), window_id, }), _ => event.to_static(), }; if let Some(event) = event { sender.start_send(event).expect("Send event"); let poll = instance.as_mut().poll(&mut context); *control_flow = match poll { task::Poll::Pending => ControlFlow::Wait, task::Poll::Ready(_) => ControlFlow::Exit, }; } }) } async fn run_instance<A, E, C>( mut application: A, mut compositor: C, mut renderer: A::Renderer, mut runtime: Runtime<E, Proxy<A::Message>, A::Message>, mut clipboard: Clipboard, mut proxy: winit::event_loop::EventLoopProxy<A::Message>, mut debug: Debug, mut receiver: mpsc::UnboundedReceiver<winit::event::Event<'_, A::Message>>, window: winit::window::Window, exit_on_close_request: bool, ) where A: Application + 'static, E: Executor + 'static, C: window::Compositor<Renderer = A::Renderer> + 'static, { use iced_futures::futures::stream::StreamExt; use winit::event; let mut surface = compositor.create_surface(&window); let mut state = State::new(&application, &window); let mut viewport_version = state.viewport_version(); let physical_size = state.physical_size(); compositor.configure_surface( &mut surface, physical_size.width, physical_size.height, ); let mut user_interface = ManuallyDrop::new(build_user_interface( &mut application, user_interface::Cache::default(), &mut renderer, state.logical_size(), &mut debug, )); let mut mouse_interaction = mouse::Interaction::default(); let mut events = Vec::new(); let mut messages = Vec::new(); debug.startup_finished(); while let Some(event) = receiver.next().await { match event { event::Event::MainEventsCleared => { if events.is_empty() && messages.is_empty() { continue; } debug.event_processing_started(); let (interface_state, statuses) = user_interface.update( &events, state.cursor_position(), &mut renderer, &mut clipboard, &mut messages, ); debug.event_processing_finished(); for event in events.drain(..).zip(statuses.into_iter()) { runtime.broadcast(event); } if !messages.is_empty() || matches!( interface_state, user_interface::State::Outdated, ) { let cache = ManuallyDrop::into_inner(user_interface).into_cache(); // Update application update( &mut application, &mut runtime, &mut clipboard, &mut proxy, &mut debug, &mut messages, &window, || compositor.fetch_information(), ); // Update window state.synchronize(&application, &window); let should_exit = application.should_exit(); user_interface = ManuallyDrop::new(build_user_interface( &mut application, cache, &mut renderer, state.logical_size(), &mut debug, )); if should_exit { break; } } debug.draw_started(); let new_mouse_interaction = user_interface.draw(&mut renderer, state.cursor_position()); debug.draw_finished(); if new_mouse_interaction != mouse_interaction { window.set_cursor_icon(conversion::mouse_interaction( new_mouse_interaction, )); mouse_interaction = new_mouse_interaction; } window.request_redraw(); } event::Event::PlatformSpecific(event::PlatformSpecific::MacOS( event::MacOS::ReceivedUrl(url), )) => { use iced_native::event; events.push(iced_native::Event::PlatformSpecific( event::PlatformSpecific::MacOS(event::MacOS::ReceivedUrl( url, )), )); } event::Event::UserEvent(message) => { messages.push(message); } event::Event::RedrawRequested(_) => { let physical_size = state.physical_size(); if physical_size.width == 0 || physical_size.height == 0 { continue; } debug.render_started(); let current_viewport_version = state.viewport_version(); if viewport_version != 
current_viewport_version { let logical_size = state.logical_size(); debug.layout_started(); user_interface = ManuallyDrop::new( ManuallyDrop::into_inner(user_interface) .relayout(logical_size, &mut renderer), ); debug.layout_finished(); debug.draw_started(); let new_mouse_interaction = user_interface .draw(&mut renderer, state.cursor_position()); if new_mouse_interaction != mouse_interaction { window.set_cursor_icon(conversion::mouse_interaction( new_mouse_interaction, )); mouse_interaction = new_mouse_interaction; } debug.draw_finished(); compositor.configure_surface( &mut surface, physical_size.width, physical_size.height, ); viewport_version = current_viewport_version; } match compositor.present( &mut renderer, &mut surface, state.viewport(), state.background_color(), &debug.overlay(), ) { Ok(()) => { debug.render_finished(); // TODO: Handle animations! // Maybe we can use `ControlFlow::WaitUntil` for this. } Err(error) => match error { // This is an unrecoverable error. compositor::SurfaceError::OutOfMemory => { panic!("{:?}", error); } _ => { debug.render_finished(); // Try rendering again next frame. window.request_redraw(); } }, } } event::Event::WindowEvent { event: window_event, .. } => { if requests_exit(&window_event, state.modifiers()) && exit_on_close_request { break; } state.update(&window, &window_event, &mut debug); if let Some(event) = conversion::window_event( &window_event, state.scale_factor(), state.modifiers(), ) { events.push(event); } } _ => {} } } // Manually drop the user interface drop(ManuallyDrop::into_inner(user_interface)); } /// Returns true if the provided event should cause an [`Application`] to /// exit. pub fn requests_exit( event: &winit::event::WindowEvent<'_>, _modifiers: winit::event::ModifiersState, ) -> bool { use winit::event::WindowEvent; match event { WindowEvent::CloseRequested => true, #[cfg(target_os = "macos")] WindowEvent::KeyboardInput { input: winit::event::KeyboardInput { virtual_keycode: Some(winit::event::VirtualKeyCode::Q), state: winit::event::ElementState::Pressed, .. }, .. } if _modifiers.logo() => true, _ => false, } } /// Builds a [`UserInterface`] for the provided [`Application`], logging /// [`struct@Debug`] information accordingly. pub fn build_user_interface<'a, A: Application>( application: &'a mut A, cache: user_interface::Cache, renderer: &mut A::Renderer, size: Size, debug: &mut Debug, ) -> UserInterface<'a, A::Message, A::Renderer> { debug.view_started(); let view = application.view(); debug.view_finished(); debug.layout_started(); let user_interface = UserInterface::build(view, size, cache, renderer); debug.layout_finished(); user_interface } /// Updates an [`Application`] by feeding it the provided messages, spawning any /// resulting [`Command`], and tracking its [`Subscription`]. pub fn update<A: Application, E: Executor>( application: &mut A, runtime: &mut Runtime<E, Proxy<A::Message>, A::Message>, clipboard: &mut Clipboard, proxy: &mut winit::event_loop::EventLoopProxy<A::Message>, debug: &mut Debug, messages: &mut Vec<A::Message>, window: &winit::window::Window, graphics_info: impl FnOnce() -> compositor::Information + Copy, ) { for message in messages.drain(..) { debug.log_message(&message); debug.update_started(); let command = runtime.enter(|| application.update(message)); debug.update_finished(); run_command(command, runtime, clipboard, proxy, window, graphics_info); } let subscription = application.subscription(); runtime.track(subscription); } /// Runs the actions of a [`Command`]. 
pub fn run_command<Message: 'static + std::fmt::Debug + Send, E: Executor>( command: Command<Message>, runtime: &mut Runtime<E, Proxy<Message>, Message>, clipboard: &mut Clipboard, proxy: &mut winit::event_loop::EventLoopProxy<Message>, window: &winit::window::Window, _graphics_info: impl FnOnce() -> compositor::Information + Copy, ) { use iced_native::command; use iced_native::system; use iced_native::window; for action in command.actions() { match action { command::Action::Future(future) => { runtime.spawn(future); } command::Action::Clipboard(action) => match action { clipboard::Action::Read(tag) => { let message = tag(clipboard.read()); proxy .send_event(message) .expect("Send message to event loop"); } clipboard::Action::Write(contents) => { clipboard.write(contents); } }, command::Action::Window(action) => match action { window::Action::Resize { width, height } => { window.set_inner_size(winit::dpi::LogicalSize { width, height, }); } window::Action::Move { x, y } => { window.set_outer_position(winit::dpi::LogicalPosition { x, y, }); } }, command::Action::System(action) => match action { system::Action::QueryInformation(_tag) => { #[cfg(feature = "system")] { let graphics_info = _graphics_info(); let proxy = proxy.clone(); let _ = std::thread::spawn(move || { let information = crate::system::information(graphics_info); let message = _tag(information); proxy .send_event(message) .expect("Send message to event loop") }); } } }, } } } #[cfg(not(target_arch = "wasm32"))] mod platform { pub fn run<T, F>( mut event_loop: winit::event_loop::EventLoop<T>, event_handler: F, ) -> Result<(), super::Error> where F: 'static + FnMut( winit::event::Event<'_, T>, &winit::event_loop::EventLoopWindowTarget<T>, &mut winit::event_loop::ControlFlow, ), { use winit::platform::run_return::EventLoopExtRunReturn; let _ = event_loop.run_return(event_handler); Ok(()) } } #[cfg(target_arch = "wasm32")] mod platform { pub fn run<T, F>( event_loop: winit::event_loop::EventLoop<T>, event_handler: F, ) -> ! where F: 'static + FnMut( winit::event::Event<'_, T>, &winit::event_loop::EventLoopWindowTarget<T>, &mut winit::event_loop::ControlFlow, ), { event_loop.run(event_handler) } }
31.6
80
0.521048